Dataset schema:

column      type     range / cardinality
code        string   length 3 – 1.01M
repo_name   string   length 5 – 116
path        string   length 3 – 311
language    string   30 distinct values
license     string   15 distinct values
size        int64    3 – 1.01M
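For orientation, each row below carries the six fields from the schema above. The following is a minimal sketch, not part of the source, assuming the rows have been exported as newline-delimited JSON into a hypothetical file named code_samples.jsonl (both the filename and the export format are assumptions); it simply iterates the records and tallies them by language and license.

import json
from collections import Counter

# Hypothetical export of the rows shown below: one JSON object per line with
# the fields code, repo_name, path, language, license, size.
DUMP_PATH = "code_samples.jsonl"  # assumed filename, not taken from the source

lang_counts = Counter()
license_counts = Counter()
total_bytes = 0

with open(DUMP_PATH, encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        lang_counts[record["language"]] += 1
        license_counts[record["license"]] += 1
        total_bytes += record["size"]

print("languages:", lang_counts.most_common())
print("licenses:", license_counts.most_common())
print("total size (bytes):", total_bytes)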
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
  <title>Chord Lookup</title>
  <!--
  <link rel="stylesheet" href="$views/css/image.css">
  <link rel="stylesheet" href="$views/css/list.css">
  <link rel="stylesheet" href="$views/css/buttons.css">
  -->
  <link rel="stylesheet" href="css/style.css">
</head>
<body>
  <p id="notification-bar"><small>No notifications.</small></p>
  <div id="container">
    <iframe id="iframe" src="http://tabs.ultimate-guitar.com/" frameBorder="0">
      <p>Your browser does not support iframes.</p>
    </iframe>
  </div>
  <script src="scripts/jquery-1.11.0.js"></script>
  <script src="scripts/my-script.js"></script>
</body>
</html>
repo_name: benjaminran/chordlookup
path: index.html
language: HTML
license: bsd-3-clause
size: 787
<?php

use yii\helpers\Html;
use yii\helpers\ArrayHelper;
use yii\widgets\DetailView;
use yii\widgets\ActiveForm;
use yii\helpers\Url;

/* @var $this yii\web\View */
/* @var $model backend\models\Product */

$this->title = $model->title;
$this->params['breadcrumbs'][] = ['label' => 'Products', 'url' => ['index']];
$this->params['breadcrumbs'][] = $this->title;
?>
<div class="product-view">

    <h1><?= Html::encode($this->title) ?></h1>

    <p>
        <?= Html::a('Update', ['update', 'id' => $model->id], ['class' => 'btn btn-primary']) ?>
        <?= Html::a('Delete', ['delete', 'id' => $model->id], [
            'class' => 'btn btn-danger',
            'data' => [
                'confirm' => 'Are you sure you want to delete this item?',
                'method' => 'post',
            ],
        ]) ?>
    </p>

    <?= DetailView::widget([
        'model' => $model,
        'attributes' => [
            'id',
            'title',
            'brand',
            'description:ntext',
            'short_description:ntext',
            [
                'label' => 'Картинка',
                'format' => 'raw',
                'value' => Html::img('@web/images/products/titles/' . $model->image, ['alt' => $model->image_alt, 'width' => 150, 'height' => 150]),
            ],
            'image_alt',
            'slug_ru',
            'availability',
            'price',
            'old_price',
            'currency',
            'vendor',
            'rating',
            'rating_count',
            'top',
            [
                'attribute' => 'user_id',
                'value' => $model->user->username
            ],
            [
                'attribute' => 'category_id',
                'value' => $model->category->name
            ],
        ],
    ]) ?>

    <article>
        <h2>Картинки</h2>
        <?php foreach ($model->images as $image): ?>
            <p><?= Html::img('@web/images/products/galleries/' . $image->name, ['alt' => $image->alt, 'width' => 150, 'height' => 150]); ?>
            <?= Html::a('Удалить', ['products/delete-gallery-image'], ['class' => 'btn btn-default',
                'onclick' => "$(this);//for jui dialog in my page
                    $.ajax({
                        type : 'POST',
                        cache : false,
                        url : '/admin/products/delete-gallery-image',
                        data: {id: $image->id},
                        success : function(response) {
                            if (response) location.reload();
                        }
                    });return false;",
            ]); ?></p>
        <?php endforeach ?>
    </article>

</div>
repo_name: Ravend6/php_yii2_ecommerce
path: backend/views/products/view.php
language: PHP
license: bsd-3-clause
size: 2,703
//------------------------------------------------------------------------------ // CLING - the C++ LLVM-based InterpreterG :) // author: Vassil Vassilev <vasil.georgiev.vasilev@cern.ch> // // This file is dual-licensed: you can choose to license it under the University // of Illinois Open Source License or the GNU Lesser General Public License. See // LICENSE.TXT for details. //------------------------------------------------------------------------------ // Updates for Chimera // - 2019-02-02 Updated CreateNestedNameSpecifierForScopeOf() to consider // TemplateSpecializationType as well to fix #228 // - 2018-10-07 Fixed #216 #include "cling_utils_AST.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/GlobalDecl.h" #include "clang/Sema/Sema.h" #include "clang/Sema/Lookup.h" #include "clang/AST/DeclTemplate.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "clang/AST/Mangle.h" #include <memory> #include <stdio.h> // LLVM <3.8 require separate arguments for data and length, while later versions // can directly take an array reference (e.g. implicitly casting from std::vector). #if LLVM_VERSION_MAJOR > 3 || (LLVM_VERSION_MAJOR == 3 && LLVM_VERSION_MINOR >= 8) #define ARRAY_COMPAT(_container_) _container_ #else #define ARRAY_COMPAT(_container_) _container_.data(), _container_.size() #endif using namespace clang; namespace { template<typename D> static D* LookupResult2Decl(clang::LookupResult& R) { if (R.empty()) return 0; R.resolveKind(); if (R.isSingleResult()) return dyn_cast<D>(R.getFoundDecl()); return (D*)-1; } } namespace cling { namespace utils { static QualType GetPartiallyDesugaredTypeImpl(const ASTContext& Ctx, QualType QT, const Transform::Config& TypeConfig, bool fullyQualifyType, bool fullyQualifyTmpltArg); static NestedNameSpecifier* GetPartiallyDesugaredNNS(const ASTContext& Ctx, NestedNameSpecifier* scope, const Transform::Config& TypeConfig); static NestedNameSpecifier* CreateNestedNameSpecifierForScopeOf(const ASTContext& Ctx, const Decl *decl, bool FullyQualified); static NestedNameSpecifier* GetFullyQualifiedNameSpecifier(const ASTContext& Ctx, NestedNameSpecifier* scope); bool Analyze::IsWrapper(const FunctionDecl* ND) { if (!ND) return false; if (!ND->getDeclName().isIdentifier()) return false; return ND->getName().startswith(Synthesize::UniquePrefix); } void Analyze::maybeMangleDeclName(const GlobalDecl& GD, std::string& mangledName) { // copied and adapted from CodeGen::CodeGenModule::getMangledName NamedDecl* D = cast<NamedDecl>(const_cast<Decl*>(GD.getDecl())); std::unique_ptr<MangleContext> mangleCtx; mangleCtx.reset(D->getASTContext().createMangleContext()); if (!mangleCtx->shouldMangleDeclName(D)) { IdentifierInfo *II = D->getIdentifier(); assert(II && "Attempt to mangle unnamed decl."); mangledName = II->getName(); return; } llvm::raw_string_ostream RawStr(mangledName); switch(D->getKind()) { case Decl::CXXConstructor: //Ctor_Complete, // Complete object ctor //Ctor_Base, // Base object ctor //Ctor_CompleteAllocating // Complete object allocating ctor (unused) mangleCtx->mangleCXXCtor(cast<CXXConstructorDecl>(D), GD.getCtorType(), RawStr); break; case Decl::CXXDestructor: //Dtor_Deleting, // Deleting dtor //Dtor_Complete, // Complete object dtor //Dtor_Base // Base object dtor #if defined(LLVM_ON_WIN32) // MicrosoftMangle.cpp:954 calls llvm_unreachable when mangling Dtor_Comdat if (GD.getDtorType() == Dtor_Comdat) { if (const IdentifierInfo* II = D->getIdentifier()) RawStr << II->getName(); } 
else #endif { mangleCtx->mangleCXXDtor(cast<CXXDestructorDecl>(D), GD.getDtorType(), RawStr); } break; default : mangleCtx->mangleName(D, RawStr); break; } RawStr.flush(); } //////////////////////////// // DISABLED FOR CHIMERA: // This function disabled because it's incompatible with LLVM (>=6) //////////////////////////// // // Expr* Analyze::GetOrCreateLastExpr(FunctionDecl* FD, // int* FoundAt /*=0*/, // bool omitDeclStmts /*=true*/, // Sema* S /*=0*/) { // assert(FD && "We need a function declaration!"); // assert((omitDeclStmts || S) // && "Sema needs to be set when omitDeclStmts is false"); // if (FoundAt) // *FoundAt = -1; // // Expr* result = 0; // if (CompoundStmt* CS = dyn_cast<CompoundStmt>(FD->getBody())) { // ArrayRef<Stmt*> Stmts // = llvm::makeArrayRef(CS->body_begin(), CS->size()); // int indexOfLastExpr = Stmts.size(); // while(indexOfLastExpr--) { // if (!isa<NullStmt>(Stmts[indexOfLastExpr])) // break; // } // // if (FoundAt) // *FoundAt = indexOfLastExpr; // // if (indexOfLastExpr < 0) // return 0; // // if ( (result = dyn_cast<Expr>(Stmts[indexOfLastExpr])) ) // return result; // if (!omitDeclStmts) // if (DeclStmt* DS = dyn_cast<DeclStmt>(Stmts[indexOfLastExpr])) { // std::vector<Stmt*> newBody = Stmts.vec(); // for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(), // E = DS->decl_rend(); I != E; ++I) { // if (VarDecl* VD = dyn_cast<VarDecl>(*I)) { // // Change the void function's return type // // We can't PushDeclContext, because we don't have scope. // Sema::ContextRAII pushedDC(*S, FD); // // QualType VDTy = VD->getType().getNonReferenceType(); // // Get the location of the place we will insert. // SourceLocation Loc // = newBody[indexOfLastExpr]->getLocEnd().getLocWithOffset(1); // Expr* DRE = S->BuildDeclRefExpr(VD, VDTy,VK_LValue, Loc).get(); // assert(DRE && "Cannot be null"); // indexOfLastExpr++; // newBody.insert(newBody.begin() + indexOfLastExpr, DRE); // // // Attach the new body (note: it does dealloc/alloc of all nodes) // CS->setStmts(S->getASTContext(), ARRAY_COMPAT(newBody)); // if (FoundAt) // *FoundAt = indexOfLastExpr; // return DRE; // } // } // } // // return result; // } // // return result; // } const char* const Synthesize::UniquePrefix = "__cling_Un1Qu3"; IntegerLiteral* Synthesize::IntegerLiteralExpr(ASTContext& C, uintptr_t Ptr) { const llvm::APInt Addr(8 * sizeof(void*), Ptr); return IntegerLiteral::Create(C, Addr, C.getUIntPtrType(), SourceLocation()); } Expr* Synthesize::CStyleCastPtrExpr(Sema* S, QualType Ty, uintptr_t Ptr) { ASTContext& Ctx = S->getASTContext(); return CStyleCastPtrExpr(S, Ty, Synthesize::IntegerLiteralExpr(Ctx, Ptr)); } Expr* Synthesize::CStyleCastPtrExpr(Sema* S, QualType Ty, Expr* E) { ASTContext& Ctx = S->getASTContext(); if (!Ty->isPointerType()) Ty = Ctx.getPointerType(Ty); TypeSourceInfo* TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation()); Expr* Result = S->BuildCStyleCastExpr(SourceLocation(), TSI,SourceLocation(),E).get(); assert(Result && "Cannot create CStyleCastPtrExpr"); return Result; } static bool GetFullyQualifiedTemplateName(const ASTContext& Ctx, TemplateName &tname) { bool changed = false; NestedNameSpecifier *NNS = 0; TemplateDecl *argtdecl = tname.getAsTemplateDecl(); QualifiedTemplateName *qtname = tname.getAsQualifiedTemplateName(); if (qtname && !qtname->hasTemplateKeyword()) { NNS = qtname->getQualifier(); NestedNameSpecifier *qNNS = GetFullyQualifiedNameSpecifier(Ctx,NNS); if (qNNS != NNS) { changed = true; NNS = qNNS; } else { NNS = 0; } } else { NNS = 
CreateNestedNameSpecifierForScopeOf(Ctx, argtdecl, true); } if (NNS) { tname = Ctx.getQualifiedTemplateName(NNS, /*TemplateKeyword=*/ false, argtdecl); changed = true; } return changed; } static bool GetFullyQualifiedTemplateArgument(const ASTContext& Ctx, TemplateArgument &arg) { bool changed = false; // Note: we do not handle TemplateArgument::Expression, to replace it // we need the information for the template instance decl. // See GetPartiallyDesugaredTypeImpl if (arg.getKind() == TemplateArgument::Template) { TemplateName tname = arg.getAsTemplate(); changed = GetFullyQualifiedTemplateName(Ctx, tname); if (changed) { arg = TemplateArgument(tname); } } else if (arg.getKind() == TemplateArgument::Type) { QualType SubTy = arg.getAsType(); // Check if the type needs more desugaring and recurse. QualType QTFQ = TypeName::GetFullyQualifiedType(SubTy, Ctx); if (QTFQ != SubTy) { arg = TemplateArgument(QTFQ); changed = true; } } else if (arg.getKind() == TemplateArgument::Pack) { SmallVector<TemplateArgument, 2> desArgs; for (auto I = arg.pack_begin(), E = arg.pack_end(); I != E; ++I) { TemplateArgument pack_arg(*I); changed = GetFullyQualifiedTemplateArgument(Ctx,pack_arg); desArgs.push_back(pack_arg); } if (changed) { // The allocator in ASTContext is mutable ... // Keep the argument const to be inline will all the other interfaces // like: NestedNameSpecifier::Create ASTContext &mutableCtx( const_cast<ASTContext&>(Ctx) ); arg = TemplateArgument::CreatePackCopy(mutableCtx, ARRAY_COMPAT(desArgs)); } } return changed; } static const Type* GetFullyQualifiedLocalType(const ASTContext& Ctx, const Type *typeptr) { // We really just want to handle the template parameter if any .... // In case of template specializations iterate over the arguments and // fully qualify them as well. if (const TemplateSpecializationType* TST = llvm::dyn_cast<const TemplateSpecializationType>(typeptr)) { bool mightHaveChanged = false; llvm::SmallVector<TemplateArgument, 4> desArgs; for (TemplateSpecializationType::iterator I = TST->begin(), E = TST->end(); I != E; ++I) { // cheap to copy and potentially modified by // GetFullyQualifedTemplateArgument TemplateArgument arg(*I); mightHaveChanged |= GetFullyQualifiedTemplateArgument(Ctx,arg); desArgs.push_back(arg); } // If desugaring happened allocate new type in the AST. if (mightHaveChanged) { QualType QT = Ctx.getTemplateSpecializationType(TST->getTemplateName(), ARRAY_COMPAT(desArgs), TST->getCanonicalTypeInternal()); return QT.getTypePtr(); } } else if (const RecordType *TSTRecord = llvm::dyn_cast<const RecordType>(typeptr)) { // We are asked to fully qualify and we have a Record Type, // which can point to a template instantiation with no sugar in any of // its template argument, however we still need to fully qualify them. if (const ClassTemplateSpecializationDecl* TSTdecl = llvm::dyn_cast<ClassTemplateSpecializationDecl>(TSTRecord->getDecl())) { const TemplateArgumentList& templateArgs = TSTdecl->getTemplateArgs(); bool mightHaveChanged = false; llvm::SmallVector<TemplateArgument, 4> desArgs; for(unsigned int I = 0, E = templateArgs.size(); I != E; ++I) { // cheap to copy and potentially modified by // GetFullyQualifedTemplateArgument TemplateArgument arg(templateArgs[I]); mightHaveChanged |= GetFullyQualifiedTemplateArgument(Ctx,arg); desArgs.push_back(arg); } // If desugaring happened allocate new type in the AST. 
if (mightHaveChanged) { TemplateName TN(TSTdecl->getSpecializedTemplate()); QualType QT = Ctx.getTemplateSpecializationType(TN, ARRAY_COMPAT(desArgs), TSTRecord->getCanonicalTypeInternal()); return QT.getTypePtr(); } } } return typeptr; } static NestedNameSpecifier* CreateOuterNNS(const ASTContext& Ctx, const Decl* D, bool FullyQualify) { const DeclContext* DC = D->getDeclContext(); if (const NamespaceDecl* NS = dyn_cast<NamespaceDecl>(DC)) { while (NS && NS->isInline()) { // Ignore inline namespace; NS = dyn_cast_or_null<NamespaceDecl>(NS->getDeclContext()); } if (NS->getDeclName()) return TypeName::CreateNestedNameSpecifier(Ctx, NS); return 0; // no starting '::', no anonymous } else if (const TagDecl* TD = dyn_cast<TagDecl>(DC)) { return TypeName::CreateNestedNameSpecifier(Ctx, TD, FullyQualify); } else if (const TypedefNameDecl* TDD = dyn_cast<TypedefNameDecl>(DC)) { return TypeName::CreateNestedNameSpecifier(Ctx, TDD, FullyQualify); } return 0; // no starting '::' } static NestedNameSpecifier* GetFullyQualifiedNameSpecifier(const ASTContext& Ctx, NestedNameSpecifier* scope) { // Return a fully qualified version of this name specifier if (scope->getKind() == NestedNameSpecifier::Global) { // Already fully qualified. return scope; } if (const Type *type = scope->getAsType()) { // Find decl context. const TagDecl* TD = 0; if (const TagType* tagdecltype = dyn_cast<TagType>(type)) { TD = tagdecltype->getDecl(); } else { TD = type->getAsCXXRecordDecl(); } if (TD) { return TypeName::CreateNestedNameSpecifier(Ctx, TD, true /*FullyQualified*/); } else if (const TypedefType* TDD = dyn_cast<TypedefType>(type)) { return TypeName::CreateNestedNameSpecifier(Ctx, TDD->getDecl(), true /*FullyQualified*/); } } else if (const NamespaceDecl* NS = scope->getAsNamespace()) { return TypeName::CreateNestedNameSpecifier(Ctx, NS); } else if (const NamespaceAliasDecl* alias = scope->getAsNamespaceAlias()) { const NamespaceDecl* NS = alias->getNamespace()->getCanonicalDecl(); return TypeName::CreateNestedNameSpecifier(Ctx, NS); } return scope; } static NestedNameSpecifier* SelectPrefix(const ASTContext& Ctx, const DeclContext *declContext, NestedNameSpecifier *original_prefix, const Transform::Config& TypeConfig) { // We have to also desugar the prefix. NestedNameSpecifier* prefix = 0; if (declContext) { // We had a scope prefix as input, let see if it is still // the same as the scope of the result and if it is, then // we use it. if (declContext->isNamespace()) { // Deal with namespace. This is mostly about dealing with // namespace aliases (i.e. keeping the one the user used). const NamespaceDecl *new_ns =dyn_cast<NamespaceDecl>(declContext); if (new_ns) { new_ns = new_ns->getCanonicalDecl(); NamespaceDecl *old_ns = 0; if (original_prefix) { original_prefix->getAsNamespace(); if (NamespaceAliasDecl *alias = original_prefix->getAsNamespaceAlias()) { old_ns = alias->getNamespace()->getCanonicalDecl(); } } if (old_ns == new_ns) { // This is the same namespace, use the original prefix // as a starting point. prefix = GetFullyQualifiedNameSpecifier(Ctx,original_prefix); } else { prefix = TypeName::CreateNestedNameSpecifier(Ctx, dyn_cast<NamespaceDecl>(new_ns)); } } } else { const CXXRecordDecl* newtype=dyn_cast<CXXRecordDecl>(declContext); if (newtype && original_prefix) { // Deal with a class const Type *oldtype = original_prefix->getAsType(); if (oldtype && // NOTE: Should we compare the RecordDecl instead? 
oldtype->getAsCXXRecordDecl() == newtype) { // This is the same type, use the original prefix as a starting // point. prefix = GetPartiallyDesugaredNNS(Ctx,original_prefix,TypeConfig); } else { const TagDecl *tdecl = dyn_cast<TagDecl>(declContext); if (tdecl) { prefix = TypeName::CreateNestedNameSpecifier(Ctx, tdecl, false /*FullyQualified*/); } } } else { // We should only create the nested name specifier // if the outer scope is really a TagDecl. // It could also be a CXXMethod for example. const TagDecl *tdecl = dyn_cast<TagDecl>(declContext); if (tdecl) { prefix = TypeName::CreateNestedNameSpecifier(Ctx,tdecl, false /*FullyQualified*/); } } } } else { prefix = GetFullyQualifiedNameSpecifier(Ctx,original_prefix); } return prefix; } static NestedNameSpecifier* SelectPrefix(const ASTContext& Ctx, const ElaboratedType *etype, NestedNameSpecifier *original_prefix, const Transform::Config& TypeConfig) { // We have to also desugar the prefix. NestedNameSpecifier* prefix = etype->getQualifier(); if (original_prefix && prefix) { // We had a scope prefix as input, let see if it is still // the same as the scope of the result and if it is, then // we use it. const Type *newtype = prefix->getAsType(); if (newtype) { // Deal with a class const Type *oldtype = original_prefix->getAsType(); if (oldtype && // NOTE: Should we compare the RecordDecl instead? oldtype->getAsCXXRecordDecl() == newtype->getAsCXXRecordDecl()) { // This is the same type, use the original prefix as a starting // point. prefix = GetPartiallyDesugaredNNS(Ctx,original_prefix,TypeConfig); } else { prefix = GetPartiallyDesugaredNNS(Ctx,prefix,TypeConfig); } } else { // Deal with namespace. This is mostly about dealing with // namespace aliases (i.e. keeping the one the user used). const NamespaceDecl *new_ns = prefix->getAsNamespace(); if (new_ns) { new_ns = new_ns->getCanonicalDecl(); } else if (NamespaceAliasDecl *alias = prefix->getAsNamespaceAlias() ) { new_ns = alias->getNamespace()->getCanonicalDecl(); } if (new_ns) { const NamespaceDecl *old_ns = original_prefix->getAsNamespace(); if (old_ns) { old_ns = old_ns->getCanonicalDecl(); } else if (NamespaceAliasDecl *alias = original_prefix->getAsNamespaceAlias()) { old_ns = alias->getNamespace()->getCanonicalDecl(); } if (old_ns == new_ns) { // This is the same namespace, use the original prefix // as a starting point. prefix = GetFullyQualifiedNameSpecifier(Ctx,original_prefix); } else { prefix = GetFullyQualifiedNameSpecifier(Ctx,prefix); } } else { prefix = GetFullyQualifiedNameSpecifier(Ctx,prefix); } } } return prefix; } static NestedNameSpecifier* GetPartiallyDesugaredNNS(const ASTContext& Ctx, NestedNameSpecifier* scope, const Transform::Config& TypeConfig) { // Desugar the scope qualifier if needed. if (const Type* scope_type = scope->getAsType()) { // this is not a namespace, so we might need to desugar QualType desugared = GetPartiallyDesugaredTypeImpl(Ctx, QualType(scope_type,0), TypeConfig, /*qualifyType=*/false, /*qualifyTmpltArg=*/true); NestedNameSpecifier* outer_scope = scope->getPrefix(); const ElaboratedType* etype = dyn_cast<ElaboratedType>(desugared.getTypePtr()); if (etype) { // The desugarding returned an elaborated type even-though we // did not request it (/*fullyQualify=*/false), so we must have been // looking a typedef pointing at a (or another) scope. 
if (outer_scope) { outer_scope = SelectPrefix(Ctx,etype,outer_scope,TypeConfig); } else { outer_scope = GetPartiallyDesugaredNNS(Ctx,etype->getQualifier(), TypeConfig); } desugared = etype->getNamedType(); } else { Decl* decl = 0; const TypedefType* typedeftype = dyn_cast_or_null<TypedefType>(&(*desugared)); if (typedeftype) { decl = typedeftype->getDecl(); } else { // There are probably other cases ... const TagType* tagdecltype = dyn_cast_or_null<TagType>(&(*desugared)); if (tagdecltype) { decl = tagdecltype->getDecl(); } else { decl = desugared->getAsCXXRecordDecl(); } } if (decl) { NamedDecl* outer = dyn_cast_or_null<NamedDecl>(decl->getDeclContext()); NamespaceDecl* outer_ns = dyn_cast_or_null<NamespaceDecl>(decl->getDeclContext()); if (outer && !(outer_ns && outer_ns->isAnonymousNamespace()) && outer->getName().size() ) { outer_scope = SelectPrefix(Ctx,decl->getDeclContext(), outer_scope,TypeConfig); } else { outer_scope = 0; } } else if (outer_scope) { outer_scope = GetPartiallyDesugaredNNS(Ctx, outer_scope, TypeConfig); } } return NestedNameSpecifier::Create(Ctx,outer_scope, false /* template keyword wanted */, desugared.getTypePtr()); } else { return GetFullyQualifiedNameSpecifier(Ctx,scope); } } bool Analyze::IsStdOrCompilerDetails(const NamedDecl &decl) { // Return true if the TagType is a 'details' of the std implementation // or declared within std. // Details means (For now) declared in __gnu_cxx or starting with // underscore. IdentifierInfo *info = decl.getDeclName().getAsIdentifierInfo(); if (info && info->getNameStart()[0] == '_') { // We have a name starting by _, this is reserve for compiler // implementation, so let's not desugar to it. return true; } // And let's check if it is in one of the know compiler implementation // namespace. const NamedDecl *outer =dyn_cast_or_null<NamedDecl>(decl.getDeclContext()); while (outer && outer->getName().size() ) { if (outer->getName().compare("std") == 0 || outer->getName().compare("__gnu_cxx") == 0) { return true; } outer = dyn_cast_or_null<NamedDecl>(outer->getDeclContext()); } return false; } bool Analyze::IsStdClass(const clang::NamedDecl &cl) { // Return true if the class or template is declared directly in the // std namespace (modulo inline namespace). return cl.getDeclContext()->isStdNamespace(); } // See Sema::PushOnScopeChains bool Analyze::isOnScopeChains(const NamedDecl* ND, Sema& SemaR) { // Named decls without name shouldn't be in. Eg: struct {int a}; if (!ND->getDeclName()) return false; // Out-of-line definitions shouldn't be pushed into scope in C++. // Out-of-line variable and function definitions shouldn't even in C. if ((isa<VarDecl>(ND) || isa<FunctionDecl>(ND)) && ND->isOutOfLine() && !ND->getDeclContext()->getRedeclContext()->Equals( ND->getLexicalDeclContext()->getRedeclContext())) return false; // Template instantiations should also not be pushed into scope. 
if (isa<FunctionDecl>(ND) && cast<FunctionDecl>(ND)->isFunctionTemplateSpecialization()) return false; // Using directives are not registered onto the scope chain if (isa<UsingDirectiveDecl>(ND)) return false; IdentifierResolver::iterator IDRi = SemaR.IdResolver.begin(ND->getDeclName()), IDRiEnd = SemaR.IdResolver.end(); for (; IDRi != IDRiEnd; ++IDRi) { if (ND == *IDRi) return true; } // Check if the declaration is template instantiation, which is not in // any DeclContext yet, because it came from // Sema::PerformPendingInstantiations // if (isa<FunctionDecl>(D) && // cast<FunctionDecl>(D)->getTemplateInstantiationPattern()) // return false; return false; } unsigned int Transform::Config::DropDefaultArg(clang::TemplateDecl &Template) const { /// Return the number of default argument to drop. if (Analyze::IsStdClass(Template)) { static const char *stls[] = //container names {"vector","list","deque","map","multimap","set","multiset",0}; static unsigned int values[] = //number of default arg. {1,1,1,2,2,2,2}; StringRef name = Template.getName(); for(int k=0;stls[k];k++) { if ( name.equals(stls[k]) ) return values[k]; } } // Check in some struct if the Template decl is registered something like /* DefaultCollection::const_iterator iter; iter = m_defaultArgs.find(&Template); if (iter != m_defaultArgs.end()) { return iter->second; } */ return 0; } static bool ShouldKeepTypedef(const TypedefType* TT, const llvm::SmallSet<const Decl*, 4>& ToSkip) { // Return true, if we should keep this typedef rather than desugaring it. return 0 != ToSkip.count(TT->getDecl()->getCanonicalDecl()); } static bool SingleStepPartiallyDesugarTypeImpl(QualType& QT) { // WARNING: // // The large blocks of commented-out code in this routine // are there to support doing more desugaring in the future, // we will probably have to. // // Do not delete until we are completely sure we will // not be changing this routine again! // const Type* QTy = QT.getTypePtr(); Type::TypeClass TC = QTy->getTypeClass(); switch (TC) { // // Unconditionally sugared types. // case Type::Paren: { return false; //const ParenType* Ty = llvm::cast<ParenType>(QTy); //QT = Ty->desugar(); //return true; } case Type::Typedef: { const TypedefType* Ty = llvm::cast<TypedefType>(QTy); QT = Ty->desugar(); return true; } case Type::TypeOf: { const TypeOfType* Ty = llvm::cast<TypeOfType>(QTy); QT = Ty->desugar(); return true; } case Type::Attributed: { return false; //const AttributedType* Ty = llvm::cast<AttributedType>(QTy); //QT = Ty->desugar(); //return true; } case Type::SubstTemplateTypeParm: { const SubstTemplateTypeParmType* Ty = llvm::cast<SubstTemplateTypeParmType>(QTy); QT = Ty->desugar(); return true; } case Type::Elaborated: { const ElaboratedType* Ty = llvm::cast<ElaboratedType>(QTy); QT = Ty->desugar(); return true; } // // Conditionally sugared types. 
// case Type::TypeOfExpr: { const TypeOfExprType* Ty = llvm::cast<TypeOfExprType>(QTy); if (Ty->isSugared()) { QT = Ty->desugar(); return true; } return false; } case Type::Decltype: { const DecltypeType* Ty = llvm::cast<DecltypeType>(QTy); if (Ty->isSugared()) { QT = Ty->desugar(); return true; } return false; } case Type::UnaryTransform: { return false; //const UnaryTransformType* Ty = llvm::cast<UnaryTransformType>(QTy); //if (Ty->isSugared()) { // QT = Ty->desugar(); // return true; //} //return false; } case Type::Auto: { return false; //const AutoType* Ty = llvm::cast<AutoType>(QTy); //if (Ty->isSugared()) { // QT = Ty->desugar(); // return true; //} //return false; } case Type::TemplateSpecialization: { //const TemplateSpecializationType* Ty = // llvm::cast<TemplateSpecializationType>(QTy); // Too broad, this returns a the target template but with // canonical argument types. //if (Ty->isTypeAlias()) { // QT = Ty->getAliasedType(); // return true; //} // Too broad, this returns the canonical type //if (Ty->isSugared()) { // QT = Ty->desugar(); // return true; //} return false; } // Not a sugared type. default: { break; } } return false; } bool Transform::SingleStepPartiallyDesugarType(QualType &QT, const ASTContext &Context) { Qualifiers quals = QT.getQualifiers(); bool desugared = SingleStepPartiallyDesugarTypeImpl( QT ); if (desugared) { // If the types has been desugared it also lost its qualifiers. QT = Context.getQualifiedType(QT, quals); } return desugared; } static bool GetPartiallyDesugaredTypeImpl(const ASTContext& Ctx, TemplateArgument &arg, const Transform::Config& TypeConfig, bool fullyQualifyTmpltArg) { bool changed = false; if (arg.getKind() == TemplateArgument::Template) { TemplateName tname = arg.getAsTemplate(); // Note: should we not also desugar? changed = GetFullyQualifiedTemplateName(Ctx, tname); if (changed) { arg = TemplateArgument(tname); } } else if (arg.getKind() == TemplateArgument::Type) { QualType SubTy = arg.getAsType(); // Check if the type needs more desugaring and recurse. if (isa<TypedefType>(SubTy) || isa<TemplateSpecializationType>(SubTy) || isa<ElaboratedType>(SubTy) || fullyQualifyTmpltArg) { changed = true; QualType PDQT = GetPartiallyDesugaredTypeImpl(Ctx, SubTy, TypeConfig, /*fullyQualifyType=*/true, /*fullyQualifyTmpltArg=*/true); arg = TemplateArgument(PDQT); } } else if (arg.getKind() == TemplateArgument::Pack) { SmallVector<TemplateArgument, 2> desArgs; for (auto I = arg.pack_begin(), E = arg.pack_end(); I != E; ++I) { TemplateArgument pack_arg(*I); changed = GetPartiallyDesugaredTypeImpl(Ctx,pack_arg, TypeConfig, fullyQualifyTmpltArg); desArgs.push_back(pack_arg); } if (changed) { // The allocator in ASTContext is mutable ... 
// Keep the argument const to be inline will all the other interfaces // like: NestedNameSpecifier::Create ASTContext &mutableCtx( const_cast<ASTContext&>(Ctx) ); arg = TemplateArgument::CreatePackCopy(mutableCtx, ARRAY_COMPAT(desArgs)); } } return changed; } static const TemplateArgument* GetTmpltArgDeepFirstIndexPack(size_t &cur, const TemplateArgument& arg, size_t idx) { SmallVector<TemplateArgument, 2> desArgs; for (auto I = arg.pack_begin(), E = arg.pack_end(); cur < idx && I != E; ++cur,++I) { if ((*I).getKind() == TemplateArgument::Pack) { auto p_arg = GetTmpltArgDeepFirstIndexPack(cur,(*I),idx); if (cur == idx) return p_arg; } else if (cur == idx) { return I; } } return nullptr; } // Return the template argument corresponding to the index (idx) // when the composite list of arguement is seen flattened out deep // first (where depth is provided by template argument packs) static const TemplateArgument* GetTmpltArgDeepFirstIndex(const TemplateArgumentList& templateArgs, size_t idx) { for (size_t cur = 0, I = 0, E = templateArgs.size(); cur <= idx && I < E; ++I, ++cur) { auto &arg = templateArgs[I]; if (arg.getKind() == TemplateArgument::Pack) { // Need to recurse. auto p_arg = GetTmpltArgDeepFirstIndexPack(cur,arg,idx); if (cur == idx) return p_arg; } else if (cur == idx) { return &arg; } } return nullptr; } static QualType GetPartiallyDesugaredTypeImpl(const ASTContext& Ctx, QualType QT, const Transform::Config& TypeConfig, bool fullyQualifyType, bool fullyQualifyTmpltArg) { if (QT.isNull()) return QT; // If there are no constraints, then use the standard desugaring. if (TypeConfig.empty() && !fullyQualifyType && !fullyQualifyTmpltArg) return QT.getDesugaredType(Ctx); // In case of Int_t* we need to strip the pointer first, desugar and attach // the pointer once again. if (isa<PointerType>(QT.getTypePtr())) { // Get the qualifiers. Qualifiers quals = QT.getQualifiers(); QualType nQT; nQT = GetPartiallyDesugaredTypeImpl(Ctx, QT->getPointeeType(), TypeConfig, fullyQualifyType,fullyQualifyTmpltArg); if (nQT == QT->getPointeeType()) return QT; QT = Ctx.getPointerType(nQT); // Add back the qualifiers. QT = Ctx.getQualifiedType(QT, quals); return QT; } while (isa<SubstTemplateTypeParmType>(QT.getTypePtr())) { // Get the qualifiers. Qualifiers quals = QT.getQualifiers(); QT = dyn_cast<SubstTemplateTypeParmType>(QT.getTypePtr())->desugar(); // Add back the qualifiers. QT = Ctx.getQualifiedType(QT, quals); } // In case of Int_t& we need to strip the pointer first, desugar and attach // the reference once again. if (isa<ReferenceType>(QT.getTypePtr())) { // Get the qualifiers. bool isLValueRefTy = isa<LValueReferenceType>(QT.getTypePtr()); Qualifiers quals = QT.getQualifiers(); QualType nQT; nQT = GetPartiallyDesugaredTypeImpl(Ctx, QT->getPointeeType(), TypeConfig, fullyQualifyType,fullyQualifyTmpltArg); if (nQT == QT->getPointeeType()) return QT; // Add the r- or l-value reference type back to the desugared one. if (isLValueRefTy) QT = Ctx.getLValueReferenceType(nQT); else QT = Ctx.getRValueReferenceType(nQT); // Add back the qualifiers. QT = Ctx.getQualifiedType(QT, quals); return QT; } // In case of Int_t[2] we need to strip the array first, desugar and attach // the array once again. if (isa<ArrayType>(QT.getTypePtr())) { // Get the qualifiers. 
Qualifiers quals = QT.getQualifiers(); if (isa<ConstantArrayType>(QT.getTypePtr())) { const ConstantArrayType *arr = dyn_cast<ConstantArrayType>(QT.getTypePtr()); QualType newQT = GetPartiallyDesugaredTypeImpl(Ctx,arr->getElementType(), TypeConfig, fullyQualifyType,fullyQualifyTmpltArg); if (newQT == arr->getElementType()) return QT; QT = Ctx.getConstantArrayType (newQT, arr->getSize(), arr->getSizeModifier(), arr->getIndexTypeCVRQualifiers()); } else if (isa<DependentSizedArrayType>(QT.getTypePtr())) { const DependentSizedArrayType *arr = dyn_cast<DependentSizedArrayType>(QT.getTypePtr()); QualType newQT = GetPartiallyDesugaredTypeImpl(Ctx,arr->getElementType(), TypeConfig, fullyQualifyType,fullyQualifyTmpltArg); if (newQT == QT) return QT; QT = Ctx.getDependentSizedArrayType (newQT, arr->getSizeExpr(), arr->getSizeModifier(), arr->getIndexTypeCVRQualifiers(), arr->getBracketsRange()); } else if (isa<IncompleteArrayType>(QT.getTypePtr())) { const IncompleteArrayType *arr = dyn_cast<IncompleteArrayType>(QT.getTypePtr()); QualType newQT = GetPartiallyDesugaredTypeImpl(Ctx,arr->getElementType(), TypeConfig, fullyQualifyType,fullyQualifyTmpltArg); if (newQT == arr->getElementType()) return QT; QT = Ctx.getIncompleteArrayType (newQT, arr->getSizeModifier(), arr->getIndexTypeCVRQualifiers()); } else if (isa<VariableArrayType>(QT.getTypePtr())) { const VariableArrayType *arr = dyn_cast<VariableArrayType>(QT.getTypePtr()); QualType newQT = GetPartiallyDesugaredTypeImpl(Ctx,arr->getElementType(), TypeConfig, fullyQualifyType,fullyQualifyTmpltArg); if (newQT == arr->getElementType()) return QT; QT = Ctx.getVariableArrayType (newQT, arr->getSizeExpr(), arr->getSizeModifier(), arr->getIndexTypeCVRQualifiers(), arr->getBracketsRange()); } // Add back the qualifiers. QT = Ctx.getQualifiedType(QT, quals); return QT; } // If the type is elaborated, first remove the prefix and then // when we are done we will as needed add back the (new) prefix. // for example for std::vector<int>::iterator, we work on // just 'iterator' (which remember which scope its from) // and remove the typedef to get (for example), // __gnu_cxx::__normal_iterator // which is *not* in the std::vector<int> scope and it is // the __gnu__cxx part we should use as the prefix. // NOTE: however we problably want to add the std::vector typedefs // to the list of things to skip! NestedNameSpecifier* original_prefix = 0; Qualifiers prefix_qualifiers; const ElaboratedType* etype_input = dyn_cast<ElaboratedType>(QT.getTypePtr()); if (etype_input) { // Intentionally, we do not care about the other compononent of // the elaborated type (the keyword) as part of the partial // desugaring (and/or name normaliztation) is to remove it. original_prefix = etype_input->getQualifier(); if (original_prefix) { const NamespaceDecl *ns = original_prefix->getAsNamespace(); if (!(ns && ns->isAnonymousNamespace())) { // We have to also desugar the prefix unless // it does not have a name (anonymous namespaces). fullyQualifyType = true; prefix_qualifiers = QT.getLocalQualifiers(); QT = QualType(etype_input->getNamedType().getTypePtr(),0); } else { original_prefix = 0; } } } // Desugar QT until we cannot desugar any more, or // we hit one of the special typedefs. while (1) { if (const TypedefType* TT = llvm::dyn_cast<TypedefType>(QT.getTypePtr())){ if (ShouldKeepTypedef(TT, TypeConfig.m_toSkip)) { if (!fullyQualifyType && !fullyQualifyTmpltArg) { return QT; } // We might have stripped the namespace/scope part, // so we must go on to add it back. 
break; } } bool wasDesugared = Transform::SingleStepPartiallyDesugarType(QT,Ctx); // Did we get to a basic_string, let's get back to std::string Transform::Config::ReplaceCollection::const_iterator iter = TypeConfig.m_toReplace.find(QT->getCanonicalTypeInternal().getTypePtr()); if (iter != TypeConfig.m_toReplace.end()) { Qualifiers quals = QT.getQualifiers(); QT = QualType( iter->second, 0); QT = Ctx.getQualifiedType(QT,quals); break; } if (!wasDesugared) { // No more work to do, stop now. break; } } // If we have a reference, array or pointer we still need to // desugar what they point to. if (isa<PointerType>(QT.getTypePtr()) || isa<ReferenceType>(QT.getTypePtr()) || isa<ArrayType>(QT.getTypePtr())) { return GetPartiallyDesugaredTypeImpl(Ctx, QT, TypeConfig, fullyQualifyType, fullyQualifyTmpltArg); } NestedNameSpecifier* prefix = 0; const ElaboratedType* etype = dyn_cast<ElaboratedType>(QT.getTypePtr()); if (etype) { prefix = SelectPrefix(Ctx,etype,original_prefix,TypeConfig); prefix_qualifiers.addQualifiers(QT.getLocalQualifiers()); QT = QualType(etype->getNamedType().getTypePtr(),0); } else if (fullyQualifyType) { // Let's check whether this type should have been an elaborated type. // in which case we want to add it ... but we can't really preserve // the typedef in this case ... Decl *decl = 0; const TypedefType* typedeftype = dyn_cast_or_null<TypedefType>(QT.getTypePtr()); if (typedeftype) { decl = typedeftype->getDecl(); } else { // There are probably other cases ... const TagType* tagdecltype = dyn_cast_or_null<TagType>(QT.getTypePtr()); if (tagdecltype) { decl = tagdecltype->getDecl(); } else { decl = QT->getAsCXXRecordDecl(); } } if (decl) { NamedDecl* outer = dyn_cast_or_null<NamedDecl>(decl->getDeclContext()); NamespaceDecl* outer_ns = dyn_cast_or_null<NamespaceDecl>(decl->getDeclContext()); if (outer && !(outer_ns && outer_ns->isAnonymousNamespace()) && !outer->getNameAsString().empty() ) { if (original_prefix) { const Type *oldtype = original_prefix->getAsType(); if (oldtype) { if (oldtype->getAsCXXRecordDecl() == outer) { // Same type, use the original spelling prefix = GetPartiallyDesugaredNNS(Ctx, original_prefix, TypeConfig); outer = 0; // Cancel the later creation. } } else { const NamespaceDecl *old_ns = original_prefix->getAsNamespace(); if (old_ns) { old_ns = old_ns->getCanonicalDecl(); } else if (NamespaceAliasDecl *alias = original_prefix->getAsNamespaceAlias()) { old_ns = alias->getNamespace()->getCanonicalDecl(); } const NamespaceDecl *new_ns = dyn_cast<NamespaceDecl>(outer); if (new_ns) new_ns = new_ns->getCanonicalDecl(); if (old_ns == new_ns) { // This is the same namespace, use the original prefix // as a starting point. prefix = GetFullyQualifiedNameSpecifier(Ctx,original_prefix); outer = 0; // Cancel the later creation. } } } else { // if (!original_prefix) // move qualifiers on the outer type (avoid 'std::const string'!) prefix_qualifiers = QT.getLocalQualifiers(); QT = QualType(QT.getTypePtr(),0); } if (outer) { if (decl->getDeclContext()->isNamespace()) { prefix = TypeName::CreateNestedNameSpecifier(Ctx, dyn_cast<NamespaceDecl>(outer)); } else { // We should only create the nested name specifier // if the outer scope is really a TagDecl. // It could also be a CXXMethod for example. 
TagDecl *tdecl = dyn_cast<TagDecl>(outer); if (tdecl) { prefix = TypeName::CreateNestedNameSpecifier(Ctx,tdecl, false /*FullyQualified*/); prefix = GetPartiallyDesugaredNNS(Ctx,prefix,TypeConfig); } } } } } } // In case of template specializations iterate over the arguments and // desugar them as well. if (const TemplateSpecializationType* TST = dyn_cast<const TemplateSpecializationType>(QT.getTypePtr())) { if (TST->isTypeAlias()) { QualType targetType = TST->getAliasedType(); /* // We really need to find a way to propagate/keep the opaque typedef // that are available in TST to the aliased type. We would need // to do something like: QualType targetType = TST->getAliasedType(); QualType resubst = ReSubstTemplateArg(targetType,TST); return GetPartiallyDesugaredTypeImpl(Ctx, resubst, TypeConfig, fullyQualifyType, fullyQualifyTmpltArg); // But this is not quite right (ReSubstTemplateArg is from TMetaUtils) // as it does not resubst for template <typename T> using myvector = std::vector<T>; myvector<Double32_t> vd32d; // and does not work at all for template<class T> using ptr = T*; ptr<Double32_t> p2; // as the target is not a template. */ // So for now just return move on with the least lose we can do return GetPartiallyDesugaredTypeImpl(Ctx, targetType, TypeConfig, fullyQualifyType, fullyQualifyTmpltArg); } bool mightHaveChanged = false; llvm::SmallVector<TemplateArgument, 4> desArgs; unsigned int argi = 0; for(TemplateSpecializationType::iterator I = TST->begin(), E = TST->end(); I != E; ++I, ++argi) { if (I->getKind() == TemplateArgument::Expression) { // If we have an expression, we need to replace it / desugar it // as it could contain unqualifed (or partially qualified or // private) parts. QualType canon = QT->getCanonicalTypeInternal(); const RecordType *TSTRecord = dyn_cast<const RecordType>(canon.getTypePtr()); if (TSTRecord) { if (const ClassTemplateSpecializationDecl* TSTdecl = dyn_cast<ClassTemplateSpecializationDecl>(TSTRecord->getDecl())) { const TemplateArgumentList& templateArgs = TSTdecl->getTemplateArgs(); mightHaveChanged = true; const TemplateArgument *match = GetTmpltArgDeepFirstIndex(templateArgs,argi); if (match) desArgs.push_back(*match); continue; } } } if (I->getKind() == TemplateArgument::Template) { TemplateName tname = I->getAsTemplate(); // Note: should we not also desugar? bool changed = GetFullyQualifiedTemplateName(Ctx, tname); if (changed) { desArgs.push_back(TemplateArgument(tname)); mightHaveChanged = true; } else desArgs.push_back(*I); continue; } if (I->getKind() != TemplateArgument::Type) { desArgs.push_back(*I); continue; } QualType SubTy = I->getAsType(); // Check if the type needs more desugaring and recurse. if (isa<TypedefType>(SubTy) || isa<TemplateSpecializationType>(SubTy) || isa<ElaboratedType>(SubTy) || fullyQualifyTmpltArg) { QualType PDQT = GetPartiallyDesugaredTypeImpl(Ctx, SubTy, TypeConfig, fullyQualifyType, fullyQualifyTmpltArg); mightHaveChanged |= (SubTy != PDQT); desArgs.push_back(TemplateArgument(PDQT)); } else { desArgs.push_back(*I); } } // If desugaring happened allocate new type in the AST. 
if (mightHaveChanged) { Qualifiers qualifiers = QT.getLocalQualifiers(); QT = Ctx.getTemplateSpecializationType(TST->getTemplateName(), ARRAY_COMPAT(desArgs), TST->getCanonicalTypeInternal()); QT = Ctx.getQualifiedType(QT, qualifiers); } } else if (fullyQualifyTmpltArg) { if (const RecordType *TSTRecord = dyn_cast<const RecordType>(QT.getTypePtr())) { // We are asked to fully qualify and we have a Record Type, // which can point to a template instantiation with no sugar in any of // its template argument, however we still need to fully qualify them. if (const ClassTemplateSpecializationDecl* TSTdecl = dyn_cast<ClassTemplateSpecializationDecl>(TSTRecord->getDecl())) { const TemplateArgumentList& templateArgs = TSTdecl->getTemplateArgs(); bool mightHaveChanged = false; llvm::SmallVector<TemplateArgument, 4> desArgs; for(unsigned int I = 0, E = templateArgs.size(); I != E; ++I) { #if 1 // cheap to copy and potentially modified by // GetPartiallyDesugaredTypeImpl TemplateArgument arg(templateArgs[I]); mightHaveChanged |= GetPartiallyDesugaredTypeImpl(Ctx,arg, TypeConfig, fullyQualifyTmpltArg); desArgs.push_back(arg); #else if (templateArgs[I].getKind() == TemplateArgument::Template) { TemplateName tname = templateArgs[I].getAsTemplate(); // Note: should we not also desugar? bool changed = GetFullyQualifiedTemplateName(Ctx, tname); if (changed) { desArgs.push_back(TemplateArgument(tname)); mightHaveChanged = true; } else desArgs.push_back(templateArgs[I]); continue; } if (templateArgs[I].getKind() != TemplateArgument::Type) { desArgs.push_back(templateArgs[I]); continue; } QualType SubTy = templateArgs[I].getAsType(); // Check if the type needs more desugaring and recurse. if (isa<TypedefType>(SubTy) || isa<TemplateSpecializationType>(SubTy) || isa<ElaboratedType>(SubTy) || fullyQualifyTmpltArg) { mightHaveChanged = true; QualType PDQT = GetPartiallyDesugaredTypeImpl(Ctx, SubTy, TypeConfig, /*fullyQualifyType=*/true, /*fullyQualifyTmpltArg=*/true); desArgs.push_back(TemplateArgument(PDQT)); } else { desArgs.push_back(templateArgs[I]); } #endif } // If desugaring happened allocate new type in the AST. if (mightHaveChanged) { Qualifiers qualifiers = QT.getLocalQualifiers(); TemplateName TN(TSTdecl->getSpecializedTemplate()); QT = Ctx.getTemplateSpecializationType(TN, ARRAY_COMPAT(desArgs), TSTRecord->getCanonicalTypeInternal()); QT = Ctx.getQualifiedType(QT, qualifiers); } } } } // TODO: Find a way to avoid creating new types, if the input is already // fully qualified. if (prefix) { // We intentionally always use ETK_None, we never want // the keyword (humm ... what about anonymous types?) QT = Ctx.getElaboratedType(ETK_None,prefix,QT); QT = Ctx.getQualifiedType(QT, prefix_qualifiers); } else if (original_prefix) { QT = Ctx.getQualifiedType(QT, prefix_qualifiers); } return QT; } QualType Transform::GetPartiallyDesugaredType(const ASTContext& Ctx, QualType QT, const Transform::Config& TypeConfig, bool fullyQualify/*=true*/) { return GetPartiallyDesugaredTypeImpl(Ctx,QT,TypeConfig, /*qualifyType*/fullyQualify, /*qualifyTmpltArg*/fullyQualify); } NamespaceDecl* Lookup::Namespace(Sema* S, const char* Name, const DeclContext* Within) { DeclarationName DName = &S->Context.Idents.get(Name); LookupResult R(*S, DName, SourceLocation(), Sema::LookupNestedNameSpecifierName); R.suppressDiagnostics(); if (!Within) S->LookupName(R, S->TUScope); else { if (const clang::TagDecl* TD = dyn_cast<clang::TagDecl>(Within)) { if (!TD->getDefinition()) { // No definition, no lookup result. 
return 0; } } S->LookupQualifiedName(R, const_cast<DeclContext*>(Within)); } if (R.empty()) return 0; R.resolveKind(); return dyn_cast<NamespaceDecl>(R.getFoundDecl()); } // NamedDecl* Lookup::Named(Sema* S, llvm::StringRef Name, // const DeclContext* Within) { // DeclarationName DName = &S->Context.Idents.get(Name); // return Lookup::Named(S, DName, Within); // } // NamedDecl* Lookup::Named(Sema* S, const char* Name, // const DeclContext* Within) { // return Lookup::Named(S, llvm::StringRef(Name), Within); // } // NamedDecl* Lookup::Named(Sema* S, const clang::DeclarationName& Name, // const DeclContext* Within) { // LookupResult R(*S, Name, SourceLocation(), Sema::LookupOrdinaryName, // Sema::ForRedeclaration); // Lookup::Named(S, R, Within); // return LookupResult2Decl<clang::NamedDecl>(R); // } // TagDecl* Lookup::Tag(Sema* S, llvm::StringRef Name, // const DeclContext* Within) { // DeclarationName DName = &S->Context.Idents.get(Name); // return Lookup::Tag(S, DName, Within); // } // TagDecl* Lookup::Tag(Sema* S, const char* Name, // const DeclContext* Within) { // return Lookup::Tag(S, llvm::StringRef(Name), Within); // } // TagDecl* Lookup::Tag(Sema* S, const clang::DeclarationName& Name, // const DeclContext* Within) { // LookupResult R(*S, Name, SourceLocation(), Sema::LookupTagName, // Sema::ForRedeclaration); // Lookup::Named(S, R, Within); // return LookupResult2Decl<clang::TagDecl>(R); // } void Lookup::Named(Sema* S, LookupResult& R, const DeclContext* Within) { R.suppressDiagnostics(); if (!Within) S->LookupName(R, S->TUScope); else { const DeclContext* primaryWithin = nullptr; if (const clang::TagDecl *TD = dyn_cast<clang::TagDecl>(Within)) { primaryWithin = dyn_cast_or_null<DeclContext>(TD->getDefinition()); } else { primaryWithin = Within->getPrimaryContext(); } if (!primaryWithin) { // No definition, no lookup result. return; } S->LookupQualifiedName(R, const_cast<DeclContext*>(primaryWithin)); } } static NestedNameSpecifier* CreateNestedNameSpecifierForScopeOf(const ASTContext& Ctx, const Decl *decl, bool FullyQualified) { // Create a nested name specifier for the declaring context of the type. assert(decl); const NamedDecl* outer = llvm::dyn_cast_or_null<NamedDecl>(decl->getDeclContext()); const NamespaceDecl* outer_ns = llvm::dyn_cast_or_null<NamespaceDecl>(decl->getDeclContext()); if (outer && !(outer_ns && outer_ns->isAnonymousNamespace())) { if (const CXXRecordDecl *cxxdecl = llvm::dyn_cast<CXXRecordDecl>(decl->getDeclContext())) { if (ClassTemplateDecl *clTempl = cxxdecl->getDescribedClassTemplate()) { // We are in the case of a type(def) that was declared in a // class template but is *not* type dependent. In clang, it gets // attached to the class template declaration rather than any // specific class template instantiation. This result in 'odd' // fully qualified typename: // vector<_Tp,_Alloc>::size_type // Make the situation is 'useable' but looking a bit odd by // picking a random instance as the declaring context. 
if (clTempl->spec_begin() != clTempl->spec_end()) { decl = *(clTempl->spec_begin()); outer = llvm::dyn_cast<NamedDecl>(decl); outer_ns = llvm::dyn_cast<NamespaceDecl>(decl); } } } if (outer_ns) { return TypeName::CreateNestedNameSpecifier(Ctx,outer_ns); } else if (const TagDecl* TD = llvm::dyn_cast<TagDecl>(outer)) { return TypeName::CreateNestedNameSpecifier(Ctx, TD, FullyQualified); } } return 0; } static NestedNameSpecifier* CreateNestedNameSpecifierForScopeOf(const ASTContext& Ctx, const Type *TypePtr, bool FullyQualified) { // Create a nested name specifier for the declaring context of the type. if (!TypePtr) return 0; //////////////////////////// // MODIFIED FOR CHIMERA //////////////////////////// Decl *decl = 0; if (const TypedefType* typedeftype = llvm::dyn_cast<TypedefType>(TypePtr)) { decl = typedeftype->getDecl(); } else if (const TagType* tagdecltype = llvm::dyn_cast_or_null<TagType>(TypePtr)) { decl = tagdecltype->getDecl(); } else if (const TemplateSpecializationType* templtype = llvm::dyn_cast_or_null<TemplateSpecializationType>(TypePtr)) { decl = templtype->getTemplateName().getAsTemplateDecl(); } else { // There are probably other cases ... decl = TypePtr->getAsCXXRecordDecl(); } //////////////////////////// // END MODIFIED FOR CHIMERA //////////////////////////// if (!decl) return 0; return CreateNestedNameSpecifierForScopeOf(Ctx, decl, FullyQualified); } NestedNameSpecifier* TypeName::CreateNestedNameSpecifier(const ASTContext& Ctx, const NamespaceDecl* Namesp) { while (Namesp && Namesp->isInline()) { // Ignore inline namespace; Namesp = dyn_cast_or_null<NamespaceDecl>(Namesp->getDeclContext()); } if (!Namesp) return 0; bool FullyQualified = true; // doesn't matter, DeclContexts are namespaces return NestedNameSpecifier::Create(Ctx, CreateOuterNNS(Ctx, Namesp, FullyQualified), Namesp); } NestedNameSpecifier* TypeName::CreateNestedNameSpecifier(const ASTContext& Ctx, const TypedefNameDecl* TD, bool FullyQualify) { return NestedNameSpecifier::Create(Ctx, CreateOuterNNS(Ctx, TD, FullyQualify), true /*Template*/, TD->getTypeForDecl()); } NestedNameSpecifier* TypeName::CreateNestedNameSpecifier(const ASTContext& Ctx, const TagDecl *TD, bool FullyQualify) { const Type* Ty = Ctx.getTypeDeclType(TD).getTypePtr(); if (FullyQualify) Ty = GetFullyQualifiedLocalType(Ctx, Ty); return NestedNameSpecifier::Create(Ctx, CreateOuterNNS(Ctx, TD, FullyQualify), false /* template keyword wanted */, Ty); } QualType TypeName::GetFullyQualifiedType(QualType QT, const ASTContext& Ctx) { // Return the fully qualified type, if we need to recurse through any // template parameter, this needs to be merged somehow with // GetPartialDesugaredType. //////////////////////////// // MODIFIED FOR CHIMERA //////////////////////////// // Remove the part of the type related to the type being a template // parameter (we won't report it as part of the 'type name' and it is // actually make the code below to be more complex (to handle those) while (isa<SubstTemplateTypeParmType>(QT.getTypePtr())) { // Get the qualifiers. Qualifiers quals = QT.getQualifiers(); QT = dyn_cast<SubstTemplateTypeParmType>(QT.getTypePtr())->desugar(); // Add back the qualifiers. 
QT = Ctx.getQualifiedType(QT, quals); } if (llvm::isa<MemberPointerType>(QT.getTypePtr())) { Qualifiers quals = QT.getQualifiers(); const Type *class_type = llvm::cast<MemberPointerType>(QT.getTypePtr())->getClass(); class_type = GetFullyQualifiedType(class_type->getCanonicalTypeInternal(), Ctx).getTypePtr(); QT = GetFullyQualifiedType(QT->getPointeeType(), Ctx); QT = Ctx.getMemberPointerType(QT, class_type); QT = Ctx.getQualifiedType(QT, quals); return QT; } if (llvm::isa<FunctionProtoType>(QT.getTypePtr())) { const FunctionProtoType *function_type = llvm::cast<FunctionProtoType>(QT.getTypePtr()); QualType return_type = function_type->getReturnType(); return_type = GetFullyQualifiedType(return_type, Ctx); std::vector<QualType> qualified_param_types; qualified_param_types.reserve(function_type->getNumParams()); for (const QualType &param_type : function_type->param_types()) { qualified_param_types.push_back(GetFullyQualifiedType(param_type, Ctx)); } Qualifiers quals = QT.getQualifiers(); QT = Ctx.getFunctionType(return_type, qualified_param_types, function_type->getExtProtoInfo()); QT = Ctx.getQualifiedType(QT, quals); return QT; } if (llvm::isa<ConstantArrayType>(QT.getTypePtr())) { const ConstantArrayType *array_type = llvm::cast<ConstantArrayType>(QT.getTypePtr()); QualType element_type = array_type->getElementType(); element_type = GetFullyQualifiedType(element_type, Ctx); Qualifiers quals = QT.getQualifiers(); QT = Ctx.getConstantArrayType(element_type, array_type->getSize(), array_type->getSizeModifier(), array_type->getIndexTypeCVRQualifiers()); QT = Ctx.getQualifiedType(QT, quals); return QT; } if (llvm::isa<DependentSizedArrayType>(QT.getTypePtr())) { const DependentSizedArrayType *array_type = llvm::cast<DependentSizedArrayType>(QT.getTypePtr()); QualType element_type = array_type->getElementType(); element_type = GetFullyQualifiedType(element_type, Ctx); Qualifiers quals = QT.getQualifiers(); QT = Ctx.getDependentSizedArrayType(element_type, array_type->getSizeExpr(), array_type->getSizeModifier(), array_type->getIndexTypeCVRQualifiers(), array_type->getBracketsRange()); QT = Ctx.getQualifiedType(QT, quals); return QT; } if (llvm::isa<IncompleteArrayType>(QT.getTypePtr())) { const IncompleteArrayType *array_type = llvm::cast<IncompleteArrayType>(QT.getTypePtr()); QualType element_type = array_type->getElementType(); element_type = GetFullyQualifiedType(element_type, Ctx); Qualifiers quals = QT.getQualifiers(); QT = Ctx.getIncompleteArrayType(element_type, array_type->getSizeModifier(), array_type->getIndexTypeCVRQualifiers()); QT = Ctx.getQualifiedType(QT, quals); return QT; } if (llvm::isa<VariableArrayType>(QT.getTypePtr())) { const VariableArrayType *array_type = llvm::cast<VariableArrayType>(QT.getTypePtr()); QualType element_type = array_type->getElementType(); element_type = GetFullyQualifiedType(element_type, Ctx); Qualifiers quals = QT.getQualifiers(); QT = Ctx.getVariableArrayType(element_type, array_type->getSizeExpr(), array_type->getSizeModifier(), array_type->getIndexTypeCVRQualifiers(), array_type->getBracketsRange()); QT = Ctx.getQualifiedType(QT, quals); return QT; } // In case of myType* we need to strip the pointer first, fully qualifiy // and attach the pointer once again. if (llvm::isa<PointerType>(QT.getTypePtr())) { // Get the qualifiers. Qualifiers quals = QT.getQualifiers(); QT = GetFullyQualifiedType(QT->getPointeeType(), Ctx); QT = Ctx.getPointerType(QT); // Add back the qualifiers. 
QT = Ctx.getQualifiedType(QT, quals); return QT; } // In case of myType& we need to strip the pointer first, fully qualifiy // and attach the pointer once again. if (llvm::isa<ReferenceType>(QT.getTypePtr())) { // Get the qualifiers. bool isLValueRefTy = llvm::isa<LValueReferenceType>(QT.getTypePtr()); Qualifiers quals = QT.getQualifiers(); QT = GetFullyQualifiedType(QT->getPointeeType(), Ctx); // Add the r- or l-value reference type back to the desugared one. if (isLValueRefTy) QT = Ctx.getLValueReferenceType(QT); else QT = Ctx.getRValueReferenceType(QT); // Add back the qualifiers. QT = Ctx.getQualifiedType(QT, quals); return QT; } // Strip deduced types. if (const AutoType* AutoTy = dyn_cast<AutoType>(QT.getTypePtr())) { if (!AutoTy->getDeducedType().isNull()) return GetFullyQualifiedType(AutoTy->getDeducedType(), Ctx); } //////////////////////////// // END MODIFIED FOR CHIMERA //////////////////////////// NestedNameSpecifier* prefix = 0; Qualifiers prefix_qualifiers; if (const ElaboratedType* etype_input = llvm::dyn_cast<ElaboratedType>(QT.getTypePtr())) { // Intentionally, we do not care about the other compononent of // the elaborated type (the keyword) as part of the partial // desugaring (and/or name normalization) is to remove it. prefix = etype_input->getQualifier(); if (prefix) { const NamespaceDecl *ns = prefix->getAsNamespace(); if (prefix != NestedNameSpecifier::GlobalSpecifier(Ctx) && !(ns && ns->isAnonymousNamespace())) { prefix_qualifiers = QT.getLocalQualifiers(); prefix = GetFullyQualifiedNameSpecifier(Ctx, prefix); QT = QualType(etype_input->getNamedType().getTypePtr(),0); } else { prefix = 0; } } } else { // Create a nested name specifier if needed (i.e. if the decl context // is not the global scope. prefix = CreateNestedNameSpecifierForScopeOf(Ctx,QT.getTypePtr(), true /*FullyQualified*/); // move the qualifiers on the outer type (avoid 'std::const string'!) if (prefix) { prefix_qualifiers = QT.getLocalQualifiers(); QT = QualType(QT.getTypePtr(),0); } } // In case of template specializations iterate over the arguments and // fully qualify them as well. if(llvm::isa<const TemplateSpecializationType>(QT.getTypePtr())) { Qualifiers qualifiers = QT.getLocalQualifiers(); const Type *TypePtr = GetFullyQualifiedLocalType(Ctx,QT.getTypePtr()); QT = Ctx.getQualifiedType(TypePtr, qualifiers); } else if (llvm::isa<const RecordType>(QT.getTypePtr())) { // We are asked to fully qualify and we have a Record Type, // which can point to a template instantiation with no sugar in any of // its template argument, however we still need to fully qualify them. Qualifiers qualifiers = QT.getLocalQualifiers(); const Type *TypePtr = GetFullyQualifiedLocalType(Ctx,QT.getTypePtr()); QT = Ctx.getQualifiedType(TypePtr, qualifiers); } if (prefix) { // We intentionally always use ETK_None, we never want // the keyword (humm ... what about anonymous types?) QT = Ctx.getElaboratedType(ETK_None,prefix,QT); QT = Ctx.getQualifiedType(QT, prefix_qualifiers); } return QT; } std::string TypeName::GetFullyQualifiedName(QualType QT, const ASTContext &Ctx) { QualType FQQT = GetFullyQualifiedType(QT, Ctx); PrintingPolicy Policy(Ctx.getPrintingPolicy()); Policy.SuppressScope = false; Policy.AnonymousTagLocations = false; return FQQT.getAsString(Policy); } } // end namespace utils } // end namespace cling
personalrobotics/chimera
external/cling/src/cling_utils_AST.cpp
C++
bsd-3-clause
71,149
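The fully-qualifying logic in the cling utility above follows a strip-recurse-reattach pattern: peel one layer of type sugar (pointer, reference, array, member pointer), fully qualify what is underneath, then rebuild the outer layer with the original qualifiers. Below is a minimal Python sketch of that recursion over a toy type tree; the Named/Ptr/Ref classes and the scope handling are illustrative stand-ins, not part of clang or cling.

# Illustrative sketch only: toy stand-ins for clang's layered QualType representation.
class Named:
    def __init__(self, name, scope=None):
        self.name, self.scope = name, scope      # e.g. "string" declared in scope "std"

class Ptr:
    def __init__(self, pointee):
        self.pointee = pointee

class Ref:
    def __init__(self, pointee):
        self.pointee = pointee

def fully_qualify(t):
    """Peel one layer, recurse on what is underneath, then reattach the layer."""
    if isinstance(t, Ptr):
        return Ptr(fully_qualify(t.pointee))     # myType*  -> qualify pointee, re-add '*'
    if isinstance(t, Ref):
        return Ref(fully_qualify(t.pointee))     # myType&  -> qualify pointee, re-add '&'
    if isinstance(t, Named) and t.scope:
        return Named(t.scope + "::" + t.name)    # prepend the enclosing scope
    return t

def spell(t):
    if isinstance(t, Ptr):
        return spell(t.pointee) + "*"
    if isinstance(t, Ref):
        return spell(t.pointee) + "&"
    return t.name

print(spell(fully_qualify(Ref(Ptr(Named("string", "std"))))))   # std::string*&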
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE126_Buffer_Overread__malloc_wchar_t_memmove_72a.cpp Label Definition File: CWE126_Buffer_Overread__malloc.label.xml Template File: sources-sink-72a.tmpl.cpp */ /* * @description * CWE: 126 Buffer Over-read * BadSource: Use a small buffer * GoodSource: Use a large buffer * Sinks: memmove * BadSink : Copy data to string using memmove * Flow Variant: 72 Data flow: data passed in a vector from one function to another in different source files * * */ #include "std_testcase.h" #include <vector> #include <wchar.h> using namespace std; namespace CWE126_Buffer_Overread__malloc_wchar_t_memmove_72 { #ifndef OMITBAD /* bad function declaration */ void badSink(vector<wchar_t *> dataVector); void bad() { wchar_t * data; vector<wchar_t *> dataVector; data = NULL; /* FLAW: Use a small buffer */ data = (wchar_t *)malloc(50*sizeof(wchar_t)); if (data == NULL) {exit(-1);} wmemset(data, L'A', 50-1); /* fill with 'A's */ data[50-1] = L'\0'; /* null terminate */ /* Put data in a vector */ dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); badSink(dataVector); } #endif /* OMITBAD */ #ifndef OMITGOOD /* good function declarations */ /* goodG2B uses the GoodSource with the BadSink */ void goodG2BSink(vector<wchar_t *> dataVector); static void goodG2B() { wchar_t * data; vector<wchar_t *> dataVector; data = NULL; /* FIX: Use a large buffer */ data = (wchar_t *)malloc(100*sizeof(wchar_t)); if (data == NULL) {exit(-1);} wmemset(data, L'A', 100-1); /* fill with 'A's */ data[100-1] = L'\0'; /* null terminate */ /* Put data in a vector */ dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); dataVector.insert(dataVector.end(), 1, data); goodG2BSink(dataVector); } void good() { goodG2B(); } #endif /* OMITGOOD */ } /* close namespace */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN using namespace CWE126_Buffer_Overread__malloc_wchar_t_memmove_72; /* so that we can use good and bad easily */ int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
JianpingZeng/xcc
xcc/test/juliet/testcases/CWE126_Buffer_Overread/s02/CWE126_Buffer_Overread__malloc_wchar_t_memmove_72a.cpp
C++
bsd-3-clause
2,936
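The Juliet variant above only sets up the undersized source buffer; in these CWE126 testcases the sink (in the companion 72b file, not shown here) typically copies a fixed 100-element amount out of it. A small Python sketch of the resulting arithmetic, assuming a 100-element copy and a 4-byte wchar_t; both numbers are assumptions for illustration (wchar_t is 2 bytes on Windows).

# How far past the allocation the copy reads, under the stated assumptions.
SIZEOF_WCHAR_T = 4          # assumed; 2 on Windows
source_elems = 50           # the "bad" source allocates only 50 wide chars
copied_elems = 100          # the sink copies a 100-element destination's worth

overread_bytes = (copied_elems - source_elems) * SIZEOF_WCHAR_T
print(overread_bytes)       # 200 bytes read beyond the 50-element buffer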
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53a.c Label Definition File: CWE126_Buffer_Overread__malloc.label.xml Template File: sources-sink-53a.tmpl.c */ /* * @description * CWE: 126 Buffer Over-read * BadSource: Use a small buffer * GoodSource: Use a large buffer * Sink: memcpy * BadSink : Copy data to string using memcpy * Flow Variant: 53 Data flow: data passed as an argument from one function through two others to a fourth; all four functions are in different source files * * */ #include "std_testcase.h" #include <wchar.h> #ifndef OMITBAD /* bad function declaration */ void CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53b_badSink(wchar_t * data); void CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53_bad() { wchar_t * data; data = NULL; /* FLAW: Use a small buffer */ data = (wchar_t *)malloc(50*sizeof(wchar_t)); if (data == NULL) {exit(-1);} wmemset(data, L'A', 50-1); /* fill with 'A's */ data[50-1] = L'\0'; /* null terminate */ CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53b_badSink(data); } #endif /* OMITBAD */ #ifndef OMITGOOD /* good function declaration */ void CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53b_goodG2BSink(wchar_t * data); /* goodG2B uses the GoodSource with the BadSink */ static void goodG2B() { wchar_t * data; data = NULL; /* FIX: Use a large buffer */ data = (wchar_t *)malloc(100*sizeof(wchar_t)); if (data == NULL) {exit(-1);} wmemset(data, L'A', 100-1); /* fill with 'A's */ data[100-1] = L'\0'; /* null terminate */ CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53b_goodG2BSink(data); } void CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53_good() { goodG2B(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
JianpingZeng/xcc
xcc/test/juliet/testcases/CWE126_Buffer_Overread/s02/CWE126_Buffer_Overread__malloc_wchar_t_memcpy_53a.c
C
bsd-3-clause
2,637
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_connect_socket_72b.cpp Label Definition File: CWE680_Integer_Overflow_to_Buffer_Overflow__malloc.label.xml Template File: sources-sink-72b.tmpl.cpp */ /* * @description * CWE: 680 Integer Overflow to Buffer Overflow * BadSource: connect_socket Read data using a connect socket (client side) * GoodSource: Small number greater than zero that will not cause an integer overflow in the sink * Sinks: * BadSink : Attempt to allocate array using length value from source * Flow Variant: 72 Data flow: data passed in a vector from one function to another in different source files * * */ #include "std_testcase.h" #include <vector> using namespace std; namespace CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_connect_socket_72 { #ifndef OMITBAD void badSink(vector<int> dataVector) { /* copy data out of dataVector */ int data = dataVector[2]; { size_t i; int *intPointer; /* POTENTIAL FLAW: if data * sizeof(int) > SIZE_MAX, overflows to a small value * so that the for loop doing the initialization causes a buffer overflow */ intPointer = (int*)malloc(data * sizeof(int)); if (intPointer == NULL) {exit(-1);} for (i = 0; i < (size_t)data; i++) { intPointer[i] = 0; /* Potentially writes beyond the boundary of intPointer */ } printIntLine(intPointer[0]); free(intPointer); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void goodG2BSink(vector<int> dataVector) { int data = dataVector[2]; { size_t i; int *intPointer; /* POTENTIAL FLAW: if data * sizeof(int) > SIZE_MAX, overflows to a small value * so that the for loop doing the initialization causes a buffer overflow */ intPointer = (int*)malloc(data * sizeof(int)); if (intPointer == NULL) {exit(-1);} for (i = 0; i < (size_t)data; i++) { intPointer[i] = 0; /* Potentially writes beyond the boundary of intPointer */ } printIntLine(intPointer[0]); free(intPointer); } } #endif /* OMITGOOD */ } /* close namespace */
JianpingZeng/xcc
xcc/test/juliet/testcases/CWE680_Integer_Overflow_to_Buffer_Overflow/CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_connect_socket_72b.cpp
C++
bsd-3-clause
2,339
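The comments in the CWE680 testcase above describe the core problem: when `data * sizeof(int)` exceeds SIZE_MAX, the multiplication wraps around, `malloc` receives a tiny byte count, and the initialization loop then writes far past the allocation. A quick Python simulation of that wraparound, assuming a 32-bit `size_t` and a 4-byte `int` purely for illustration:

# Simulate the size computation malloc() receives when size_t is 32 bits wide.
SIZE_T_MASK = 2**32 - 1
SIZEOF_INT = 4                          # assumed element size

def alloc_bytes(count):
    """Byte count after unsigned wraparound, as the C expression would compute it."""
    return (count * SIZEOF_INT) & SIZE_T_MASK

count = 0x4000_0001                     # attacker-influenced element count
print(hex(alloc_bytes(count)))          # 0x4: only 4 bytes are actually allocated
print(alloc_bytes(count) < count)       # True, yet the loop still iterates `count` times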
use na::{self, RealField};

/// Parameters for a time-step of the physics engine.
#[derive(Clone)]
pub struct IntegrationParameters<N: RealField> {
    /// The timestep (default: `1.0 / 60.0`)
    dt: N,
    /// The inverse of `dt`.
    inv_dt: N,
    /// If `true`, the world's `step` method will stop right after resolving exactly one CCD event (default: `false`).
    /// This allows the user to take action during a timestep, in-between two CCD events.
    pub return_after_ccd_substep: bool,
    /// The total elapsed time in the physics world.
    ///
    /// This is the accumulation of the `dt` of all the calls to `world.step()`.
    pub t: N,
    /// The Error Reduction Parameter in `[0, 1]` is the proportion of
    /// the positional error to be corrected at each time step (default: `0.2`).
    pub erp: N,
    /// Each cached impulse is multiplied by this coefficient in `[0, 1]`
    /// when it is re-used to initialize the solver (default `1.0`).
    pub warmstart_coeff: N,
    /// Contacts at points where the involved bodies have a relative
    /// velocity smaller than this threshold won't be affected by the restitution force (default: `1.0`).
    pub restitution_velocity_threshold: N,
    /// Amount of penetration the engine won't attempt to correct (default: `0.001m`).
    pub allowed_linear_error: N,
    /// Amount of angular drift of joint limits the engine won't
    /// attempt to correct (default: `0.001rad`).
    pub allowed_angular_error: N,
    /// Maximum linear correction during one step of the non-linear position solver (default: `0.2`).
    pub max_linear_correction: N,
    /// Maximum angular correction during one step of the non-linear position solver (default: `0.2`).
    pub max_angular_correction: N,
    /// Maximum nonlinear SOR-prox scaling parameter when the constraint
    /// correction direction is close to the kernel of the involved multibody's
    /// jacobian (default: `0.2`).
    pub max_stabilization_multiplier: N,
    /// Maximum number of iterations performed by the velocity constraints solver (default: `8`).
    pub max_velocity_iterations: usize,
    /// Maximum number of iterations performed by the position-based constraints solver (default: `3`).
    pub max_position_iterations: usize,
    /// Maximum number of iterations performed by the position-based constraints solver for CCD steps (default: `10`).
    ///
    /// This should be sufficiently high so that all penetrations get resolved. For example, if CCD causes your
    /// objects to stutter, that may be because the number of CCD position iterations is too low, causing
    /// them to remain stuck in a penetration configuration for a few frames.
    ///
    /// The higher this number, the higher its computational cost.
    pub max_ccd_position_iterations: usize,
    /// Maximum number of substeps performed by the solver (default: `1`).
    pub max_ccd_substeps: usize,
    /// Controls the number of Proximity::Intersecting events generated by a trigger during CCD resolution (default: `false`).
    ///
    /// If false, triggers will only generate one Proximity::Intersecting event during a step, even
    /// if another collider repeatedly enters and leaves the triggers during multiple CCD substeps.
    ///
    /// If true, triggers will generate as many Proximity::Intersecting and Proximity::Disjoint/Proximity::WithinMargin
    /// events as the number of times a collider repeatedly enters and leaves the triggers during multiple CCD substeps.
    /// This is more computationally intensive.
    pub multiple_ccd_substep_sensor_events_enabled: bool,
    /// Whether penetrations are taken into account in CCD resolution (default: `false`).
    ///
    /// If this is set to `false`, two penetrating colliders will not be considered to have any time of impact
    /// while they are penetrating. This may end up allowing some tunnelling, but will avoid a stuttering effect
    /// when the constraints solver fails to completely separate two colliders after a CCD contact.
    ///
    /// If this is set to `true`, two penetrating colliders will be considered to have a time of impact
    /// equal to 0 until the constraints solver manages to separate them. This will prevent tunnelling
    /// almost completely, but may introduce stuttering effects when the constraints solver fails to completely
    /// separate two colliders after a CCD contact.
    // FIXME: this is a very binary way of handling penetration.
    // We should provide a more flexible solution by letting the user choose some
    // minimal amount of movement applied to an object that gets stuck.
    pub ccd_on_penetration_enabled: bool,
}

impl<N: RealField> IntegrationParameters<N> {
    /// Creates a set of integration parameters with the given values.
    pub fn new(
        dt: N,
        erp: N,
        warmstart_coeff: N,
        restitution_velocity_threshold: N,
        allowed_linear_error: N,
        allowed_angular_error: N,
        max_linear_correction: N,
        max_angular_correction: N,
        max_stabilization_multiplier: N,
        max_velocity_iterations: usize,
        max_position_iterations: usize,
        max_ccd_position_iterations: usize,
        max_ccd_substeps: usize,
        return_after_ccd_substep: bool,
        multiple_ccd_substep_sensor_events_enabled: bool,
        ccd_on_penetration_enabled: bool,
    ) -> Self {
        IntegrationParameters {
            t: N::zero(),
            dt,
            inv_dt: if dt == N::zero() { N::zero() } else { N::one() / dt },
            erp,
            warmstart_coeff,
            restitution_velocity_threshold,
            allowed_linear_error,
            allowed_angular_error,
            max_linear_correction,
            max_angular_correction,
            max_stabilization_multiplier,
            max_velocity_iterations,
            max_position_iterations,
            max_ccd_position_iterations,
            max_ccd_substeps,
            return_after_ccd_substep,
            multiple_ccd_substep_sensor_events_enabled,
            ccd_on_penetration_enabled,
        }
    }

    /// The current time-stepping length.
    #[inline(always)]
    pub fn dt(&self) -> N {
        self.dt
    }

    /// The inverse of the time-stepping length.
    ///
    /// This is zero if `self.dt` is zero.
    #[inline(always)]
    pub fn inv_dt(&self) -> N {
        self.inv_dt
    }

    /// Sets the time-stepping length.
    ///
    /// This automatically recomputes `self.inv_dt`.
    #[inline]
    pub fn set_dt(&mut self, dt: N) {
        assert!(
            dt >= N::zero(),
            "The time-stepping length cannot be negative."
        );
        self.dt = dt;
        if dt == N::zero() {
            self.inv_dt = N::zero()
        } else {
            self.inv_dt = N::one() / dt
        }
    }

    /// Sets the inverse time-stepping length (i.e. the frequency).
    ///
    /// This automatically recomputes `self.dt`.
    #[inline]
    pub fn set_inv_dt(&mut self, inv_dt: N) {
        self.inv_dt = inv_dt;
        if inv_dt == N::zero() {
            self.dt = N::zero()
        } else {
            self.dt = N::one() / inv_dt
        }
    }
}

impl<N: RealField> Default for IntegrationParameters<N> {
    fn default() -> Self {
        Self::new(
            na::convert(1.0 / 60.0),
            na::convert(0.2),
            na::convert(1.0),
            na::convert(1.0),
            na::convert(0.001),
            na::convert(0.001),
            na::convert(0.2),
            na::convert(0.2),
            na::convert(0.2),
            8,
            3,
            10,
            1,
            false,
            false,
            false,
        )
    }
}
sebcrozet/nphysics
src/solver/integration_parameters.rs
Rust
bsd-3-clause
7,763
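Two of the doc comments above lend themselves to a quick numeric illustration: `erp` is the fraction of positional error corrected per step, and `dt`/`inv_dt` are kept consistent with `inv_dt == 0` whenever `dt == 0`. A small Python sketch of both behaviours follows; it mirrors the documented semantics rather than calling the nphysics API.

# With erp = 0.2, each step removes 20% of the remaining positional error.
def remaining_error(initial_error, erp, steps):
    error = initial_error
    for _ in range(steps):
        error -= erp * error
    return error

print(round(remaining_error(1.0, 0.2, 10), 4))   # 0.1074: about 11% of the error left

# dt and inv_dt stay consistent, including the dt == 0 special case.
def set_dt(dt):
    inv_dt = 0.0 if dt == 0.0 else 1.0 / dt
    return dt, inv_dt

print(set_dt(1.0 / 60.0))   # (0.0166..., 60.0)
print(set_dt(0.0))          # (0.0, 0.0)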
#!/usr/bin/env python3 """Creates training data for the BERT network training (noisified + masked gold predictions) using the input corpus. The masked Gold predictions use Neural Monkey's PAD_TOKEN to indicate tokens that should not be classified during training. We only leave `coverage` percent of symbols for classification. These symbols are left unchanged on input with a probability of `1 - mask_prob`. If they are being changed, they are replaced by the `mask_token` with a probability of `1 - replace_prob` and by a random vocabulary token otherwise. """ import argparse import os import numpy as np from neuralmonkey.logging import log as _log from neuralmonkey.vocabulary import ( Vocabulary, PAD_TOKEN, UNK_TOKEN, from_wordlist) def log(message: str, color: str = "blue") -> None: _log(message, color) def main() -> None: parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--input_file", type=str, default="/dev/stdin") parser.add_argument("--vocabulary", type=str, required=True) parser.add_argument("--output_prefix", type=str, default=None) parser.add_argument("--mask_token", type=str, default=UNK_TOKEN, help="token used to mask the tokens") parser.add_argument("--coverage", type=float, default=0.15, help=("percentage of tokens that should be left " "for classification during training")) parser.add_argument("--mask_prob", type=float, default=0.8, help=("probability of the classified token being " "replaced by a different token on input")) parser.add_argument("--replace_prob", type=float, default=0.1, help=("probability of the classified token being " "replaced by a random token instead of " "mask_token")) parser.add_argument("--vocab_contains_header", type=bool, default=True) parser.add_argument("--vocab_contains_frequencies", type=bool, default=True) args = parser.parse_args() assert (args.coverage <= 1 and args.coverage >= 0) assert (args.mask_prob <= 1 and args.mask_prob >= 0) assert (args.replace_prob <= 1 and args.replace_prob >= 0) log("Loading vocabulary.") vocabulary = from_wordlist( args.vocabulary, contains_header=args.vocab_contains_header, contains_frequencies=args.vocab_contains_frequencies) mask_prob = args.mask_prob replace_prob = args.replace_prob keep_prob = 1 - mask_prob - replace_prob sample_probs = (keep_prob, mask_prob, replace_prob) output_prefix = args.output_prefix if output_prefix is None: output_prefix = args.input_file out_f_noise = "{}.noisy".format(output_prefix) out_f_mask = "{}.mask".format(output_prefix) out_noise_h = open(out_f_noise, "w", encoding="utf-8") out_mask_h = open(out_f_mask, "w", encoding="utf-8") log("Processing data.") with open(args.input_file, "r", encoding="utf-8") as input_h: # TODO: performance optimizations for line in input_h: line = line.strip().split(" ") num_samples = int(args.coverage * len(line)) sampled_indices = np.random.choice(len(line), num_samples, False) output_noisy = list(line) output_masked = [PAD_TOKEN] * len(line) for i in sampled_indices: random_token = np.random.choice(vocabulary.index_to_word[4:]) new_token = np.random.choice( [line[i], args.mask_token, random_token], p=sample_probs) output_noisy[i] = new_token output_masked[i] = line[i] out_noise_h.write(str(" ".join(output_noisy)) + "\n") out_mask_h.write(str(" ".join(output_masked)) + "\n") if __name__ == "__main__": main()
ufal/neuralmonkey
scripts/preprocess_bert.py
Python
bsd-3-clause
3,940
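The preprocessing script above picks `int(coverage * len(line))` token positions per sentence and, for each picked position, keeps the original token, substitutes the mask token, or substitutes a random vocabulary token with probabilities `(1 - mask_prob - replace_prob, mask_prob, replace_prob)`; only the picked positions carry a label in the `.mask` output, everything else stays PAD_TOKEN. Below is a standalone sketch of that sampling step; the toy sentence, the `<unk>`/`<pad>` strings, and the three-word vocabulary are illustrative stand-ins for the script's real inputs.

import numpy as np

coverage, mask_prob, replace_prob = 0.15, 0.8, 0.1
sample_probs = (1 - mask_prob - replace_prob, mask_prob, replace_prob)

line = "the quick brown fox jumps over the lazy dog".split()
vocab = ["cat", "tree", "river"]                  # toy stand-in for the wordlist
num_samples = int(coverage * len(line))           # 1 position out of 9 here
sampled_indices = np.random.choice(len(line), num_samples, replace=False)

output_noisy = list(line)
output_masked = ["<pad>"] * len(line)             # stand-in for PAD_TOKEN
for i in sampled_indices:
    random_token = np.random.choice(vocab)
    output_noisy[i] = np.random.choice([line[i], "<unk>", random_token],
                                       p=sample_probs)
    output_masked[i] = line[i]                    # only sampled positions get a label

print(" ".join(output_noisy))
print(" ".join(output_masked))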
<?php /* @var $this \yii\web\View */ /* @var $content string */ use backend\assets\AppAsset; use yii\helpers\Html; use yii\bootstrap\Nav; use yii\bootstrap\NavBar; use yii\widgets\Breadcrumbs; use common\widgets\Alert; AppAsset::register($this); ?> <?php $this->beginPage() ?> <!DOCTYPE html> <html lang="<?= Yii::$app->language ?>"> <head> <meta charset="<?= Yii::$app->charset ?>"> <meta name="viewport" content="width=device-width, initial-scale=1"> <?= Html::csrfMetaTags() ?> <title><?= Html::encode($this->title) ?></title> <?php $this->head() ?> </head> <body> <?php $this->beginBody() ?> <div class="wrap"> <?php NavBar::begin([ 'brandLabel' => 'My Company', 'brandUrl' => Yii::$app->homeUrl, 'options' => [ 'class' => 'navbar-inverse navbar-fixed-top', ], ]); $menuItems = [ ['label' => 'Home', 'url' => ['/site/index']], ['label' => 'Todo', 'url' => ['/todo/todo/index']] ]; if (Yii::$app->user->isGuest) { $menuItems[] = ['label' => 'Login', 'url' => ['/site/login']]; } else { $menuItems[] = [ 'label' => 'Logout (' . Yii::$app->user->identity->username . ')', 'url' => ['/site/logout'], 'linkOptions' => ['data-method' => 'post'] ]; } echo Nav::widget([ 'options' => ['class' => 'navbar-nav navbar-right'], 'items' => $menuItems, ]); NavBar::end(); ?> <div class="container"> <?= Breadcrumbs::widget([ 'links' => isset($this->params['breadcrumbs']) ? $this->params['breadcrumbs'] : [], ]) ?> <?= Alert::widget() ?> <?= $content ?> </div> </div> <footer class="footer"> <div class="container"> <p class="pull-left">&copy; My Company <?= date('Y') ?></p> <p class="pull-right"><?= Yii::powered() ?></p> </div> </footer> <?php $this->endBody() ?> </body> </html> <?php $this->endPage() ?>
deka6pb/yii2-todo-service
backend/views/layouts/main.php
PHP
bsd-3-clause
1,987
/*********************************************************************************************************************** ** ** Copyright (c) 2011, 2014 ETH Zurich ** All rights reserved. ** ** Redistribution and use in source and binary forms, with or without modification, are permitted provided that the ** following conditions are met: ** ** * Redistributions of source code must retain the above copyright notice, this list of conditions and the ** following disclaimer. ** * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the ** following disclaimer in the documentation and/or other materials provided with the distribution. ** * Neither the name of the ETH Zurich nor the names of its contributors may be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, ** INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ** DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ** SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ** WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** **********************************************************************************************************************/ #include "cppimport.h" #include "SelfTest/src/SelfTestSuite.h" Q_EXPORT_PLUGIN2( cppimport, CppImport::CppImport ) namespace CppImport { bool CppImport::initialize(Core::EnvisionManager&) { return true; } void CppImport::unload() { } void CppImport::selfTest(QString testid) { if (testid.isEmpty()) SelfTest::TestManager<CppImport>::runAllTests().printResultStatistics(); else SelfTest::TestManager<CppImport>::runTest(testid).printResultStatistics(); } }
patrick-luethi/Envision
CppImport/src/cppimport.cpp
C++
bsd-3-clause
2,296
<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <title>Code Coverage for /home/david/Documentos/curso/php/zf2.intermediario/module/SONAcl/src/SONAcl/Form/Role.php</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link href="../../../css/bootstrap.min.css" rel="stylesheet"> <link href="../../../css/style.css" rel="stylesheet"> <!--[if lt IE 9]> <script src="../../../js/html5shiv.min.js"></script> <script src="../../../js/respond.min.js"></script> <![endif]--> </head> <body> <header> <div class="container"> <div class="row"> <div class="col-md-12"> <ol class="breadcrumb"> <li><a href="../../../index.html">/home/david/Documentos/curso/php/zf2.intermediario/module/SONAcl</a></li> <li><a href="../../index.html">src</a></li> <li><a href="../index.html">SONAcl</a></li> <li><a href="index.html">Form</a></li> <li class="active">Role.php</li> </ol> </div> </div> </div> </header> <div class="container"> <table class="table table-bordered"> <thead> <tr> <td>&nbsp;</td> <td colspan="10"><div align="center"><strong>Code Coverage</strong></div></td> </tr> <tr> <td>&nbsp;</td> <td colspan="3"><div align="center"><strong>Classes and Traits</strong></div></td> <td colspan="4"><div align="center"><strong>Functions and Methods</strong></div></td> <td colspan="3"><div align="center"><strong>Lines</strong></div></td> </tr> </thead> <tbody> <tr> <td class="danger">Total</td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;1</div></td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;1</div></td> <td class="danger small"><abbr title="Change Risk Anti-Patterns (CRAP) Index">CRAP</abbr></td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;43</div></td> </tr> <tr> <td class="danger">Role</td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;1</div></td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;1</div></td> <td 
class="danger small">2</td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;43</div></td> </tr> <tr> <td class="danger" colspan="4">&nbsp;<a href="#11"><abbr title="__construct($name = null, array $parent = null)">__construct</abbr></a></td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;1</div></td> <td class="danger small">2</td> <td class="danger big"> <div class="progress"> <div class="progress-bar progress-bar-danger" role="progressbar" aria-valuenow="0.00" aria-valuemin="0" aria-valuemax="100" style="width: 0.00%"> <span class="sr-only">0.00% covered (danger)</span> </div> </div> </td> <td class="danger small"><div align="right">0.00%</div></td> <td class="danger small"><div align="right">0&nbsp;/&nbsp;43</div></td> </tr> </tbody> </table> <table id="code" class="table table-borderless table-condensed"> <tbody> <tr><td><div align="right"><a name="1"></a><a href="#1">1</a></div></td><td class="codeLine"><span class="default">&lt;?php</span></td></tr> <tr><td><div align="right"><a name="2"></a><a href="#2">2</a></div></td><td class="codeLine"></td></tr> <tr><td><div align="right"><a name="3"></a><a href="#3">3</a></div></td><td class="codeLine"><span class="keyword">namespace</span><span class="default">&nbsp;</span><span class="default">SONAcl</span><span class="default">\</span><span class="default">Form</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="4"></a><a href="#4">4</a></div></td><td class="codeLine"></td></tr> <tr><td><div align="right"><a name="5"></a><a href="#5">5</a></div></td><td class="codeLine"><span class="keyword">use</span><span class="default">&nbsp;</span><span class="default">Zend</span><span class="default">\</span><span class="default">Form</span><span class="default">\</span><span class="default">Form</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="6"></a><a href="#6">6</a></div></td><td class="codeLine"></td></tr> <tr><td><div align="right"><a name="7"></a><a href="#7">7</a></div></td><td class="codeLine"><span class="keyword">class</span><span class="default">&nbsp;</span><span class="default">Role</span><span class="default">&nbsp;</span><span class="keyword">extends</span><span class="default">&nbsp;</span><span class="default">Form</span><span class="default">&nbsp;</span><span class="keyword">{</span></td></tr> <tr><td><div align="right"><a name="8"></a><a href="#8">8</a></div></td><td class="codeLine"></td></tr> <tr><td><div align="right"><a name="9"></a><a href="#9">9</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">protected</span><span class="default">&nbsp;</span><span class="default">$parent</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="10"></a><a href="#10">10</a></div></td><td class="codeLine"></td></tr> <tr><td><div 
align="right"><a name="11"></a><a href="#11">11</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">public</span><span class="default">&nbsp;</span><span class="keyword">function</span><span class="default">&nbsp;</span><span class="default">__construct</span><span class="keyword">(</span><span class="default">$name</span><span class="default">&nbsp;</span><span class="keyword">=</span><span class="default">&nbsp;</span><span class="default">null</span><span class="keyword">,</span><span class="default">&nbsp;</span><span class="keyword">array</span><span class="default">&nbsp;</span><span class="default">$parent</span><span class="default">&nbsp;</span><span class="keyword">=</span><span class="default">&nbsp;</span><span class="default">null</span><span class="keyword">)</span><span class="default">&nbsp;</span><span class="keyword">{</span></td></tr> <tr class="danger"><td><div align="right"><a name="12"></a><a href="#12">12</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">parent</span><span class="default">::</span><span class="default">__construct</span><span class="keyword">(</span><span class="default">'roles'</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr class="danger"><td><div align="right"><a name="13"></a><a href="#13">13</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">parent</span><span class="default">&nbsp;&nbsp;</span><span class="keyword">=</span><span class="default">&nbsp;</span><span class="default">$parent</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="14"></a><a href="#14">14</a></div></td><td class="codeLine"></td></tr> <tr class="danger"><td><div align="right"><a name="15"></a><a href="#15">15</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">setAttribute</span><span class="keyword">(</span><span class="default">'method'</span><span class="keyword">,</span><span class="default">&nbsp;</span><span class="default">'post'</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="16"></a><a href="#16">16</a></div></td><td class="codeLine"></td></tr> <tr class="danger"><td><div align="right"><a name="17"></a><a href="#17">17</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$id</span><span class="default">&nbsp;</span><span class="keyword">=</span><span class="default">&nbsp;</span><span class="keyword">new</span><span class="default">&nbsp;</span><span class="default">\</span><span class="default">Zend</span><span class="default">\</span><span class="default">Form</span><span class="default">\</span><span class="default">Element</span><span class="default">\</span><span class="default">Hidden</span><span class="keyword">(</span><span class="default">'id'</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr class="danger"><td><div align="right"><a name="18"></a><a href="#18">18</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span 
class="default">$this</span><span class="default">-&gt;</span><span class="default">add</span><span class="keyword">(</span><span class="default">$id</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="19"></a><a href="#19">19</a></div></td><td class="codeLine"></td></tr> <tr class="danger"><td><div align="right"><a name="20"></a><a href="#20">20</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">add</span><span class="keyword">(</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="21"></a><a href="#21">21</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'name'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'nome'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="22"></a><a href="#22">22</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'options'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="23"></a><a href="#23">23</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'type'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Zend\Form\Element\Text'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="24"></a><a href="#24">24</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'label'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Nome:'</span></td></tr> <tr class="danger"><td><div align="right"><a name="25"></a><a href="#25">25</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="26"></a><a href="#26">26</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'attributes'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="27"></a><a href="#27">27</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'id'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span 
class="default">&nbsp;</span><span class="default">'nome'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="28"></a><a href="#28">28</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'class'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'form-control&nbsp;input-lg'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="29"></a><a href="#29">29</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'placeholder'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Entre&nbsp;com&nbsp;o&nbsp;nome'</span></td></tr> <tr class="danger"><td><div align="right"><a name="30"></a><a href="#30">30</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span></td></tr> <tr class="danger"><td><div align="right"><a name="31"></a><a href="#31">31</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="32"></a><a href="#32">32</a></div></td><td class="codeLine"></td></tr> <tr class="danger"><td><div align="right"><a name="33"></a><a href="#33">33</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$allParent</span><span class="default">&nbsp;</span><span class="keyword">=</span><span class="default">&nbsp;</span><span class="default">array_merge</span><span class="keyword">(</span><span class="keyword">array</span><span class="keyword">(</span><span class="default">0</span><span class="default">=&gt;</span><span class="default">'Nenhum'</span><span class="keyword">)</span><span class="keyword">,</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">parent</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr class="danger"><td><div align="right"><a name="34"></a><a href="#34">34</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">add</span><span class="keyword">(</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="35"></a><a href="#35">35</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'type'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Zend\Form\Element\Select'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="36"></a><a href="#36">36</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span 
class="default">'name'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'parent'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="37"></a><a href="#37">37</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'attributes'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;&nbsp;</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="38"></a><a href="#38">38</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'id'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'parent'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="39"></a><a href="#39">39</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'class'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'form-control'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="40"></a><a href="#40">40</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="41"></a><a href="#41">41</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'options'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="42"></a><a href="#42">42</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'label'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Herda:'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="43"></a><a href="#43">43</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'options'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">$allParent</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="44"></a><a href="#44">44</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="45"></a><a 
href="#45">45</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="46"></a><a href="#46">46</a></div></td><td class="codeLine"></td></tr> <tr class="danger"><td><div align="right"><a name="47"></a><a href="#47">47</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$isAdmin</span><span class="default">&nbsp;</span><span class="keyword">=</span><span class="default">&nbsp;</span><span class="keyword">new</span><span class="default">&nbsp;</span><span class="default">\</span><span class="default">Zend</span><span class="default">\</span><span class="default">Form</span><span class="default">\</span><span class="default">Element</span><span class="default">\</span><span class="default">Checkbox</span><span class="keyword">(</span><span class="default">&quot;isAdmin&quot;</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr class="danger"><td><div align="right"><a name="48"></a><a href="#48">48</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$isAdmin</span><span class="default">-&gt;</span><span class="default">setLabel</span><span class="keyword">(</span><span class="default">&quot;Admin?:&nbsp;&quot;</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr class="danger"><td><div align="right"><a name="49"></a><a href="#49">49</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">add</span><span class="keyword">(</span><span class="default">$isAdmin</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr><td><div align="right"><a name="50"></a><a href="#50">50</a></div></td><td class="codeLine"></td></tr> <tr class="danger"><td><div align="right"><a name="51"></a><a href="#51">51</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">$this</span><span class="default">-&gt;</span><span class="default">add</span><span class="keyword">(</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="52"></a><a href="#52">52</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'name'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'submit'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="53"></a><a href="#53">53</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'type'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Zend\Form\Element\Submit'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="54"></a><a href="#54">54</a></div></td><td class="codeLine"><span 
class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'attributes'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="keyword">array</span><span class="keyword">(</span></td></tr> <tr class="danger"><td><div align="right"><a name="55"></a><a href="#55">55</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'value'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'Salvar'</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="56"></a><a href="#56">56</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'id'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">&quot;id-button-form&quot;</span><span class="keyword">,</span></td></tr> <tr class="danger"><td><div align="right"><a name="57"></a><a href="#57">57</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="default">'class'</span><span class="default">&nbsp;</span><span class="default">=&gt;</span><span class="default">&nbsp;</span><span class="default">'btn&nbsp;btn-large&nbsp;btn-success'</span></td></tr> <tr class="danger"><td><div align="right"><a name="58"></a><a href="#58">58</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span></td></tr> <tr class="danger"><td><div align="right"><a name="59"></a><a href="#59">59</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">)</span><span class="keyword">)</span><span class="keyword">;</span></td></tr> <tr class="danger"><td><div align="right"><a name="60"></a><a href="#60">60</a></div></td><td class="codeLine"><span class="default">&nbsp;&nbsp;&nbsp;&nbsp;</span><span class="keyword">}</span></td></tr> <tr><td><div align="right"><a name="61"></a><a href="#61">61</a></div></td><td class="codeLine"><span class="keyword">}</span></td></tr> </tbody> </table> <footer> <hr/> <h4>Legend</h4> <p> <span class="success"><strong>Executed</strong></span> <span class="danger"><strong>Not Executed</strong></span> <span class="warning"><strong>Dead Code</strong></span> </p> <p> <small>Generated by <a href="http://github.com/sebastianbergmann/php-code-coverage" target="_top">PHP_CodeCoverage 2.1.8</a> using <a href="http://php.net/" target="_top">PHP 5.6.13-0+deb8u1</a> and <a href="http://phpunit.de/">PHPUnit 4.7.7</a> at Mon Nov 9 20:40:43 BRST 2015.</small> </p> <a title="Back to the top" id="toplink" href="#"><span class="glyphicon glyphicon-arrow-up"></span></a> </footer> </div> <script src="../../../js/jquery.min.js" type="text/javascript"></script> <script src="../../../js/bootstrap.min.js" type="text/javascript"></script> <script src="../../../js/holder.min.js" type="text/javascript"></script> <script type="text/javascript"> $(function() { var $window = $(window) , $top_link = $('#toplink') , $body = 
$('body, html') , offset = $('#code').offset().top; $top_link.hide().click(function(event) { event.preventDefault(); $body.animate({scrollTop:0}, 800); }); $window.scroll(function() { if($window.scrollTop() > offset) { $top_link.fadeIn(); } else { $top_link.fadeOut(); } }).scroll(); $('.popin').popover({trigger: 'hover'}); }); </script> </body> </html>
davidpetro88/zf2_gema
module/SONAcl/test/_reports/coverage/src/SONAcl/Form/Role.php.html
HTML
bsd-3-clause
29,852
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/shell.h" #include "base/utf_string_conversions.h" // ASCIIToUTF16 #include "ui/aura/root_window.h" #include "ui/aura/window.h" #include "ui/gfx/canvas.h" #include "ui/views/controls/button/checkbox.h" #include "ui/views/controls/button/label_button.h" #include "ui/views/controls/button/radio_button.h" #include "ui/views/widget/widget.h" #include "ui/views/widget/widget_delegate.h" namespace { // Default window position. const int kWindowLeft = 170; const int kWindowTop = 200; // Default window size. const int kWindowWidth = 400; const int kWindowHeight = 400; // A window showing samples of commonly used widgets. class WidgetsWindow : public views::WidgetDelegateView { public: WidgetsWindow(); virtual ~WidgetsWindow(); // Overridden from views::View: virtual void OnPaint(gfx::Canvas* canvas) OVERRIDE; virtual void Layout() OVERRIDE; virtual gfx::Size GetPreferredSize() OVERRIDE; // Overridden from views::WidgetDelegate: virtual views::View* GetContentsView() OVERRIDE; virtual string16 GetWindowTitle() const OVERRIDE; virtual bool CanResize() const OVERRIDE; private: views::LabelButton* button_; views::LabelButton* disabled_button_; views::Checkbox* checkbox_; views::Checkbox* checkbox_disabled_; views::Checkbox* checkbox_checked_; views::Checkbox* checkbox_checked_disabled_; views::RadioButton* radio_button_; views::RadioButton* radio_button_disabled_; views::RadioButton* radio_button_selected_; views::RadioButton* radio_button_selected_disabled_; }; WidgetsWindow::WidgetsWindow() : button_(new views::LabelButton(NULL, ASCIIToUTF16("Button"))), disabled_button_( new views::LabelButton(NULL, ASCIIToUTF16("Disabled button"))), checkbox_(new views::Checkbox(ASCIIToUTF16("Checkbox"))), checkbox_disabled_(new views::Checkbox( ASCIIToUTF16("Checkbox disabled"))), checkbox_checked_(new views::Checkbox(ASCIIToUTF16("Checkbox checked"))), checkbox_checked_disabled_(new views::Checkbox( ASCIIToUTF16("Checkbox checked disabled"))), radio_button_(new views::RadioButton(ASCIIToUTF16("Radio button"), 0)), radio_button_disabled_(new views::RadioButton( ASCIIToUTF16("Radio button disabled"), 0)), radio_button_selected_(new views::RadioButton( ASCIIToUTF16("Radio button selected"), 0)), radio_button_selected_disabled_(new views::RadioButton( ASCIIToUTF16("Radio button selected disabled"), 1)) { button_->SetStyle(views::Button::STYLE_NATIVE_TEXTBUTTON); AddChildView(button_); disabled_button_->SetEnabled(false); disabled_button_->SetStyle(views::Button::STYLE_NATIVE_TEXTBUTTON); AddChildView(disabled_button_); AddChildView(checkbox_); checkbox_disabled_->SetEnabled(false); AddChildView(checkbox_disabled_); checkbox_checked_->SetChecked(true); AddChildView(checkbox_checked_); checkbox_checked_disabled_->SetChecked(true); checkbox_checked_disabled_->SetEnabled(false); AddChildView(checkbox_checked_disabled_); AddChildView(radio_button_); radio_button_disabled_->SetEnabled(false); AddChildView(radio_button_disabled_); radio_button_selected_->SetChecked(true); AddChildView(radio_button_selected_); radio_button_selected_disabled_->SetChecked(true); radio_button_selected_disabled_->SetEnabled(false); AddChildView(radio_button_selected_disabled_); } WidgetsWindow::~WidgetsWindow() { } void WidgetsWindow::OnPaint(gfx::Canvas* canvas) { canvas->FillRect(GetLocalBounds(), SK_ColorWHITE); } void WidgetsWindow::Layout() { const int 
kVerticalPad = 5; int left = 5; int top = kVerticalPad; for (int i = 0; i < child_count(); ++i) { views::View* view = child_at(i); gfx::Size preferred = view->GetPreferredSize(); view->SetBounds(left, top, preferred.width(), preferred.height()); top += preferred.height() + kVerticalPad; } } gfx::Size WidgetsWindow::GetPreferredSize() { return gfx::Size(kWindowWidth, kWindowHeight); } views::View* WidgetsWindow::GetContentsView() { return this; } string16 WidgetsWindow::GetWindowTitle() const { return ASCIIToUTF16("Examples: Widgets"); } bool WidgetsWindow::CanResize() const { return true; } } // namespace namespace ash { namespace shell { void CreateWidgetsWindow() { gfx::Rect bounds(kWindowLeft, kWindowTop, kWindowWidth, kWindowHeight); views::Widget* widget = views::Widget::CreateWindowWithContextAndBounds( new WidgetsWindow, Shell::GetPrimaryRootWindow(), bounds); widget->GetNativeView()->SetName("WidgetsWindow"); widget->Show(); } } // namespace shell } // namespace ash
timopulkkinen/BubbleFish
ash/shell/widgets.cc
C++
bsd-3-clause
4,821
/*------------------------------------------------------------------------- * * Copyright (c) 2003-2011, PostgreSQL Global Development Group * * *------------------------------------------------------------------------- */ package org.postgresql.geometric; import org.postgresql.util.GT; import org.postgresql.util.PGobject; import org.postgresql.util.PGtokenizer; import org.postgresql.util.PSQLException; import org.postgresql.util.PSQLState; import java.io.Serializable; import java.sql.SQLException; /** * This represents the box datatype within org.postgresql. */ public class PGbox extends PGobject implements Serializable, Cloneable { /** * These are the two points. */ public PGpoint point[] = new PGpoint[2]; /** * @param x1 first x coordinate * @param y1 first y coordinate * @param x2 second x coordinate * @param y2 second y coordinate */ public PGbox(double x1, double y1, double x2, double y2) { this(); this.point[0] = new PGpoint(x1, y1); this.point[1] = new PGpoint(x2, y2); } /** * @param p1 first point * @param p2 second point */ public PGbox(PGpoint p1, PGpoint p2) { this(); this.point[0] = p1; this.point[1] = p2; } /** * @param s Box definition in PostgreSQL syntax * @exception SQLException if definition is invalid */ public PGbox(String s) throws SQLException { this(); setValue(s); } /** * Required constructor */ public PGbox() { setType("box"); } /** * This method sets the value of this object. It should be overidden, * but still called by subclasses. * * @param value a string representation of the value of the object * @exception SQLException thrown if value is invalid for this type */ public void setValue(String value) throws SQLException { PGtokenizer t = new PGtokenizer(value, ','); if (t.getSize() != 2) throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", new Object[]{type,value}), PSQLState.DATA_TYPE_MISMATCH); point[0] = new PGpoint(t.getToken(0)); point[1] = new PGpoint(t.getToken(1)); } /** * @param obj Object to compare with * @return true if the two boxes are identical */ public boolean equals(Object obj) { if (obj instanceof PGbox) { PGbox p = (PGbox)obj; // Same points. if (p.point[0].equals(point[0]) && p.point[1].equals(point[1])) return true; // Points swapped. if (p.point[0].equals(point[1]) && p.point[1].equals(point[0])) return true; // Using the opposite two points of the box: // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1) if (p.point[0].x == point[0].x && p.point[0].y == point[1].y && p.point[1].x == point[1].x && p.point[1].y == point[0].y) return true; // Using the opposite two points of the box, and the points are swapped // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2) if (p.point[0].x == point[1].x && p.point[0].y == point[0].y && p.point[1].x == point[0].x && p.point[1].y == point[1].y) return true; } return false; } public int hashCode() { // This relies on the behaviour of point's hashcode being an exclusive-OR of // its X and Y components; we end up with an exclusive-OR of the two X and // two Y components, which is equal whenever equals() would return true // since xor is commutative. 
return point[0].hashCode() ^ point[1].hashCode(); } public Object clone() throws CloneNotSupportedException { PGbox newPGbox = (PGbox) super.clone(); if( newPGbox.point != null ) { newPGbox.point = (PGpoint[]) newPGbox.point.clone(); for( int i = 0; i < newPGbox.point.length; ++i ) if( newPGbox.point[i] != null ) newPGbox.point[i] = (PGpoint) newPGbox.point[i].clone(); } return newPGbox; } /** * @return the PGbox in the syntax expected by org.postgresql */ public String getValue() { return point[0].toString() + "," + point[1].toString(); } }
thkoch2001/libpostgresql-jdbc-java
org/postgresql/geometric/PGbox.java
Java
bsd-3-clause
4,461
// Copyright 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CC_RESOURCE_UPDATE_CONTROLLER_H_ #define CC_RESOURCE_UPDATE_CONTROLLER_H_ #include "base/basictypes.h" #include "base/memory/scoped_ptr.h" #include "base/memory/weak_ptr.h" #include "base/time.h" #include "cc/cc_export.h" #include "cc/resource_update_queue.h" namespace cc { class ResourceProvider; class Thread; class ResourceUpdateControllerClient { public: virtual void readyToFinalizeTextureUpdates() = 0; protected: virtual ~ResourceUpdateControllerClient() { } }; class CC_EXPORT ResourceUpdateController { public: static scoped_ptr<ResourceUpdateController> create(ResourceUpdateControllerClient* client, Thread* thread, scoped_ptr<ResourceUpdateQueue> queue, ResourceProvider* resourceProvider) { return make_scoped_ptr(new ResourceUpdateController(client, thread, queue.Pass(), resourceProvider)); } static size_t maxPartialTextureUpdates(); virtual ~ResourceUpdateController(); // Discard uploads to textures that were evicted on the impl thread. void discardUploadsToEvictedResources(); void performMoreUpdates(base::TimeTicks timeLimit); void finalize(); // Virtual for testing. virtual base::TimeTicks now() const; virtual base::TimeDelta updateMoreTexturesTime() const; virtual size_t updateMoreTexturesSize() const; protected: ResourceUpdateController(ResourceUpdateControllerClient*, Thread*, scoped_ptr<ResourceUpdateQueue>, ResourceProvider*); private: static size_t maxFullUpdatesPerTick(ResourceProvider*); size_t maxBlockingUpdates() const; base::TimeDelta pendingUpdateTime() const; void updateTexture(ResourceUpdate); // This returns true when there were textures left to update. bool updateMoreTexturesIfEnoughTimeRemaining(); void updateMoreTexturesNow(); void onTimerFired(); ResourceUpdateControllerClient* m_client; scoped_ptr<ResourceUpdateQueue> m_queue; bool m_contentsTexturesPurged; ResourceProvider* m_resourceProvider; base::TimeTicks m_timeLimit; size_t m_textureUpdatesPerTick; bool m_firstUpdateAttempt; Thread* m_thread; base::WeakPtrFactory<ResourceUpdateController> m_weakFactory; bool m_taskPosted; DISALLOW_COPY_AND_ASSIGN(ResourceUpdateController); }; } // namespace cc #endif // CC_RESOURCE_UPDATE_CONTROLLER_H_
zcbenz/cefode-chromium
cc/resource_update_controller.h
C
bsd-3-clause
2,501
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase


class StrTests(TranspileTestCase):
    def test_setattr(self):
        self.assertCodeExecution("""
            x = "Hello, world"
            x.attr = 42
            print('Done.')
            """)

    def test_endswith(self):
        self.assertCodeExecution("""
            s = "abracadabra"
            suffix = "abra"
            print(s.endswith(suffix))
            """)

        self.assertCodeExecution("""
            s = "abracadabra"
            suffix = "ABRA"
            print(s.endswith(suffix))
            """)

        self.assertCodeExecution("""
            s = "ABRACADABRA"
            suffix = "abra"
            print(s.endswith(suffix))
            """)

        # self.assertCodeExecution("""
        #     print('abracadabra'.endswith('abra'))
        #     """)

    def test_getattr(self):
        self.assertCodeExecution("""
            x = "Hello, world"
            print(x.attr)
            print('Done.')
            """)

    def test_getitem(self):
        # Simple positive index
        self.assertCodeExecution("""
            x = "12345"
            print(x[2])
            """)

        # Simple negative index
        self.assertCodeExecution("""
            x = "12345"
            print(x[-2])
            """)

        # Positive index out of range
        self.assertCodeExecution("""
            x = "12345"
            print(x[10])
            """)

        # Negative index out of range
        self.assertCodeExecution("""
            x = "12345"
            print(x[-10])
            """)

    def test_slice(self):
        # Full slice
        self.assertCodeExecution("""
            x = "12345"
            print(x[:])
            """)

        # Left bound slice
        self.assertCodeExecution("""
            x = "12345"
            print(x[1:])
            """)

        # Right bound slice
        self.assertCodeExecution("""
            x = "12345"
            print(x[:4])
            """)

        # Slice bound in both directions
        self.assertCodeExecution("""
            x = "12345"
            print(x[1:4])
            """)

        # Slice bound in both directions with end out of bounds
        self.assertCodeExecution("""
            x = "12345"
            print(x[1:6])
            """)

        # Slice bound in both directions with start out of bounds
        self.assertCodeExecution("""
            x = "12345"
            print(x[6:7])
            """)

    def test_case_changes(self):
        self.assertCodeExecution("""
            for s in ['hello, world', 'HEllo, WORLD', 'átomo', '']:
                print(s.capitalize())
                print(s.lower())
                # print(s.swap())
                print(s.title())
                print(s.upper())
            """)

    def test_index(self):
        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('hell'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('world'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('hell', 1))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('hell', 1, 3))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('hell', 1, 100))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('hell', 1, -1))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.index('hell', -4))
            """)

    def test_count(self):
        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('e'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('a'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('ll'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('ll', 3))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('ll', 3, 4))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('ll', 0, 4))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('ll', 0, 100))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('hell', 1, -1))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.count('hell', -4))
            """)

    def test_find(self):
        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.find('hell'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.find('world'))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.find('hell', 1))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.find('hell', 1, 3))
            """)

        self.assertCodeExecution("""
            s = 'hello hell'
            print(s.find('hell',
1, 100)) """) self.assertCodeExecution(""" s = 'hello hell' print(s.find('hell', 1, -1)) """) self.assertCodeExecution(""" s = 'hello hell' print(s.find('hell', -4)) """) def test_expand(self): self.assertCodeExecution(""" print('\\t'.expandtabs()) print('a\\t'.expandtabs()) print('aa\\t'.expandtabs()) print('aaa\\t'.expandtabs()) print('aaaaaaaa\\t'.expandtabs()) print('a\\naa\\t'.expandtabs()) print('\\t'.expandtabs(3)) print('a\\t'.expandtabs(3)) print('aa\\t'.expandtabs(7)) print('aaa\\t'.expandtabs(4)) print('aaaaaaaa\\t'.expandtabs(4)) print('a\\naa\\t'.expandtabs(4)) """) def test_title(self): self.assertCodeExecution(""" s = ' foo bar baz ' print(s.title()) """) def test_len(self): self.assertCodeExecution(""" s = ' foo bar baz ' print(len(s)) """) class UnaryStrOperationTests(UnaryOperationTestCase, TranspileTestCase): data_type = 'str' not_implemented = [ ] class BinaryStrOperationTests(BinaryOperationTestCase, TranspileTestCase): data_type = 'str' not_implemented = [ 'test_add_class', 'test_add_frozenset', 'test_and_class', 'test_and_frozenset', 'test_eq_class', 'test_eq_frozenset', 'test_floor_divide_class', 'test_floor_divide_complex', 'test_floor_divide_frozenset', 'test_ge_class', 'test_ge_frozenset', 'test_gt_class', 'test_gt_frozenset', 'test_le_class', 'test_le_frozenset', 'test_lshift_class', 'test_lshift_frozenset', 'test_lt_class', 'test_lt_frozenset', 'test_modulo_bool', 'test_modulo_bytes', 'test_modulo_bytearray', 'test_modulo_class', 'test_modulo_complex', 'test_modulo_dict', 'test_modulo_float', 'test_modulo_frozenset', 'test_modulo_slice', 'test_modulo_int', 'test_modulo_list', 'test_modulo_None', 'test_modulo_NotImplemented', 'test_modulo_range', 'test_modulo_set', 'test_modulo_str', 'test_modulo_tuple', 'test_multiply_class', 'test_multiply_frozenset', 'test_ne_class', 'test_ne_frozenset', 'test_or_class', 'test_or_frozenset', 'test_power_class', 'test_power_frozenset', 'test_rshift_class', 'test_rshift_frozenset', 'test_subscr_bool', 'test_subscr_class', 'test_subscr_frozenset', 'test_subscr_slice', 'test_subtract_class', 'test_subtract_frozenset', 'test_true_divide_class', 'test_true_divide_frozenset', 'test_xor_class', 'test_xor_frozenset', ] class InplaceStrOperationTests(InplaceOperationTestCase, TranspileTestCase): data_type = 'str' not_implemented = [ 'test_add_class', 'test_add_frozenset', 'test_and_class', 'test_and_frozenset', 'test_floor_divide_class', 'test_floor_divide_complex', 'test_floor_divide_frozenset', 'test_lshift_class', 'test_lshift_frozenset', 'test_modulo_bool', 'test_modulo_bytes', 'test_modulo_bytearray', 'test_modulo_class', 'test_modulo_complex', 'test_modulo_dict', 'test_modulo_float', 'test_modulo_frozenset', 'test_modulo_slice', 'test_modulo_int', 'test_modulo_list', 'test_modulo_None', 'test_modulo_NotImplemented', 'test_modulo_range', 'test_modulo_set', 'test_modulo_str', 'test_modulo_tuple', 'test_multiply_class', 'test_multiply_frozenset', 'test_or_class', 'test_or_frozenset', 'test_power_class', 'test_power_frozenset', 'test_rshift_class', 'test_rshift_frozenset', 'test_subtract_class', 'test_subtract_frozenset', 'test_true_divide_class', 'test_true_divide_frozenset', 'test_xor_class', 'test_xor_frozenset', ]
Felix5721/voc
tests/datatypes/test_str.py
Python
bsd-3-clause
9,931
from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import pgettext_lazy from .base import Product from .variants import (ProductVariant, PhysicalProduct, ColoredVariant, StockedProduct) class Bag(PhysicalProduct, Product, ColoredVariant): class Meta: app_label = 'product' class Shirt(PhysicalProduct, Product, ColoredVariant): class Meta: app_label = 'product' class BagVariant(ProductVariant, StockedProduct): product = models.ForeignKey(Bag, related_name='variants') class Meta: app_label = 'product' @python_2_unicode_compatible class ShirtVariant(ProductVariant, StockedProduct): SIZE_CHOICES = ( ('xs', pgettext_lazy('Variant size', 'XS')), ('s', pgettext_lazy('Variant size', 'S')), ('m', pgettext_lazy('Variant size', 'M')), ('l', pgettext_lazy('Variant size', 'L')), ('xl', pgettext_lazy('Variant size', 'XL')), ('xxl', pgettext_lazy('Variant size', 'XXL'))) product = models.ForeignKey(Shirt, related_name='variants') size = models.CharField( pgettext_lazy('Variant field', 'size'), choices=SIZE_CHOICES, max_length=3) class Meta: app_label = 'product' def __str__(self): return '%s (%s)' % (self.product.name, self.size)
hongquan/saleor
saleor/product/models/products.py
Python
bsd-3-clause
1,423
/* * Copyright (c) 2009, INRIA * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of INRIA nor the names of its contributors may * be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package obvious.data; /** * Schema is an interface describing the columns of a Tuple and by * extension of a Table. Each column is represented by an index, a * name, a Java type and a default value. * Columns could be added or removed. * @see Table * @see Tuple * @author obvious * @version $Revision$ */ public interface Schema extends Table { /** * Metadata title for name. */ String NAME = "Name"; /** * Metadata title for type. */ String TYPE = "Type"; /** * Metadata title for default value. */ String DEFAULT_VALUE = "DefaultValue"; /** * Metadata title for categories. */ String CATEGORIES = "Categories"; /** * Metadata title for scale. */ String SCALE = "Scale"; //nominal, ordinal, interval, ratio /** * Metadata title for format. */ String FORMAT = "Format"; /** * Metadata title for locale. */ String LOCALE = "Locale"; /** * Metadata title for uniq. */ String UNIQ = "Uniq"; /** * Metadata title for has null. */ String HAS_NULL = "HasNull"; // String UNIT = "Unit"; // String HIDDEN = "Hidden"; //TODO /** * Gets the number of columns / data fields in this table. * @return the number of columns */ int getColumnCount(); /** * Gets the data type of the column at the given column index. * @param col the column index * @return the data type (as a Java Class) of the column */ Class<?> getColumnType(int col); /** * Gets the default value for a column. * @param col column index * @return default value for the specified column */ Object getColumnDefault(int col); /** * Checks if the getValue method can return values that are compatibles * with a given type. * @param col column index * @param c type to check * @return true if types are compatibles */ boolean canGet(int col, Class<?> c); /** * Checks if the set method can accept for a specific column values that * are compatible with a given type. * @param col column index * @param c type to check * @return true if the types compatibles */ boolean canSet(int col, Class<?> c); /** * Checks if the getValue method can return values that are compatibles * with a given type. 
* @param field column name * @param c type to check * @return true if the types are compatibles */ boolean canGet(String field, Class<?> c); /** * Checks if the set method can accept for a specific column values that * are compatible with a given type. * @param field column name * @param c type to check * @return true if the types compatibles */ boolean canSet(String field, Class<?> c); /** * Get the data type of the column with the given data field name. * @param field the column name * @return the data type (as a Java Class) of the column */ Class<?> getColumnType(String field); /** * Internal method indicating if the given data field is included as a * data column. * @param name name to seek * @return true if the name exists. */ boolean hasColumn(String name); /** * Gets the column name. * @param col column index * @return name of the column */ String getColumnName(int col); /** * Get the column number for a given data field name. * @param field the name of the column to lookup * @return the column number of the column, or -1 if the name is not found */ int getColumnIndex(String field); /** * Add a column with the given name and data type to this schema. * It throws a runtime exception when the column name already exists. * @param name name of the column * @param type the data type, as a Java Class, for the column * @param defaultValue the default value for the column * @return the column index */ int addColumn(String name, Class<?> type, Object defaultValue); // /** // * Add a derived column to this table, using an Expression instance to // * dynamically calculate the column data values. // * @param name the data field name for the column // * @param expr the Expression that will determine the column values // */ // public void addColumn(String name, Expression expr); /** * Removes a column. * @param field name of column to remove * @return true if removed */ boolean removeColumn(String field); /** * Removes a column. * @param col column index * @return true if removed */ boolean removeColumn(int col); /** * Gets the corresponding schema without internal columns. * @return a schema only composed by data columns */ Schema getDataSchema(); }
jdfekete/obvious
obvious/src/main/java/obvious/data/Schema.java
Java
bsd-3-clause
6,661
// Covers TCPWRAP and related TCPCONNECTWRAP
'use strict';

const common = require('../common');
const assert = require('assert');
const tick = require('./tick');
const initHooks = require('./init-hooks');
const { checkInvocations } = require('./hook-checks');

if (!common.hasIPv6) {
  common.skip('IPv6 support required');
  return;
}

const net = require('net');

let tcp1, tcp2, tcp3;
let tcpconnect;

const hooks = initHooks();
hooks.enable();

const server = net
  .createServer(common.mustCall(onconnection))
  .on('listening', common.mustCall(onlistening));

// Calling server.listen creates a TCPWRAP synchronously
{
  server.listen(common.PORT);
  const tcps = hooks.activitiesOfTypes('TCPWRAP');
  const tcpconnects = hooks.activitiesOfTypes('TCPCONNECTWRAP');
  assert.strictEqual(tcps.length, 1);
  assert.strictEqual(tcpconnects.length, 0);
  tcp1 = tcps[0];
  assert.strictEqual(tcp1.type, 'TCPWRAP');
  assert.strictEqual(typeof tcp1.uid, 'number');
  assert.strictEqual(typeof tcp1.triggerId, 'number');
  checkInvocations(tcp1, { init: 1 }, 'when calling server.listen');
}

// Calling net.connect creates another TCPWRAP synchronously
{
  net.connect(
    { port: server.address().port, host: server.address().address },
    common.mustCall(onconnected));
  const tcps = hooks.activitiesOfTypes('TCPWRAP');
  assert.strictEqual(tcps.length, 2);
  process.nextTick(() => {
    const tcpconnects = hooks.activitiesOfTypes('TCPCONNECTWRAP');
    assert.strictEqual(tcpconnects.length, 1);
  });

  tcp2 = tcps[1];
  assert.strictEqual(tcps.length, 2);
  assert.strictEqual(tcp2.type, 'TCPWRAP');
  assert.strictEqual(typeof tcp2.uid, 'number');
  assert.strictEqual(typeof tcp2.triggerId, 'number');

  checkInvocations(tcp1, { init: 1 }, 'tcp1 when client is connecting');
  checkInvocations(tcp2, { init: 1 }, 'tcp2 when client is connecting');
}

function onlistening() {
  assert.strictEqual(hooks.activitiesOfTypes('TCPWRAP').length, 2);
}

// Depending on timing, either the client's 'onconnected' or the server's
// 'onconnection' fires first. Therefore we can't depend on any ordering,
// but when we see a connection for the first time we assign the
// tcpconnectwrap.
function ontcpConnection(serverConnection) {
  if (tcpconnect != null) {
    // When the client receives the connection first ('onconnected') and the
    // server second, we see an 'after' here; otherwise we don't.
    const expected = serverConnection ?
      { init: 1, before: 1, after: 1 } :
      { init: 1, before: 1 };
    checkInvocations(
      tcpconnect, expected,
      'tcpconnect: when both client and server received connection');
    return;
  }

  // only focusing on TCPCONNECTWRAP here
  const tcpconnects = hooks.activitiesOfTypes('TCPCONNECTWRAP');
  assert.strictEqual(tcpconnects.length, 1);
  tcpconnect = tcpconnects[0];
  assert.strictEqual(tcpconnect.type, 'TCPCONNECTWRAP');
  assert.strictEqual(typeof tcpconnect.uid, 'number');
  assert.strictEqual(typeof tcpconnect.triggerId, 'number');
  // When the client receives the connection first ('onconnected'), 'before'
  // has already been invoked at this point; otherwise only 'init' has run.
  const expected = serverConnection ? { init: 1 } : { init: 1, before: 1 };
  checkInvocations(tcpconnect, expected,
                   'tcpconnect: when tcp connection is established');
}

let serverConnected = false;
function onconnected() {
  ontcpConnection(false);
  // If the client connects before the server accepts, the TCPWRAP 'before'
  // and 'after' hooks haven't been invoked yet. Also @see ontcpConnection.
  const expected = serverConnected ?
{ init: 1, before: 1, after: 1 } : { init: 1 }; checkInvocations(tcp1, expected, 'tcp1 when client connects'); checkInvocations(tcp2, { init: 1 }, 'tcp2 when client connects'); } function onconnection(c) { serverConnected = true; ontcpConnection(true); const tcps = hooks.activitiesOfTypes([ 'TCPWRAP' ]); const tcpconnects = hooks.activitiesOfTypes('TCPCONNECTWRAP'); assert.strictEqual(tcps.length, 3); assert.strictEqual(tcpconnects.length, 1); tcp3 = tcps[2]; assert.strictEqual(tcp3.type, 'TCPWRAP'); assert.strictEqual(typeof tcp3.uid, 'number'); assert.strictEqual(typeof tcp3.triggerId, 'number'); checkInvocations(tcp1, { init: 1, before: 1 }, 'tcp1 when server receives connection'); checkInvocations(tcp2, { init: 1 }, 'tcp2 when server receives connection'); checkInvocations(tcp3, { init: 1 }, 'tcp3 when server receives connection'); c.end(); this.close(common.mustCall(onserverClosed)); } function onserverClosed() { checkInvocations(tcp1, { init: 1, before: 1, after: 1, destroy: 1 }, 'tcp1 when server is closed'); checkInvocations(tcp2, { init: 1, before: 2, after: 2, destroy: 1 }, 'tcp2 when server is closed'); checkInvocations(tcp3, { init: 1, before: 1, after: 1 }, 'tcp3 synchronously when server is closed'); tick(2, () => { checkInvocations(tcp3, { init: 1, before: 2, after: 2, destroy: 1 }, 'tcp3 when server is closed'); checkInvocations(tcpconnect, { init: 1, before: 1, after: 1, destroy: 1 }, 'tcpconnect when server is closed'); }); } process.on('exit', onexit); function onexit() { hooks.disable(); hooks.sanityCheck([ 'TCPWRAP', 'TCPCONNECTWRAP' ]); checkInvocations(tcp1, { init: 1, before: 1, after: 1, destroy: 1 }, 'tcp1 when process exits'); checkInvocations( tcp2, { init: 1, before: 2, after: 2, destroy: 1 }, 'tcp2 when process exits'); checkInvocations( tcp3, { init: 1, before: 2, after: 2, destroy: 1 }, 'tcp3 when process exits'); checkInvocations( tcpconnect, { init: 1, before: 1, after: 1, destroy: 1 }, 'tcpconnect when process exits'); }
RPGOne/Skynet
node-master/test/async-hooks/test-tcpwrap.js
JavaScript
bsd-3-clause
5,807
import java.awt.Component; import javax.swing.JFrame; public class Game extends JFrame { private static final long serialVersionUID = -7803629994015778818L; private static final int WIDTH = 1024; private static final int HEIGHT = 768; public Game() { super("Super UnExtreme Tower Defense X"); setSize(WIDTH,HEIGHT); Background theGame = new Background(WIDTH, HEIGHT); ((Component)theGame).setFocusable(true); setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); setResizable(false); getContentPane().add(theGame); setVisible(true); } public static void main(String args[]) { new Game(); } }
CrimsonVoid/Super-Tower-Defense
src/Game.java
Java
bsd-3-clause
649
<?php namespace Sousrubrique; /** * Class Module * @package Sousrubrique */ class Module { /** * @return mixed */ public function getConfig(){ return include __DIR__ . '/config/module.config.php'; } }
rongeb/anit_cms_for_zf3
module/Sousrubrique/Module.php
PHP
bsd-3-clause
230
/* * Copyright (C) 2002, 2003 The Karbon Developers * Copyright (C) 2006 Alexander Kellett <lypanov@kde.org> * Copyright (C) 2006, 2007 Rob Buis <buis@kde.org> * Copyright (C) 2007, 2009 Apple Inc. All rights reserved. * Copyright (C) Research In Motion Limited 2010. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public License * along with this library; see the file COPYING.LIB. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. */ #ifndef SVGPathParser_h #define SVGPathParser_h #include "core/CoreExport.h" #include "core/svg/SVGPathData.h" #include "platform/heap/Handle.h" namespace blink { enum PathParsingMode { NormalizedParsing, UnalteredParsing }; class SVGPathConsumer; class SVGPathSource; class CORE_EXPORT SVGPathParser final { WTF_MAKE_NONCOPYABLE(SVGPathParser); STACK_ALLOCATED(); public: SVGPathParser(SVGPathSource* source, SVGPathConsumer* consumer) : m_source(source) , m_consumer(consumer) { ASSERT(m_source); ASSERT(m_consumer); } bool parsePathDataFromSource(PathParsingMode pathParsingMode, bool checkForInitialMoveTo = true) { ASSERT(m_source); ASSERT(m_consumer); if (checkForInitialMoveTo && !initialCommandIsMoveTo()) return false; if (pathParsingMode == NormalizedParsing) return parseAndNormalizePath(); return parsePath(); } private: bool initialCommandIsMoveTo(); bool parsePath(); bool parseAndNormalizePath(); SVGPathSource* m_source; SVGPathConsumer* m_consumer; }; } // namespace blink #endif // SVGPathParser_h
Bysmyyr/chromium-crosswalk
third_party/WebKit/Source/core/svg/SVGPathParser.h
C
bsd-3-clause
2,256
# -*- mode: cmake; -*- # - Try to find rocksdb include dirs and libraries # Usage of this module as follows: # This file defines: # * ROCKSDB_FOUND if protoc was found # * ROCKSDB_LIBRARY The lib to link to (currently only a static unix lib, not # portable) # * ROCKSDB_INCLUDE The include directories for rocksdb. include(FindPackageHandleStandardArgs) # set defaults SET(_rocksdb_HOME "/opt/rocksdb") SET(_rocksdb_INCLUDE_SEARCH_DIRS ${CMAKE_INCLUDE_PATH} /usr/local/include /usr/include /opt/rocksdb/include ) SET(_rocksdb_LIBRARIES_SEARCH_DIRS ${CMAKE_LIBRARY_PATH} /usr/local/lib /usr/lib /opt/rocksdb ) ## if( "${ROCKSDB_HOME}" STREQUAL "") if("" MATCHES "$ENV{ROCKSDB_HOME}") set (ROCKSDB_HOME ${_rocksdb_HOME}) else("" MATCHES "$ENV{ROCKSDB_HOME}") set (ROCKSDB_HOME "$ENV{ROCKSDB_HOME}") endif("" MATCHES "$ENV{ROCKSDB_HOME}") else( "${ROCKSDB_HOME}" STREQUAL "") message(STATUS "ROCKSDB_HOME is not empty: \"${ROCKSDB_HOME}\"") endif( "${ROCKSDB_HOME}" STREQUAL "") ## IF( NOT ${ROCKSDB_HOME} STREQUAL "" ) SET(_rocksdb_INCLUDE_SEARCH_DIRS ${ROCKSDB_HOME}/include ${_rocksdb_INCLUDE_SEARCH_DIRS}) SET(_rocksdb_LIBRARIES_SEARCH_DIRS ${ROCKSDB_HOME}/lib ${_rocksdb_LIBRARIES_SEARCH_DIRS}) SET(_rocksdb_HOME ${ROCKSDB_HOME}) ENDIF( NOT ${ROCKSDB_HOME} STREQUAL "" ) IF( NOT $ENV{ROCKSDB_INCLUDEDIR} STREQUAL "" ) SET(_rocksdb_INCLUDE_SEARCH_DIRS $ENV{ROCKSDB_INCLUDEDIR} ${_rocksdb_INCLUDE_SEARCH_DIRS}) ENDIF( NOT $ENV{ROCKSDB_INCLUDEDIR} STREQUAL "" ) IF( NOT $ENV{ROCKSDB_LIBRARYDIR} STREQUAL "" ) SET(_rocksdb_LIBRARIES_SEARCH_DIRS $ENV{ROCKSDB_LIBRARYDIR} ${_rocksdb_LIBRARIES_SEARCH_DIRS}) ENDIF( NOT $ENV{ROCKSDB_LIBRARYDIR} STREQUAL "" ) IF( ROCKSDB_HOME ) SET(_rocksdb_INCLUDE_SEARCH_DIRS ${ROCKSDB_HOME}/include ${_rocksdb_INCLUDE_SEARCH_DIRS}) SET(_rocksdb_LIBRARIES_SEARCH_DIRS ${ROCKSDB_HOME}/lib ${_rocksdb_LIBRARIES_SEARCH_DIRS}) SET(_rocksdb_HOME ${ROCKSDB_HOME}) ENDIF( ROCKSDB_HOME ) # find the include files FIND_PATH(ROCKSDB_INCLUDE_DIR rocksdb/db.h HINTS ${_rocksdb_INCLUDE_SEARCH_DIRS} ${PC_ROCKSDB_INCLUDEDIR} ${PC_ROCKSDB_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH} ) # locate the library if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") # On MacOS set(ROCKSDB_LIBRARY_NAMES librocksdb.dylib) set(ROCKSDB_LITE_LIBRARY_NAMES librocksdb_lite.dylib) elseif(${CMAKE_SYSTEM_NAME} MATCHES "Linux") # On Linux set(ROCKSDB_LIBRARY_NAMES librocksdb.so) set(ROCKSDB_LITE_LIBRARY_NAMES librocksdb_lite.so) else() set(ROCKSDB_LIBRARY_NAMES librocksdb.a) set(ROCKSDB_LITE_LIBRARY_NAMES librocksdb_lite.a) endif() set(ROCKSDB_STATIC_LIBRARY_NAMES librocksdb.a) set(ROCKSDB_LITE_STATIC_LIBRARY_NAMES librocksdb_lite.a) find_library(ROCKSDB_LIBRARIES NAMES ${ROCKSDB_LIBRARY_NAMES} HINTS ${_rocksdb_LIBRARIES_SEARCH_DIRS} ) find_library(ROCKSDB_LITE_LIBRARIES NAMES ${ROCKSDB_LITE_LIBRARY_NAMES} HINTS ${_rocksdb_LIBRARIES_SEARCH_DIRS} ) find_library(ROCKSDB_STATIC_LIBRARY NAMES ${ROCKSDB_STATIC_LIBRARY_NAMES} HINTS ${_rocksdb_LIBRARIES_SEARCH_DIRS} ) find_library(ROCKSDB_LITE_STATIC_LIBRARY NAMES ${ROCKSDB_LITE_STATIC_LIBRARY_NAMES} HINTS ${_rocksdb_LIBRARIES_SEARCH_DIRS} ) find_library(ROCKSDB_SNAPPY_LIBRARY NAMES libsnappy.a HINTS ${_rocksdb_LIBRARIES_SEARCH_DIRS} ) # If the lite library was found, override and prefer LITE. 
if(NOT ${ROCKSDB_LITE_LIBRARIES} STREQUAL "ROCKSDB_LITE_LIBRARIES-NOTFOUND") set(ROCKSDB_LIBRARIES ${ROCKSDB_LITE_LIBRARIES}) set(ROCKSDB_LITE_FOUND "YES") endif() if(NOT ${ROCKSDB_LITE_STATIC_LIBRARY} STREQUAL "ROCKSDB_LITE_STATIC_LIBRARY-NOTFOUND") set(ROCKSDB_STATIC_LIBRARY ${ROCKSDB_LITE_STATIC_LIBRARY}) set(ROCKSDB_LITE_FOUND "YES") endif() # If shared libraries are not found, fall back to static. # If not explicitly building using shared libraries, prefer static libraries. if(${ROCKSDB_LIBRARIES} STREQUAL "ROCKSDB_LIBRARIES-NOTFOUND" OR NOT DEFINED $ENV{BUILD_LINK_SHARED}) set(ROCKSDB_LIBRARIES ${ROCKSDB_STATIC_LIBRARY} ${ROCKSDB_SNAPPY_LIBRARY}) LOG_LIBRARY(rocksdb "${ROCKSDB_STATIC_LIBRARY}") LOG_LIBRARY(snappy "${ROCKSDB_SNAPPY_LIBRARY}") else() LOG_LIBRARY(rocksdb "${ROCKSDB_LIBRARIES}") endif() # if the include and the program are found then we have it if(ROCKSDB_INCLUDE_DIR AND ROCKSDB_LIBRARIES) set(ROCKSDB_FOUND "YES") endif()
runt18/osquery
CMake/FindRocksDB.cmake
CMake
bsd-3-clause
4,316
## Events | Name | Description | Attributes | |------|-------------|------------| | Events gathered by the agent cron plugin | CRON | exitCode, executionTime, output, user, cronJob |
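
For illustration only, a single event of this type might be represented roughly as shown below. The attribute names come from the table above; the values, units, and the surrounding structure are assumptions rather than part of the plugin documentation.

```python
# Illustrative sketch only: the attribute names are taken from the table above;
# the values and the dict layout are assumptions.
cron_event = {
    "type": "CRON",
    "attributes": {
        "exitCode": 0,                        # exit status of the cron job
        "executionTime": 1.42,                # assumed to be seconds
        "output": "backup completed",         # captured job output
        "user": "root",                       # user the job ran as
        "cronJob": "/etc/cron.daily/backup",  # job that produced the event
    },
}
```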
CoScale/docs
_includes/plugins/cron.md
Markdown
bsd-3-clause
185
<!doctype html> <html> <head> <meta charset="UTF-8"> <title>{{page.title}} · Cyclopedia</title> <!-- Facebook --> <meta property="og:type" content="article"> <meta property="og:title" content="{{page.title}} · Cyclopedia"> <meta property="og:url" content="http://chris-noel-chan.github.io/large-data-website/index.html"> <!-- Image size: minimum 200x200; recommended 1500x1500 --> <meta property="og:image" content="http://chris-noel-chan.github.io/large-data-website/img/cyclopedia.svg"> <meta property="og:description" content="{{page.description}}"> <meta property="og:site_name" content="Bicycle Companion"> <!-- Twitter --> <meta property="twitter:card" content="summary"> <meta property="twitter:title" content="{{page.title}} · Cyclopedia"> <meta property="twitter:url" content="http://chris-noel-chan.github.io/large-data-website/index.html"> <!-- Image size: 120x120 --> <meta property="twitter:image" content="http://chris-noel-chan.github.io/large-data-website/img/cyclopedia.svg"> <meta property="twitter:description" content="{{page.description}}"> <link href="{{site.baseurl}}/css/normalize.css" rel="stylesheet"> <link href="{{site.baseurl}}/css/general.css" rel="stylesheet"> <link href='http://fonts.googleapis.com/css?family=Oswald:300,400,700' rel='stylesheet' type='text/css'> <link rel="shortcut icon" href="{{site.baseurl}}/img/favicon.ico"> <meta name="handheldfriendly" content="true"> <meta name="mobileoptimized" content="240"> <meta name="viewport" content="width=device-width,initial-scale=1"> <meta name="description" content="{{page.description}}"> <script src="{{site.baseurl}}/js/modernizr.js"></script> </head> <body> <ul class="skiplinks"> <li><a href="#main">Skip to primary content</a></li> <li><a href="#nav-bottom">Skip to navigation</a></li> </ul> <header id="header"> <a href="{{site.baseurl}}/index.html" rel="home"><img class="logo" src="{{site.baseurl}}/img/cyclopedia.svg" alt="cyclopedia logo"></a> <a href="#nav-bottom" class="nav-btn" data-state="disengaged"></a> {% include nav.html navclass='nav-top'%} </header> <div role="main" id="main" class="main-content"> {{content}} </div> <footer> {% include nav.html navclass='nav-bottom'%} <p role="contentinfo">© Chris Chan, BSD-3 Clause License</p> </footer> <script src="{{site.baseurl}}/js/$.min.js"></script> <script src="{{site.baseurl}}/js/navigation.js"></script> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-46026628-1', 'chris-noel-chan.github.io'); ga('send', 'pageview'); </script> </body> </html>
namasteChris/large-data-website
_layouts/default.html
HTML
bsd-3-clause
2,892
{% extends "base.html" %} {% block bodyclass %}about{% endblock %} {% block secondary_nav %} <li><a href="/about/">About this site</a></li> <li><a href="/about/faq/">FAQ</a></li> <li><a href="/about/tos/">Terms of Service</a></li> <li><a href="/about/legal/">Legal</a></li> {% endblock %}
yangjiandong/djangosnippets
cab/templates/base_about.html
HTML
bsd-3-clause
291
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from bambou import NURESTObject class NUVNFThresholdPolicy(NURESTObject): """ Represents a VNFThresholdPolicy in the VSD Notes: VNF Threshold Policy represents thresholds for resources consumed by VNF instance running on NS Gateway and action to be taken when resource utilization crosses configured thresholds. """ __rest_name__ = "vnfthresholdpolicy" __resource_name__ = "vnfthresholdpolicies" ## Constants CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ACTION_SHUTOFF = "SHUTOFF" CONST_ACTION_NONE = "NONE" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): """ Initializes a VNFThresholdPolicy instance Notes: You can specify all parameters while calling this methods. 
A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> vnfthresholdpolicy = NUVNFThresholdPolicy(id=u'xxxx-xxx-xxx-xxx', name=u'VNFThresholdPolicy') >>> vnfthresholdpolicy = NUVNFThresholdPolicy(data=my_dict) """ super(NUVNFThresholdPolicy, self).__init__() # Read/Write Attributes self._cpu_threshold = None self._name = None self._last_updated_by = None self._last_updated_date = None self._action = None self._memory_threshold = None self._description = None self._min_occurrence = None self._embedded_metadata = None self._entity_scope = None self._monit_interval = None self._creation_date = None self._assoc_entity_type = None self._storage_threshold = None self._owner = None self._external_id = None self.expose_attribute(local_name="cpu_threshold", remote_name="CPUThreshold", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="action", remote_name="action", attribute_type=str, is_required=False, is_unique=False, choices=[u'NONE', u'SHUTOFF']) self.expose_attribute(local_name="memory_threshold", remote_name="memoryThreshold", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="min_occurrence", remote_name="minOccurrence", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="monit_interval", remote_name="monitInterval", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="storage_threshold", remote_name="storageThreshold", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) # Fetchers self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) # Properties @property def cpu_threshold(self): """ Get cpu_threshold value. Notes: Threshold for CPU usage This attribute is named `CPUThreshold` in VSD API. 
""" return self._cpu_threshold @cpu_threshold.setter def cpu_threshold(self, value): """ Set cpu_threshold value. Notes: Threshold for CPU usage This attribute is named `CPUThreshold` in VSD API. """ self._cpu_threshold = value @property def name(self): """ Get name value. Notes: Name of VNF agent policy """ return self._name @name.setter def name(self, value): """ Set name value. Notes: Name of VNF agent policy """ self._name = value @property def last_updated_by(self): """ Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): """ Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ self._last_updated_by = value @property def last_updated_date(self): """ Get last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): """ Set last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ self._last_updated_date = value @property def action(self): """ Get action value. Notes: Action to be taken on threshold crossover """ return self._action @action.setter def action(self, value): """ Set action value. Notes: Action to be taken on threshold crossover """ self._action = value @property def memory_threshold(self): """ Get memory_threshold value. Notes: Threshold for memory usage This attribute is named `memoryThreshold` in VSD API. """ return self._memory_threshold @memory_threshold.setter def memory_threshold(self, value): """ Set memory_threshold value. Notes: Threshold for memory usage This attribute is named `memoryThreshold` in VSD API. """ self._memory_threshold = value @property def description(self): """ Get description value. Notes: Description of VNF agent policy """ return self._description @description.setter def description(self, value): """ Set description value. Notes: Description of VNF agent policy """ self._description = value @property def min_occurrence(self): """ Get min_occurrence value. Notes: Minimum number of threshold crossover occurrence during monitoring interval before taking specified action This attribute is named `minOccurrence` in VSD API. """ return self._min_occurrence @min_occurrence.setter def min_occurrence(self, value): """ Set min_occurrence value. Notes: Minimum number of threshold crossover occurrence during monitoring interval before taking specified action This attribute is named `minOccurrence` in VSD API. """ self._min_occurrence = value @property def embedded_metadata(self): """ Get embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): """ Set embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. 
Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ self._embedded_metadata = value @property def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ return self._entity_scope @entity_scope.setter def entity_scope(self, value): """ Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ self._entity_scope = value @property def monit_interval(self): """ Get monit_interval value. Notes: Monitoring interval (minutes) for threshold crossover occurrences to be considered This attribute is named `monitInterval` in VSD API. """ return self._monit_interval @monit_interval.setter def monit_interval(self, value): """ Set monit_interval value. Notes: Monitoring interval (minutes) for threshold crossover occurrences to be considered This attribute is named `monitInterval` in VSD API. """ self._monit_interval = value @property def creation_date(self): """ Get creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ return self._creation_date @creation_date.setter def creation_date(self, value): """ Set creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ self._creation_date = value @property def assoc_entity_type(self): """ Get assoc_entity_type value. Notes: Type of the entity to which the Metadata is associated to. This attribute is named `assocEntityType` in VSD API. """ return self._assoc_entity_type @assoc_entity_type.setter def assoc_entity_type(self, value): """ Set assoc_entity_type value. Notes: Type of the entity to which the Metadata is associated to. This attribute is named `assocEntityType` in VSD API. """ self._assoc_entity_type = value @property def storage_threshold(self): """ Get storage_threshold value. Notes: Threshold for storage usage This attribute is named `storageThreshold` in VSD API. """ return self._storage_threshold @storage_threshold.setter def storage_threshold(self, value): """ Set storage_threshold value. Notes: Threshold for storage usage This attribute is named `storageThreshold` in VSD API. """ self._storage_threshold = value @property def owner(self): """ Get owner value. Notes: Identifies the user that has created this object. """ return self._owner @owner.setter def owner(self, value): """ Set owner value. Notes: Identifies the user that has created this object. """ self._owner = value @property def external_id(self): """ Get external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ return self._external_id @external_id.setter def external_id(self, value): """ Set external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API. """ self._external_id = value
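
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated SDK code above).
# It relies only on the constructor pattern shown in the class docstring and
# on the properties/constants defined in this module; the policy name and the
# threshold values are assumptions chosen for the example.
#
#     policy = NUVNFThresholdPolicy(name=u'cpu-memory-guard')
#     policy.cpu_threshold = 80       # CPUThreshold: threshold for CPU usage
#     policy.memory_threshold = 75    # memoryThreshold: threshold for memory usage
#     policy.storage_threshold = 90   # storageThreshold: threshold for storage usage
#     policy.monit_interval = 5       # monitInterval: monitoring window in minutes
#     policy.min_occurrence = 3       # minOccurrence: crossovers needed before acting
#     policy.action = NUVNFThresholdPolicy.CONST_ACTION_SHUTOFF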
nuagenetworks/vspk-python
vspk/v6/nuvnfthresholdpolicy.py
Python
bsd-3-clause
16,674
/* General warning: Beta-ish. Code could be a bit cleaner. */ .ui360, .ui360 * { position:relative; } .ui360, .sm2-360ui { /* size of the container for the circle, etc. */ width:50px; height:50px; } .ui360 { position:relative; /* a little extra spacing */ padding-top:1px; padding-bottom:1px; margin-bottom:-18px; /* approximate "line height" we want */ margin-left:42px; /* 50px, with a few off */ } .ui360 a { line-height:50px; } .sm2-360ui { margin-left:-50px; } .ui360 { width:auto; } .ui360, .ui360 * { vertical-align:middle; } .sm2-360ui { position:relative; display:inline-block; /* firefox 3 et al */ float:left; /* firefox 2 needs this, inline-block would work with fx3 and others */ *float:left; /* IE 6+7 */ *display:inline; *clear:left; } .sm2-360ui.sm2_playing, .sm2-360ui.sm2_paused { /* bump on top when active */ z-index:10; } .ui360 a.sm2_link { /* this class added to playable links by SM2 */ position:relative; } .ui360 a { color:#000; text-decoration:none; } .ui360 a, .ui360 a:hover, .ui360 a:focus { padding:2px; margin-left:-2px; margin-top:-2px; } .ui360 a:hover, .ui360 a:focus { background:#eee; -moz-border-radius:3px; -webkit-border-radius:3px; -khtml-border-radius:3px; border-radius:3px; outline:none; } .ui360 .sm2-canvas { position:absolute; left:0px; top:0px; } .ui360 .sm2-timing { position:absolute; display:block; left:0px; top:0px; width:100%; height:100%; margin:0px; font:11px "helvetica neue",helvetica,monaco,lucida,terminal,monospace; color:#666; text-align:center; line-height:50px; } .ui360 .sm2-timing.alignTweak { text-indent:1px; /* devious center-alignment tweak for Safari (might break things for others.) */ } .ui360 .sm2-cover { position:absolute; left:0px; top:0px; z-index:2; display:none; } .ui360 .sm2-360btn { position:absolute; top:50%; left:50%; width:22px; height:22px; margin-left:-11px; margin-top:-11px; cursor:pointer; z-index:3; } .ui360 .sm2-360btn-default { } .ui360 .sm2-360data { display:inline-block; font-family:helvetica; } .ui360 .sm2-360ui.sm2_playing .sm2-cover, .ui360 .sm2-360ui.sm2_paused .sm2-cover { display:block; } /* this could be optimized a fair bit. 
*/ .ui360 .sm2-360btn-default { background:transparent url(360-button-play.png) no-repeat 50% 50%; _background:transparent url(360-button-play.gif) no-repeat 50% 50%; /* IE 6-only: special crap GIF */ cursor:pointer; } .ui360 .sm2-360ui.sm2_paused .sm2-360btn { background:transparent url(360-button-play.png) no-repeat 50% 50%; _background:transparent url(360-button-play.gif) no-repeat 50% 50%; cursor:pointer; } .ui360 .sm2-360btn-default:hover, .ui360 .sm2-360ui.sm2_paused .sm2-360btn:hover { background:transparent url(360-button-play-light.png) no-repeat 50% 50%; _background:transparent url(360-button-play.gif) no-repeat 50% 50%; cursor:pointer; } .ui360 .sm2-360ui.sm2_playing .sm2-360btn:hover, .ui360 .sm2-360btn-playing:hover { background:transparent url(360-button-pause-light.png) no-repeat 50% 50%; _background:transparent url(360-button-pause-light.gif) no-repeat 50% 50%; cursor:pointer; } .ui360 .sm2-360ui.sm2_playing .sm2-timing { visibility:visible; } .ui360 .sm2-360ui.sm2_buffering .sm2-timing { visibility:hidden; } .ui360 .sm2-360ui .sm2-timing, .ui360 .sm2-360ui .sm2-360btn:hover + .sm2-timing, .ui360 .sm2-360ui.sm2_paused .sm2-timing { visibility:hidden; } .ui360 .sm2-360ui.sm2_dragging .sm2-timing, .ui360 .sm2-360ui.sm2_dragging .sm2-360btn:hover + .sm2-timing { /* paused + dragging */ visibility:visible; } .ui360 .sm2-360ui.sm2_playing .sm2-360btn, x.ui360 .sm2-360btn-playing, .ui360 .sm2-360ui.sm2_dragging .sm2-360btn, .ui360 .sm2-360ui.sm2_dragging .sm2-360btn:hover, .ui360 .sm2-360ui.sm2_dragging .sm2-360btn-playing:hover { /* don't let pause button show on hover when dragging (or paused and dragging) */ background:transparent; cursor:auto; } .ui360 .sm2-360ui.sm2_buffering .sm2-360btn, .ui360 .sm2-360ui.sm2_buffering .sm2-360btn:hover { background:transparent url(icon_loading_spinner.gif) no-repeat 50% 50%; opacity:0.5; visibility:visible; } /* inline list style */ .sm2-inline-list .ui360, .sm2-inline-block .ui360 { position:relative; display:inline-block; float:left; _display:inline; margin-bottom:-15px; } .sm2-inline-list .ui360 { margin-bottom:0px; } .sm2-inline-block .ui360 { margin-right:8px; } .sm2-inline-list .ui360 a { display:none; } /* annotations */ ul.ui360playlist { list-style-type:none; } ul.ui360playlist, ul.ui360playlist li { margin:0px; padding:0px; } div.ui360 div.metadata { display:none; } div.ui360 a span.metadata, div.ui360 a span.metadata * { /* name of track, note etc. */ vertical-align:baseline; }
jwheare/soundmanager2
demo/360-player/360player.css
CSS
bsd-3-clause
5,016
<?php /** * This file is part of prooph/pdo-event-store. * (c) 2016-2022 Alexander Miertsch <kontakt@codeliner.ws> * (c) 2016-2022 Sascha-Oliver Prolic <saschaprolic@googlemail.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ declare(strict_types=1); namespace ProophTest\EventStore\Pdo\Projection; use Prooph\Common\Messaging\FQCNMessageFactory; use Prooph\EventStore\Pdo\PersistenceStrategy\PostgresSimpleStreamStrategy; use Prooph\EventStore\Pdo\PostgresEventStore; use Prooph\EventStore\Pdo\Projection\PostgresProjectionManager; use Prooph\EventStore\Pdo\Util\PostgresHelper; use ProophTest\EventStore\Mock\ReadModelMock; use ProophTest\EventStore\Mock\UserCreated; use ProophTest\EventStore\Pdo\TestUtil; /** * @group postgres */ class PostgresEventStoreReadModelProjectorCustomSchemaTest extends PdoEventStoreReadModelProjectorCustomSchemaTest { use PostgresHelper; protected function setUp(): void { if (TestUtil::getDatabaseDriver() !== 'pdo_pgsql') { throw new \RuntimeException('Invalid database vendor'); } $this->connection = TestUtil::getConnection(); TestUtil::initCustomSchemaDatabaseTables($this->connection); $this->eventStore = new PostgresEventStore( new FQCNMessageFactory(), TestUtil::getConnection(), new PostgresSimpleStreamStrategy(), 10000, $this->eventStreamsTable() ); $this->projectionManager = new PostgresProjectionManager( $this->eventStore, $this->connection, $this->eventStreamsTable(), $this->projectionsTable() ); } /** * @test */ public function it_handles_missing_projection_table(): void { $this->expectException(\Prooph\EventStore\Pdo\Exception\RuntimeException::class); $this->expectExceptionMessage("Error 42P01. Maybe the projection table is not setup?\nError-Info: ERROR: relation \"{$this->projectionsTable()}\" does not exist\nLINE 1: SELECT status FROM"); $this->prepareEventStream('prooph.user-123'); $this->connection->exec("DROP TABLE {$this->quoteIdent($this->projectionsTable())};"); $projection = $this->projectionManager->createReadModelProjection('test_projection', new ReadModelMock()); $projection ->fromStream('prooph.user-123') ->when([ UserCreated::class => function (array $state, UserCreated $event): array { $this->stop(); return $state; }, ]) ->run(); } }
prooph/pdo-event-store
tests/Projection/PostgresEventStoreReadModelProjectorCustomSchemaTest.php
PHP
bsd-3-clause
2,725
/*================================================================================ Copyright (c) 2013 Steve Jin. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of VMware, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================*/ package com.vmware.vim25; /** * @author Steve Jin (http://www.doublecloud.org) * @version 5.1 */ @SuppressWarnings("all") public class OvfCpuCompatibilityCheckNotSupported extends OvfImport { }
paksv/vijava
src/com/vmware/vim25/OvfCpuCompatibilityCheckNotSupported.java
Java
bsd-3-clause
1,837
<?php namespace Admin\Form; use Zend\Form\Form; class SearchForm extends Form { public function __construct($name = null) { // we want to ignore the name passed parent::__construct('search'); $this->setAttribute('method', 'get'); $this->add(array( 'name' => 'action', 'attributes' => array( 'type' => 'hidden', 'value' => 'search' ), 'options' => array( 'label' => 'action', ), )); $this->add(array( 'name' => 'query', 'attributes' => array( 'type' => 'text', 'class' => 'text-input small-input', ), 'options' => array( 'label' => 'Text', ), )); $this->add(array( 'type' => 'Zend\Form\Element\Select', 'name' => 'visible', 'attributes' => array( 'class' => 'small-input', 'options' => array( '' => 'All', '1' => 'Yes', '0' => 'No', ), ), 'options' => array( 'label' => 'Visible', ), )); } }
mafia99/Abbelire
module/Admin/src/Admin/Form/SearchForm.php
PHP
bsd-3-clause
1,049
Pop PHP Framework
=================

Documentation : Http
--------------------

Home

The HTTP component provides an easy-to-use API for managing, accessing and manipulating HTTP requests and responses. It is used in several components, but is most closely integrated with the MVC component for managing requests and responses within that component.

    use Pop\Http\Request,
        Pop\Http\Response;

    // Create a request object and access the data and information
    $request = new Request();

    echo $request->getRequestUri();

    if ($request->isPost()) {
        print_r($request->getPost());
    }

    // Create a response object and send
    $response = new Response();
    $response->setHeader('content-type', 'text/html')
             ->setBody('<html><body>This is some HTML.</body></html>')
             ->send();

\(c) 2009-2014 [Moc 10 Media, LLC.](http://www.moc10media.com) All Rights Reserved.
nicksagona/PopPHP
vendor/PopPHPFramework/docs/md/de/Http.md
Markdown
bsd-3-clause
942
<html> <head> <meta http-equiv="Content-type" content="text/html; charset=utf-8"> <title>Set tracking cookie</title> </head> <body onLoad="javascript:pageTracker._setVar('sr_internal');"> <p>cookie set</p> <script type="text/javascript"> var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www."); document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E")); </script> <script type="text/javascript"> try { var pageTracker = _gat._getTracker("UA-253253-4"); pageTracker._trackPageview(); } catch(err) {}</script> </body> </html>
smithandrobot/smithrobot-website
smithandrobot/templates/tracking_cookie.html
HTML
bsd-3-clause
644
# pyhtslib -- HTS file access from Python

The Python library pyhtslib is a wrapper around [htslib](http://www.htslib.org/), a C library for accessing files used for high-throughput sequencing (HTS) file formats. The aim of pyhtslib is to provide the I/O functionality from htslib through an easy-to-use and well-tested Python interface.

## Status

Read access to the following formats works:

- SAM/BAM -- sequential and indexed (BAM through `.bai` files, SAM through tabix indices)
- VCF/BCF -- sequential and indexed (BCF through `.csi` files, VCF through tabix indices)
- tabix -- reading of arbitrary TSV files
- FAI -- indexed FASTA

What is missing:

- writing of files [v0.6]
- CRAM support [v0.5]
- sequential FASTA and FASTQ through the `kseq.h` library [v0.4]

Other things on the roadmap for a v1.0:

- support for Python 2 (a lot of Python Bioinformatics tools have not moved to Python 3 yet) [v0.2]
- comprehensive documentation [v0.2]
- switching to Cython instead of using `ctypes` [v0.3]

The plan is then to support the more advanced features of htslib:

- support for `bcf_synced_reader` [v1.2]
- more lazy loading of VCF properties [v1.1]
- pileup functionality [v1.3]
- reading of multiple sorted BAM files at the same time [v1.4]

## Contributors

- Manuel Holtgrewe, Berlin Institute of Health/Charite University Medicine Berlin
holtgrewe/pyhtslib
README.md
Markdown
bsd-3-clause
1,356
from __future__ import division, print_function, absolute_import #from pnet.vzlog import default as vz import numpy as np import amitgroup as ag import itertools as itr import sys import os #import gv import pnet import time def test(ims, labels, net): yhat = net.classify(ims) return yhat == labels if pnet.parallel.main(__name__): print("1") import argparse parser = argparse.ArgumentParser() #parser.add_argument('seed', metavar='<seed>', type=int, help='Random seed') parser.add_argument('param', metavar='<param>', type=float) args0 = parser.parse_args() param = args0.param #for i in xrange(1, 7): # print(make_support(i, 4).astype(np.uint8)) #params = randomize_layers_parameters(args0.seed) #print(params) unsup_training_times = [] sup_training_times = [] testing_times = [] error_rates = [] all_num_parts = [] maxdepth = 7 print("2") # Switch which experiment here #from pnet.mnist_danny import parse_background_random as loadf from pnet.mnist_danny import parse_background_images as loadf print("Loading...") mnist_data = loadf() print("Done.") for training_seed in xrange(1): layers = [ #pnet.IntensityThresholdLayer(), pnet.EdgeLayer(k=5, radius=1, spread='orthogonal', minimum_contrast=0.05),#, pre_blurring=1.0), #pnet.IntensityThresholdLayer(), #pnet.IntensityThresholdLayer(), pnet.PartsLayer(250, (7, 7), settings=dict(outer_frame=1, em_seed=training_seed, threshold=2, samples_per_image=60, max_samples=200000, train_limit=10000, min_prob=0.00005, )), pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)), ] if 0: layers += [ pnet.RandomForestPartsLayer(256, (5, 5), settings=dict(outer_frame=1, em_seed=training_seed, threshold=2, samples_per_image=40, max_samples=200000, train_limit=10000, min_prob=0.0005, trees=10, max_depth=3, )), pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)), ] elif 0: layers += [ pnet.GaussianPartsLayer(100, (5, 5), settings=dict( em_seed=training_seed, samples_per_image=40, max_samples=200000, train_limit=100000, )), pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)), ] elif 0: layers += [ pnet.PartsLayer(1000, (5, 5), settings=dict(outer_frame=1, em_seed=training_seed, threshold=2, samples_per_image=40, max_samples=200000, train_limit=100000, min_prob=0.0005, )), pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)), ] elif 0: layers += [ pnet.BinaryTreePartsLayer(maxdepth, (5, 5), settings=dict(outer_frame=1, em_seed=training_seed, threshold=2, samples_per_image=40, max_samples=200000, train_limit=10000, min_prob=0.005, #keypoint_suppress_radius=1, min_samples_per_part=50, split_criterion='IG', split_entropy=0.2, min_information_gain=0.01, )), pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)), ] elif 0: layers += [ pnet.BinaryTreePartsLayer(maxdepth, (5, 5), settings=dict(outer_frame=1, em_seed=training_seed, threshold=2, samples_per_image=40, max_samples=200000, train_limit=10000, min_prob=0.0005, #keypoint_suppress_radius=1, min_samples_per_part=50, split_criterion=split_criterion, split_entropy=split_entropy, min_information_gain=split_entropy, )), pnet.PoolingLayer(shape=(4, 4), strides=(4, 4)), ] [ pnet.BinaryTreePartsLayer(10, (1, 1), settings=dict(outer_frame=0, em_seed=training_seed+1, threshold=1, samples_per_image=200, max_samples=1000000, train_limit=10000, #min_information_gain=0.05, split_entropy=0.05, min_prob=0.0005 )), pnet.PoolingLayer(shape=(1, 1), strides=(1, 1)), ] layers += [ pnet.MixtureClassificationLayer(n_components=1, min_prob=1e-5), #pnet.SVMClassificationLayer(C=None), ] net = pnet.PartsNet(layers) TRAIN_SAMPLES = 10000 #TRAIN_SAMPLES = 1200 
print(training_seed) digits = range(10) #ims = ag.io.load_mnist('training', selection=slice(0 + 3000 * training_seed, TRAIN_SAMPLES + 3000 * training_seed), return_labels=False) ims = mnist_data['training_image'][0 + 1000 * training_seed : TRAIN_SAMPLES + 1000 * training_seed] ims_label = mnist_data['training_label'][0 + 1000 * training_seed : TRAIN_SAMPLES + 1000 * training_seed] validation_ims = mnist_data['training_image'][10000:12000] validation_label = mnist_data['training_label'][10000:12000] #print(net.sizes(X[[0]])) print(ims.shape) start0 = time.time() net.train(ims) end0 = time.time() N = 1000 sup_ims = [] sup_labels = [] # Load supervised training data for d in digits: if N is None: ims0 = ims[ims_label == d] else: #ims0 = ag.io.load_mnist('training', [d], selection=slice(N*training_seed, N*(1+training_seed)), return_labels=False) ims0 = ims[ims_label == d] sup_ims.append(ims0) sup_labels.append(d * np.ones(len(ims0), dtype=np.int64)) sup_ims = np.concatenate(sup_ims, axis=0) sup_labels = np.concatenate(sup_labels, axis=0) #print('labels', np.bincount(sup_labels, minlength=10)) start1 = time.time() net.train(sup_ims, sup_labels) end1 = time.time() #print("Now testing...") ### Test ###################################################################### corrects = 0 total = 0 test_ims, test_labels = mnist_data['test_image'], mnist_data['test_label'] test_ims = validation_ims test_labels = validation_label # TEMP if 0: test_ims = test_ims[:1000] test_labels = test_labels[:1000] #with gv.Timer("Split to batches"): ims_batches = np.array_split(test_ims, 200) labels_batches = np.array_split(test_labels, 200) def format_error_rate(pr): return "{:.2f}%".format(100*(1-pr)) #import gv #with gv.Timer('Testing'): start2 = time.time() args = (tup+(net,) for tup in itr.izip(ims_batches, labels_batches)) for i, res in enumerate(pnet.parallel.starmap(test, args)): corrects += res.sum() total += res.size pr = corrects / total end2 = time.time() error_rate = 1.0 - pr num_parts = 0#net.layers[1].num_parts error_rates.append(error_rate) print(training_seed, 'error rate', error_rate * 100, 'num parts', num_parts)#, 'num parts 2', net.layers[3].num_parts) unsup_training_times.append(end0 - start0) sup_training_times.append(end1 - start1) testing_times.append(end2 - start2) #print('times', end0-start0, end1-start1, end2-start2) all_num_parts.append(num_parts) #vz.section('MNIST') #gv.img.save_image(vz.generate_filename(), test_ims[0]) #gv.img.save_image(vz.generate_filename(), test_ims[1]) #gv.img.save_image(vz.generate_filename(), test_ims[2]) # Vz #net.infoplot(vz) #vz.flush() net.save('tmp{}.npy'.format(training_seed)) print(r"{ppl} & {depth} & {num_parts} & {unsup_time:.1f} & {test_time:.1f} & ${rate:.2f} \pm {std:.2f}$ \\".format( ppl=2, depth=maxdepth, num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)), unsup_time=np.median(unsup_training_times) / 60, #sup_time=np.median(sup_training_times), test_time=np.median(testing_times) / 60, rate=100*np.mean(error_rates), std=100*np.std(error_rates))) print(r"{ppl} {depth} {num_parts} {unsup_time} {test_time} {rate} {std}".format( ppl=2, depth=maxdepth, num_parts=r'${:.0f} \pm {:.0f}$'.format(np.mean(all_num_parts), np.std(all_num_parts)), unsup_time=np.median(unsup_training_times) / 60, #sup_time=np.median(sup_training_times), test_time=np.median(testing_times) / 60, rate=100*np.mean(error_rates), std=100*np.std(error_rates))) #np.savez('gdata2-{}-{}-{}.npz'.format(maxdepth, split_criterion, split_entropy), 
all_num_parts=all_num_parts, unsup_time=unsup_training_times, test_time=testing_times, rates=error_rates) print('mean error rate', np.mean(error_rates) * 100) #net.save(args.model)
amitgroup/parts-net
scripts/train_and_test5.py
Python
bsd-3-clause
12,277
// ==LICENSE-BEGIN== // Copyright 2017 European Digital Reading Lab. All rights reserved. // Licensed to the Readium Foundation under one or more contributor license agreements. // Use of this source code is governed by a BSD-style license // that can be found in the LICENSE file exposed on Github (readium) in the project repository. // ==LICENSE-END import { debounce } from "debounce"; import { ipcRenderer } from "electron"; import { PDFDocumentProxy } from "readium-desktop/typings/pdf.js/display/api"; import { IEventPayload_R2_EVENT_WEBVIEW_KEYDOWN, IEventPayload_R2_EVENT_WEBVIEW_KEYUP, } from "@r2-navigator-js/electron/common/events"; import { eventBus } from "../common/eventBus"; import { IEventBusPdfPlayer, IPdfPlayerColumn, IPdfPlayerScale, IPdfPlayerView, } from "../common/pdfReader.type"; import { EventBus } from "./pdfEventBus"; // import { pdfReaderInit } from "./init"; import { getToc } from "./toc"; export interface IPdfState { view: IPdfPlayerView; scale: IPdfPlayerScale; column: IPdfPlayerColumn; lastPageNumber: number; displayPage: (pageNumber: number) => Promise<void>; } export type IPdfBus = IEventBusPdfPlayer; const pdfjsEventBus = new EventBus(); pdfjsEventBus.onAll((key: any) => (...arg: any[]) => console.log("PDFJS EVENTBUS", key, ...arg)); (window as any).pdfjsEventBus = pdfjsEventBus; const pdfDocument = new Promise<PDFDocumentProxy>((resolve) => pdfjsEventBus.on("__pdfdocument", (_pdfDocument: PDFDocumentProxy) => { resolve(_pdfDocument); })); function main() { const bus: IPdfBus = eventBus( (key, ...a) => { const data = { key: JSON.stringify(key), payload: JSON.stringify(a), }; ipcRenderer.sendToHost("pdf-eventbus", data); }, (ev) => { ipcRenderer.on("pdf-eventbus", (_event, message) => { try { const key = typeof message?.key !== "undefined" ? JSON.parse(message.key) : undefined; const data = typeof message?.payload !== "undefined" ? 
JSON.parse(message.payload) : []; console.log("ipcRenderer pdf-eventbus received", key, data); if (Array.isArray(data)) { ev(key, ...data); } } catch (e) { console.log("ipcRenderer pdf-eventbus received with parsing error", e); } }); }, ); const defaultView: IPdfPlayerView = "scrolled"; const defaultScale: IPdfPlayerScale = "page-fit"; const defaultCol: IPdfPlayerColumn = "1"; // start dispatched from webview dom ready bus.subscribe("start", async (pdfPath: string) => { pdfDocument.then(async (pdf) => { console.log("PDFDOC LOADED"); const toc = await getToc(pdf); console.log("TOC"); console.log(toc); bus.dispatch("toc", toc); bus.dispatch("numberofpages", pdf.numPages); }).catch((e) => console.error(e)); console.log("bus.subscribe start pdfPath", pdfPath); bus.dispatch("scale", defaultScale); bus.dispatch("view", defaultView); bus.dispatch("column", defaultCol); }); { pdfjsEventBus.on("__ready", () => { // send to reader.tsx ready to render pdf bus.dispatch("ready"); }); } // search { // https://github.com/mozilla/pdf.js/blob/c366390f6bb2fa303d0d85879afda2c27ee06c28/web/pdf_find_bar.js#L930 const dispatchEvent = (type: any, findPrev?: any) => { pdfjsEventBus.dispatch("find", { source: null, type, query: searchRequest, phraseSearch: true, caseSensitive: false, entireWord: false, highlightAll: true, findPrevious: findPrev, }); }; let searchRequest = ""; bus.subscribe("search", (txt) => { searchRequest = txt; dispatchEvent(""); }); bus.subscribe("search-next", () => { dispatchEvent("again", false); }); bus.subscribe("search-previous", () => { dispatchEvent("again", true); }); bus.subscribe("search-wipe", () => { pdfjsEventBus.dispatch("findbarclose", { source: null }); }); pdfjsEventBus.on("updatefindmatchescount", ({ matchesCount: { total = 0 /* current */ } }: any) => { bus.dispatch("search-found", total); }); } // spreadmode let colMode: IPdfPlayerColumn = defaultCol; { bus.subscribe("column", (col) => { pdfjsEventBus.dispatch("switchspreadmode", { mode: col === "auto" ? 0 : col === "1" ? 
0 : 1 }); // 1 = odd 2 = even bus.dispatch("column", col); colMode = col; }); } const p = new Promise<void>((resolve) => pdfjsEventBus.on("documentloaded", resolve)); // pagechange { bus.subscribe("page", (pageNumber) => { console.log("pageNumber from host", pageNumber); // tslint:disable-next-line: no-floating-promises p.then(() => { pdfjsEventBus.dispatch("pagenumberchanged", { source: null, value: pageNumber.toString(), }); }); }); const debounceUpdateviewarea = debounce(async (evt: any) => { try { const { location: { pageNumber } } = evt; console.log("pageNumber", pageNumber); bus.dispatch("page", pageNumber); } catch (e) { console.log("updateviewarea ERROR", e); } }, 500); pdfjsEventBus.on("updateviewarea", async (evt: any) => { await debounceUpdateviewarea(evt); }); bus.subscribe("page-next", () => { if (colMode === "2") { pdfjsEventBus.dispatch("nextpage"); } pdfjsEventBus.dispatch("nextpage"); }); bus.subscribe("page-previous", () => { if (colMode === "2") { pdfjsEventBus.dispatch("previouspage"); } pdfjsEventBus.dispatch("previouspage"); }); } // view let lockViewMode = false; { bus.subscribe("view", (view) => { if (view === "paginated") { pdfjsEventBus.dispatch("scalechanged", { value: "page-fit" }); bus.dispatch("scale", "page-fit"); document.body.className = "hidescrollbar"; lockViewMode = true; } else if (view === "scrolled") { document.body.className = ""; lockViewMode = false; } bus.dispatch("view", view); }); } // scale { bus.subscribe("scale", (scale) => { if (!lockViewMode) { pdfjsEventBus.dispatch("scalechanged", { value: typeof scale === "number" ? `${scale / 100}` : scale }); bus.dispatch("scale", scale); } }); pdfjsEventBus.on("scalechanging", ({/*_scale, */ presetValue }: any) => bus.dispatch("scale", presetValue)); } window.document.body.addEventListener("copy", (evt: ClipboardEvent) => { const selection = window.document.getSelection(); if (selection) { const str = selection.toString(); if (str) { evt.preventDefault(); setTimeout(() => { bus.dispatch("copy", str); }, 500); } } }); window.document.documentElement.addEventListener("keydown", (_ev: KeyboardEvent) => { window.document.documentElement.classList.add("ROOT_CLASS_KEYBOARD_INTERACT"); }, true); window.document.documentElement.addEventListener("mousedown", (_ev: MouseEvent) => { window.document.documentElement.classList.remove("ROOT_CLASS_KEYBOARD_INTERACT"); }, true); const keyDownUpEventHandler = (name: "keydown" | "keyup") => (ev: KeyboardEvent) => { const elementName = (ev.target && (ev.target as Element).nodeName) ? (ev.target as Element).nodeName : ""; const elementAttributes: { [name: string]: string } = {}; if (ev.target && (ev.target as Element).attributes) { // tslint:disable-next-line: prefer-for-of for (let i = 0; i < (ev.target as Element).attributes.length; i++) { const attr = (ev.target as Element).attributes[i]; elementAttributes[attr.name] = attr.value; } } const payload = { altKey: ev.altKey, code: ev.code, ctrlKey: ev.ctrlKey, elementAttributes, elementName, key: ev.key, metaKey: ev.metaKey, shiftKey: ev.shiftKey, } as IEventPayload_R2_EVENT_WEBVIEW_KEYDOWN | IEventPayload_R2_EVENT_WEBVIEW_KEYUP; bus.dispatch(name, payload); }; window.document.addEventListener("keydown", keyDownUpEventHandler("keydown"), { capture: true, once: false, passive: false, }); window.document.addEventListener("keyup", keyDownUpEventHandler("keyup"), { capture: true, once: false, passive: false, }); } document.addEventListener("DOMContentLoaded", () => { main(); });
edrlab/readium-desktop
src/renderer/reader/pdf/webview/index_pdf.ts
TypeScript
bsd-3-clause
9,792
{- (c) The University of Glasgow 2011 The deriving code for the Generic class (equivalent to the code in TcGenDeriv, for other classes) -} {-# LANGUAGE CPP, ScopedTypeVariables, TupleSections #-} {-# LANGUAGE FlexibleContexts #-} module TcGenGenerics (canDoGenerics, canDoGenerics1, GenericKind(..), MetaTyCons, genGenericMetaTyCons, gen_Generic_binds, get_gen1_constrained_tys) where import DynFlags import HsSyn import Type import Kind ( isKind ) import TcType import TcGenDeriv import DataCon import TyCon import FamInstEnv ( FamInst, FamFlavor(..), mkSingleCoAxiom ) import FamInst import Module ( Module, moduleName, moduleNameString , modulePackageKey, packageKeyString, getModule ) import IfaceEnv ( newGlobalBinder ) import Name hiding ( varName ) import RdrName import BasicTypes import TysWiredIn import PrelNames import InstEnv import TcEnv import MkId import TcRnMonad import HscTypes import ErrUtils( Validity(..), andValid ) import BuildTyCl import SrcLoc import Bag import VarSet (elemVarSet) import Outputable import FastString import Util import Control.Monad (mplus,forM) #include "HsVersions.h" {- ************************************************************************ * * \subsection{Bindings for the new generic deriving mechanism} * * ************************************************************************ For the generic representation we need to generate: \begin{itemize} \item A Generic instance \item A Rep type instance \item Many auxiliary datatypes and instances for them (for the meta-information) \end{itemize} -} gen_Generic_binds :: GenericKind -> TyCon -> MetaTyCons -> Module -> TcM (LHsBinds RdrName, FamInst) gen_Generic_binds gk tc metaTyCons mod = do repTyInsts <- tc_mkRepFamInsts gk tc metaTyCons mod return (mkBindsRep gk tc, repTyInsts) genGenericMetaTyCons :: TyCon -> TcM (MetaTyCons, BagDerivStuff) genGenericMetaTyCons tc = do let tc_name = tyConName tc mod = nameModule tc_name tc_cons = tyConDataCons tc tc_arits = map dataConSourceArity tc_cons tc_occ = nameOccName tc_name d_occ = mkGenD mod tc_occ c_occ m = mkGenC mod tc_occ m s_occ m n = mkGenS mod tc_occ m n mkTyCon name = ASSERT( isExternalName name ) buildAlgTyCon name [] [] Nothing [] distinctAbstractTyConRhs NonRecursive False -- Not promotable False -- Not GADT syntax NoParentTyCon loc <- getSrcSpanM -- we generate new names in current module currentMod <- getModule d_name <- newGlobalBinder currentMod d_occ loc c_names <- forM (zip [0..] tc_cons) $ \(m,_) -> newGlobalBinder currentMod (c_occ m) loc s_names <- forM (zip [0..] 
tc_arits) $ \(m,a) -> forM [0..a-1] $ \n -> newGlobalBinder currentMod (s_occ m n) loc let metaDTyCon = mkTyCon d_name metaCTyCons = map mkTyCon c_names metaSTyCons = map (map mkTyCon) s_names metaDts = MetaTyCons metaDTyCon metaCTyCons metaSTyCons (,) metaDts `fmap` metaTyConsToDerivStuff tc metaDts -- both the tycon declarations and related instances metaTyConsToDerivStuff :: TyCon -> MetaTyCons -> TcM BagDerivStuff metaTyConsToDerivStuff tc metaDts = do dflags <- getDynFlags dClas <- tcLookupClass datatypeClassName d_dfun_name <- newDFunName' dClas tc cClas <- tcLookupClass constructorClassName c_dfun_names <- sequence [ (conTy,) <$> newDFunName' cClas tc | conTy <- metaC metaDts ] sClas <- tcLookupClass selectorClassName s_dfun_names <- sequence (map sequence [ [ (selector,) <$> newDFunName' sClas tc | selector <- selectors ] | selectors <- metaS metaDts ]) fix_env <- getFixityEnv let (dBinds,cBinds,sBinds) = mkBindsMetaD fix_env tc mk_inst clas tc dfun_name = mkLocalInstance (mkDictFunId dfun_name [] [] clas tys) OverlapFlag { overlapMode = (NoOverlap "") , isSafeOverlap = safeLanguageOn dflags } [] clas tys where tys = [mkTyConTy tc] -- Datatype d_metaTycon = metaD metaDts d_inst = mk_inst dClas d_metaTycon d_dfun_name d_binds = InstBindings { ib_binds = dBinds , ib_tyvars = [] , ib_pragmas = [] , ib_extensions = [] , ib_derived = True } d_mkInst = DerivInst (InstInfo { iSpec = d_inst, iBinds = d_binds }) -- Constructor c_insts = [ mk_inst cClas c ds | (c, ds) <- c_dfun_names ] c_binds = [ InstBindings { ib_binds = c , ib_tyvars = [] , ib_pragmas = [] , ib_extensions = [] , ib_derived = True } | c <- cBinds ] c_mkInst = [ DerivInst (InstInfo { iSpec = is, iBinds = bs }) | (is,bs) <- myZip1 c_insts c_binds ] -- Selector s_insts = map (map (\(s,ds) -> mk_inst sClas s ds)) s_dfun_names s_binds = [ [ InstBindings { ib_binds = s , ib_tyvars = [] , ib_pragmas = [] , ib_extensions = [] , ib_derived = True } | s <- ss ] | ss <- sBinds ] s_mkInst = map (map (\(is,bs) -> DerivInst (InstInfo { iSpec = is , iBinds = bs}))) (myZip2 s_insts s_binds) myZip1 :: [a] -> [b] -> [(a,b)] myZip1 l1 l2 = ASSERT(length l1 == length l2) zip l1 l2 myZip2 :: [[a]] -> [[b]] -> [[(a,b)]] myZip2 l1 l2 = ASSERT(and (zipWith (>=) (map length l1) (map length l2))) [ zip x1 x2 | (x1,x2) <- zip l1 l2 ] return $ mapBag DerivTyCon (metaTyCons2TyCons metaDts) `unionBags` listToBag (d_mkInst : c_mkInst ++ concat s_mkInst) {- ************************************************************************ * * \subsection{Generating representation types} * * ************************************************************************ -} get_gen1_constrained_tys :: TyVar -> Type -> [Type] -- called by TcDeriv.inferConstraints; generates a list of types, each of which -- must be a Functor in order for the Generic1 instance to work. get_gen1_constrained_tys argVar = argTyFold argVar $ ArgTyAlg { ata_rec0 = const [] , ata_par1 = [], ata_rec1 = const [] , ata_comp = (:) } {- Note [Requirements for deriving Generic and Rep] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the following, T, Tfun, and Targ are "meta-variables" ranging over type expressions. (Generic T) and (Rep T) are derivable for some type expression T if the following constraints are satisfied. (a) T = (D v1 ... vn) with free variables v1, v2, ..., vn where n >= 0 v1 ... vn are distinct type variables. Cf #5939. (b) D is a type constructor *value*. 
In other words, D is either a type constructor or it is equivalent to the head of a data family instance (up to alpha-renaming). (c) D cannot have a "stupid context". (d) The right-hand side of D cannot include unboxed types, existential types, or universally quantified types. (e) T :: *. (Generic1 T) and (Rep1 T) are derivable for some type expression T if the following constraints are satisfied. (a),(b),(c),(d) As above. (f) T must expect arguments, and its last parameter must have kind *. We use `a' to denote the parameter of D that corresponds to the last parameter of T. (g) For any type-level application (Tfun Targ) in the right-hand side of D where the head of Tfun is not a tuple constructor: (b1) `a' must not occur in Tfun. (b2) If `a' occurs in Targ, then Tfun :: * -> *. -} canDoGenerics :: TyCon -> [Type] -> Validity -- canDoGenerics rep_tc tc_args determines if Generic/Rep can be derived for a -- type expression (rep_tc tc_arg0 tc_arg1 ... tc_argn). -- -- Check (b) from Note [Requirements for deriving Generic and Rep] is taken -- care of because canDoGenerics is applied to rep tycons. -- -- It returns Nothing if deriving is possible. It returns (Just reason) if not. canDoGenerics tc tc_args = mergeErrors ( -- Check (c) from Note [Requirements for deriving Generic and Rep]. (if (not (null (tyConStupidTheta tc))) then (NotValid (tc_name <+> text "must not have a datatype context")) else IsValid) : -- Check (a) from Note [Requirements for deriving Generic and Rep]. -- -- Data family indices can be instantiated; the `tc_args` here are -- the representation tycon args (if (all isTyVarTy (filterOut isKind tc_args)) then IsValid else NotValid (tc_name <+> text "must not be instantiated;" <+> text "try deriving `" <> tc_name <+> tc_tys <> text "' instead")) -- See comment below : (map bad_con (tyConDataCons tc))) where -- The tc can be a representation tycon. When we want to display it to the -- user (in an error message) we should print its parent (tc_name, tc_tys) = case tyConParent tc of FamInstTyCon _ ptc tys -> (ppr ptc, hsep (map ppr (tys ++ drop (length tys) tc_args))) _ -> (ppr tc, hsep (map ppr (tyConTyVars tc))) -- Check (d) from Note [Requirements for deriving Generic and Rep]. -- -- If any of the constructors has an unboxed type as argument, -- then we can't build the embedding-projection pair, because -- it relies on instantiating *polymorphic* sum and product types -- at the argument types of the constructors bad_con dc = if (any bad_arg_type (dataConOrigArgTys dc)) then (NotValid (ppr dc <+> text "must not have unlifted or polymorphic arguments")) else (if (not (isVanillaDataCon dc)) then (NotValid (ppr dc <+> text "must be a vanilla data constructor")) else IsValid) -- Nor can we do the job if it's an existential data constructor, -- Nor if the args are polymorphic types (I don't think) bad_arg_type ty = isUnLiftedType ty || not (isTauTy ty) mergeErrors :: [Validity] -> Validity mergeErrors [] = IsValid mergeErrors (NotValid s:t) = case mergeErrors t of IsValid -> NotValid s NotValid s' -> NotValid (s <> text ", and" $$ s') mergeErrors (IsValid : t) = mergeErrors t -- A datatype used only inside of canDoGenerics1. It's the result of analysing -- a type term. data Check_for_CanDoGenerics1 = CCDG1 { _ccdg1_hasParam :: Bool -- does the parameter of interest occurs in -- this type? 
, _ccdg1_errors :: Validity -- errors generated by this type } {- Note [degenerate use of FFoldType] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use foldDataConArgs here only for its ability to treat tuples specially. foldDataConArgs also tracks covariance (though it assumes all higher-order type parameters are covariant) and has hooks for special handling of functions and polytypes, but we do *not* use those. The key issue is that Generic1 deriving currently offers no sophisticated support for functions. For example, we cannot handle data F a = F ((a -> Int) -> Int) even though a is occurring covariantly. In fact, our rule is harsh: a is simply not allowed to occur within the first argument of (->). We treat (->) the same as any other non-tuple tycon. Unfortunately, this means we have to track "the parameter occurs in this type" explicitly, even though foldDataConArgs is also doing this internally. -} -- canDoGenerics1 rep_tc tc_args determines if a Generic1/Rep1 can be derived -- for a type expression (rep_tc tc_arg0 tc_arg1 ... tc_argn). -- -- Checks (a) through (d) from Note [Requirements for deriving Generic and Rep] -- are taken care of by the call to canDoGenerics. -- -- It returns Nothing if deriving is possible. It returns (Just reason) if not. canDoGenerics1 :: TyCon -> [Type] -> Validity canDoGenerics1 rep_tc tc_args = canDoGenerics rep_tc tc_args `andValid` additionalChecks where additionalChecks -- check (f) from Note [Requirements for deriving Generic and Rep] | null (tyConTyVars rep_tc) = NotValid $ ptext (sLit "Data type") <+> quotes (ppr rep_tc) <+> ptext (sLit "must have some type parameters") | otherwise = mergeErrors $ concatMap check_con data_cons data_cons = tyConDataCons rep_tc check_con con = case check_vanilla con of j@(NotValid {}) -> [j] IsValid -> _ccdg1_errors `map` foldDataConArgs (ft_check con) con bad :: DataCon -> SDoc -> SDoc bad con msg = ptext (sLit "Constructor") <+> quotes (ppr con) <+> msg check_vanilla :: DataCon -> Validity check_vanilla con | isVanillaDataCon con = IsValid | otherwise = NotValid (bad con existential) bmzero = CCDG1 False IsValid bmbad con s = CCDG1 True $ NotValid $ bad con s bmplus (CCDG1 b1 m1) (CCDG1 b2 m2) = CCDG1 (b1 || b2) (m1 `andValid` m2) -- check (g) from Note [degenerate use of FFoldType] ft_check :: DataCon -> FFoldType Check_for_CanDoGenerics1 ft_check con = FT { ft_triv = bmzero , ft_var = caseVar, ft_co_var = caseVar -- (component_0,component_1,...,component_n) , ft_tup = \_ components -> if any _ccdg1_hasParam (init components) then bmbad con wrong_arg else foldr bmplus bmzero components -- (dom -> rng), where the head of ty is not a tuple tycon , ft_fun = \dom rng -> -- cf #8516 if _ccdg1_hasParam dom then bmbad con wrong_arg else bmplus dom rng -- (ty arg), where head of ty is neither (->) nor a tuple constructor and -- the parameter of interest does not occur in ty , ft_ty_app = \_ arg -> arg , ft_bad_app = bmbad con wrong_arg , ft_forall = \_ body -> body -- polytypes are handled elsewhere } where caseVar = CCDG1 True IsValid existential = text "must not have existential arguments" wrong_arg = text "applies a type to an argument involving the last parameter" $$ text "but the applied type is not of kind * -> *" {- ************************************************************************ * * \subsection{Generating the RHS of a generic default method} * * ************************************************************************ -} type US = Int -- Local unique supply, just a plain Int type Alt = (LPat RdrName, LHsExpr 
RdrName) -- GenericKind serves to mark if a datatype derives Generic (Gen0) or -- Generic1 (Gen1). data GenericKind = Gen0 | Gen1 -- as above, but with a payload of the TyCon's name for "the" parameter data GenericKind_ = Gen0_ | Gen1_ TyVar -- as above, but using a single datacon's name for "the" parameter data GenericKind_DC = Gen0_DC | Gen1_DC TyVar forgetArgVar :: GenericKind_DC -> GenericKind forgetArgVar Gen0_DC = Gen0 forgetArgVar Gen1_DC{} = Gen1 -- When working only within a single datacon, "the" parameter's name should -- match that datacon's name for it. gk2gkDC :: GenericKind_ -> DataCon -> GenericKind_DC gk2gkDC Gen0_ _ = Gen0_DC gk2gkDC Gen1_{} d = Gen1_DC $ last $ dataConUnivTyVars d -- Bindings for the Generic instance mkBindsRep :: GenericKind -> TyCon -> LHsBinds RdrName mkBindsRep gk tycon = unitBag (mkRdrFunBind (L loc from01_RDR) from_matches) `unionBags` unitBag (mkRdrFunBind (L loc to01_RDR) to_matches) where from_matches = [mkSimpleHsAlt pat rhs | (pat,rhs) <- from_alts] to_matches = [mkSimpleHsAlt pat rhs | (pat,rhs) <- to_alts ] loc = srcLocSpan (getSrcLoc tycon) datacons = tyConDataCons tycon (from01_RDR, to01_RDR) = case gk of Gen0 -> (from_RDR, to_RDR) Gen1 -> (from1_RDR, to1_RDR) -- Recurse over the sum first from_alts, to_alts :: [Alt] (from_alts, to_alts) = mkSum gk_ (1 :: US) tycon datacons where gk_ = case gk of Gen0 -> Gen0_ Gen1 -> ASSERT(length tyvars >= 1) Gen1_ (last tyvars) where tyvars = tyConTyVars tycon -------------------------------------------------------------------------------- -- The type synonym instance and synonym -- type instance Rep (D a b) = Rep_D a b -- type Rep_D a b = ...representation type for D ... -------------------------------------------------------------------------------- tc_mkRepFamInsts :: GenericKind -- Gen0 or Gen1 -> TyCon -- The type to generate representation for -> MetaTyCons -- Metadata datatypes to refer to -> Module -- Used as the location of the new RepTy -> TcM (FamInst) -- Generated representation0 coercion tc_mkRepFamInsts gk tycon metaDts mod = -- Consider the example input tycon `D`, where data D a b = D_ a -- Also consider `R:DInt`, where { data family D x y :: * -> * -- ; data instance D Int a b = D_ a } do { -- `rep` = GHC.Generics.Rep or GHC.Generics.Rep1 (type family) fam_tc <- case gk of Gen0 -> tcLookupTyCon repTyConName Gen1 -> tcLookupTyCon rep1TyConName ; let -- `tyvars` = [a,b] (tyvars, gk_) = case gk of Gen0 -> (all_tyvars, Gen0_) Gen1 -> ASSERT(not $ null all_tyvars) (init all_tyvars, Gen1_ $ last all_tyvars) where all_tyvars = tyConTyVars tycon tyvar_args = mkTyVarTys tyvars appT :: [Type] appT = case tyConFamInst_maybe tycon of -- `appT` = D Int a b (data families case) Just (famtycon, apps) -> -- `fam` = D -- `apps` = [Int, a, b] let allApps = case gk of Gen0 -> apps Gen1 -> ASSERT(not $ null apps) init apps in [mkTyConApp famtycon allApps] -- `appT` = D a b (normal case) Nothing -> [mkTyConApp tycon tyvar_args] -- `repTy` = D1 ... (C1 ... (S1 ... 
(Rec0 a))) :: * -> * ; repTy <- tc_mkRepTy gk_ tycon metaDts -- `rep_name` is a name we generate for the synonym ; rep_name <- let mkGen = case gk of Gen0 -> mkGenR; Gen1 -> mkGen1R in newGlobalBinder mod (mkGen (nameOccName (tyConName tycon))) (nameSrcSpan (tyConName tycon)) ; let axiom = mkSingleCoAxiom Nominal rep_name tyvars fam_tc appT repTy ; newFamInst SynFamilyInst axiom } -------------------------------------------------------------------------------- -- Type representation -------------------------------------------------------------------------------- -- | See documentation of 'argTyFold'; that function uses the fields of this -- type to interpret the structure of a type when that type is considered as an -- argument to a constructor that is being represented with 'Rep1'. data ArgTyAlg a = ArgTyAlg { ata_rec0 :: (Type -> a) , ata_par1 :: a, ata_rec1 :: (Type -> a) , ata_comp :: (Type -> a -> a) } -- | @argTyFold@ implements a generalised and safer variant of the @arg@ -- function from Figure 3 in <http://dreixel.net/research/pdf/gdmh.pdf>. @arg@ -- is conceptually equivalent to: -- -- > arg t = case t of -- > _ | isTyVar t -> if (t == argVar) then Par1 else Par0 t -- > App f [t'] | -- > representable1 f && -- > t' == argVar -> Rec1 f -- > App f [t'] | -- > representable1 f && -- > t' has tyvars -> f :.: (arg t') -- > _ -> Rec0 t -- -- where @argVar@ is the last type variable in the data type declaration we are -- finding the representation for. -- -- @argTyFold@ is more general than @arg@ because it uses 'ArgTyAlg' to -- abstract out the concrete invocations of @Par0@, @Rec0@, @Par1@, @Rec1@, and -- @:.:@. -- -- @argTyFold@ is safer than @arg@ because @arg@ would lead to a GHC panic for -- some data types. The problematic case is when @t@ is an application of a -- non-representable type @f@ to @argVar@: @App f [argVar]@ is caught by the -- @_@ pattern, and ends up represented as @Rec0 t@. This type occurs /free/ in -- the RHS of the eventual @Rep1@ instance, which is therefore ill-formed. Some -- representable1 checks have been relaxed, and others were moved to -- @canDoGenerics1@. argTyFold :: forall a. TyVar -> ArgTyAlg a -> Type -> a argTyFold argVar (ArgTyAlg {ata_rec0 = mkRec0, ata_par1 = mkPar1, ata_rec1 = mkRec1, ata_comp = mkComp}) = -- mkRec0 is the default; use it if there is no interesting structure -- (e.g. occurrences of parameters or recursive occurrences) \t -> maybe (mkRec0 t) id $ go t where go :: Type -> -- type to fold through Maybe a -- the result (e.g. representation type), unless it's trivial go t = isParam `mplus` isApp where isParam = do -- handles parameters t' <- getTyVar_maybe t Just $ if t' == argVar then mkPar1 -- moreover, it is "the" parameter else mkRec0 t -- NB mkRec0 instead of the conventional mkPar0 isApp = do -- handles applications (phi, beta) <- tcSplitAppTy_maybe t let interesting = argVar `elemVarSet` exactTyVarsOfType beta -- Does it have no interesting structure to represent? if not interesting then Nothing else -- Is the argument the parameter? Special case for mkRec1. if Just argVar == getTyVar_maybe beta then Just $ mkRec1 phi else mkComp phi `fmap` go beta -- It must be a composition. 
tc_mkRepTy :: -- Gen0_ or Gen1_, for Rep or Rep1 GenericKind_ -- The type to generate representation for -> TyCon -- Metadata datatypes to refer to -> MetaTyCons -- Generated representation0 type -> TcM Type tc_mkRepTy gk_ tycon metaDts = do d1 <- tcLookupTyCon d1TyConName c1 <- tcLookupTyCon c1TyConName s1 <- tcLookupTyCon s1TyConName nS1 <- tcLookupTyCon noSelTyConName rec0 <- tcLookupTyCon rec0TyConName rec1 <- tcLookupTyCon rec1TyConName par1 <- tcLookupTyCon par1TyConName u1 <- tcLookupTyCon u1TyConName v1 <- tcLookupTyCon v1TyConName plus <- tcLookupTyCon sumTyConName times <- tcLookupTyCon prodTyConName comp <- tcLookupTyCon compTyConName let mkSum' a b = mkTyConApp plus [a,b] mkProd a b = mkTyConApp times [a,b] mkComp a b = mkTyConApp comp [a,b] mkRec0 a = mkTyConApp rec0 [a] mkRec1 a = mkTyConApp rec1 [a] mkPar1 = mkTyConTy par1 mkD a = mkTyConApp d1 [metaDTyCon, sumP (tyConDataCons a)] mkC i d a = mkTyConApp c1 [d, prod i (dataConInstOrigArgTys a $ mkTyVarTys $ tyConTyVars tycon) (null (dataConFieldLabels a))] -- This field has no label mkS True _ a = mkTyConApp s1 [mkTyConTy nS1, a] -- This field has a label mkS False d a = mkTyConApp s1 [d, a] -- Sums and products are done in the same way for both Rep and Rep1 sumP [] = mkTyConTy v1 sumP l = ASSERT(length metaCTyCons == length l) foldBal mkSum' [ mkC i d a | (d,(a,i)) <- zip metaCTyCons (zip l [0..])] -- The Bool is True if this constructor has labelled fields prod :: Int -> [Type] -> Bool -> Type prod i [] _ = ASSERT(length metaSTyCons > i) ASSERT(length (metaSTyCons !! i) == 0) mkTyConTy u1 prod i l b = ASSERT(length metaSTyCons > i) ASSERT(length l == length (metaSTyCons !! i)) foldBal mkProd [ arg d t b | (d,t) <- zip (metaSTyCons !! i) l ] arg :: Type -> Type -> Bool -> Type arg d t b = mkS b d $ case gk_ of -- Here we previously used Par0 if t was a type variable, but we -- realized that we can't always guarantee that we are wrapping-up -- all type variables in Par0. So we decided to stop using Par0 -- altogether, and use Rec0 all the time. Gen0_ -> mkRec0 t Gen1_ argVar -> argPar argVar t where -- Builds argument represention for Rep1 (more complicated due to -- the presence of composition). 
argPar argVar = argTyFold argVar $ ArgTyAlg {ata_rec0 = mkRec0, ata_par1 = mkPar1, ata_rec1 = mkRec1, ata_comp = mkComp} metaDTyCon = mkTyConTy (metaD metaDts) metaCTyCons = map mkTyConTy (metaC metaDts) metaSTyCons = map (map mkTyConTy) (metaS metaDts) return (mkD tycon) -------------------------------------------------------------------------------- -- Meta-information -------------------------------------------------------------------------------- data MetaTyCons = MetaTyCons { -- One meta datatype per datatype metaD :: TyCon -- One meta datatype per constructor , metaC :: [TyCon] -- One meta datatype per selector per constructor , metaS :: [[TyCon]] } instance Outputable MetaTyCons where ppr (MetaTyCons d c s) = ppr d $$ vcat (map ppr c) $$ vcat (map ppr (concat s)) metaTyCons2TyCons :: MetaTyCons -> Bag TyCon metaTyCons2TyCons (MetaTyCons d c s) = listToBag (d : c ++ concat s) -- Bindings for Datatype, Constructor, and Selector instances mkBindsMetaD :: FixityEnv -> TyCon -> ( LHsBinds RdrName -- Datatype instance , [LHsBinds RdrName] -- Constructor instances , [[LHsBinds RdrName]]) -- Selector instances mkBindsMetaD fix_env tycon = (dtBinds, allConBinds, allSelBinds) where mkBag l = foldr1 unionBags [ unitBag (mkRdrFunBind (L loc name) matches) | (name, matches) <- l ] dtBinds = mkBag ( [ (datatypeName_RDR, dtName_matches) , (moduleName_RDR, moduleName_matches) , (packageName_RDR, pkgName_matches)] ++ ifElseEmpty (isNewTyCon tycon) [ (isNewtypeName_RDR, isNewtype_matches) ] ) allConBinds = map conBinds datacons conBinds c = mkBag ( [ (conName_RDR, conName_matches c)] ++ ifElseEmpty (dataConIsInfix c) [ (conFixity_RDR, conFixity_matches c) ] ++ ifElseEmpty (length (dataConFieldLabels c) > 0) [ (conIsRecord_RDR, conIsRecord_matches c) ] ) ifElseEmpty p x = if p then x else [] fixity c = case lookupFixity fix_env (dataConName c) of Fixity n InfixL -> buildFix n leftAssocDataCon_RDR Fixity n InfixR -> buildFix n rightAssocDataCon_RDR Fixity n InfixN -> buildFix n notAssocDataCon_RDR buildFix n assoc = nlHsApps infixDataCon_RDR [nlHsVar assoc , nlHsIntLit (toInteger n)] allSelBinds = map (map selBinds) datasels selBinds s = mkBag [(selName_RDR, selName_matches s)] loc = srcLocSpan (getSrcLoc tycon) mkStringLHS s = [mkSimpleHsAlt nlWildPat (nlHsLit (mkHsString s))] datacons = tyConDataCons tycon datasels = map dataConFieldLabels datacons tyConName_user = case tyConFamInst_maybe tycon of Just (ptycon, _) -> tyConName ptycon Nothing -> tyConName tycon dtName_matches = mkStringLHS . occNameString . nameOccName $ tyConName_user moduleName_matches = mkStringLHS . moduleNameString . moduleName . nameModule . tyConName $ tycon pkgName_matches = mkStringLHS . packageKeyString . modulePackageKey . nameModule . tyConName $ tycon isNewtype_matches = [mkSimpleHsAlt nlWildPat (nlHsVar true_RDR)] conName_matches c = mkStringLHS . occNameString . nameOccName . dataConName $ c conFixity_matches c = [mkSimpleHsAlt nlWildPat (fixity c)] conIsRecord_matches _ = [mkSimpleHsAlt nlWildPat (nlHsVar true_RDR)] selName_matches s = mkStringLHS (occNameString (nameOccName s)) -------------------------------------------------------------------------------- -- Dealing with sums -------------------------------------------------------------------------------- mkSum :: GenericKind_ -- Generic or Generic1? 
-> US -- Base for generating unique names -> TyCon -- The type constructor -> [DataCon] -- The data constructors -> ([Alt], -- Alternatives for the T->Trep "from" function [Alt]) -- Alternatives for the Trep->T "to" function -- Datatype without any constructors mkSum _ _ tycon [] = ([from_alt], [to_alt]) where from_alt = (nlWildPat, mkM1_E (makeError errMsgFrom)) to_alt = (mkM1_P nlWildPat, makeError errMsgTo) -- These M1s are meta-information for the datatype makeError s = nlHsApp (nlHsVar error_RDR) (nlHsLit (mkHsString s)) tyConStr = occNameString (nameOccName (tyConName tycon)) errMsgFrom = "No generic representation for empty datatype " ++ tyConStr errMsgTo = "No values for empty datatype " ++ tyConStr -- Datatype with at least one constructor mkSum gk_ us _ datacons = -- switch the payload of gk_ to be datacon-centric instead of tycon-centric unzip [ mk1Sum (gk2gkDC gk_ d) us i (length datacons) d | (d,i) <- zip datacons [1..] ] -- Build the sum for a particular constructor mk1Sum :: GenericKind_DC -- Generic or Generic1? -> US -- Base for generating unique names -> Int -- The index of this constructor -> Int -- Total number of constructors -> DataCon -- The data constructor -> (Alt, -- Alternative for the T->Trep "from" function Alt) -- Alternative for the Trep->T "to" function mk1Sum gk_ us i n datacon = (from_alt, to_alt) where gk = forgetArgVar gk_ -- Existentials already excluded argTys = dataConOrigArgTys datacon n_args = dataConSourceArity datacon datacon_varTys = zip (map mkGenericLocal [us .. us+n_args-1]) argTys datacon_vars = map fst datacon_varTys us' = us + n_args datacon_rdr = getRdrName datacon from_alt = (nlConVarPat datacon_rdr datacon_vars, from_alt_rhs) from_alt_rhs = mkM1_E (genLR_E i n (mkProd_E gk_ us' datacon_varTys)) to_alt = (mkM1_P (genLR_P i n (mkProd_P gk us' datacon_vars)), to_alt_rhs) -- These M1s are meta-information for the datatype to_alt_rhs = case gk_ of Gen0_DC -> nlHsVarApps datacon_rdr datacon_vars Gen1_DC argVar -> nlHsApps datacon_rdr $ map argTo datacon_varTys where argTo (var, ty) = converter ty `nlHsApp` nlHsVar var where converter = argTyFold argVar $ ArgTyAlg {ata_rec0 = const $ nlHsVar unK1_RDR, ata_par1 = nlHsVar unPar1_RDR, ata_rec1 = const $ nlHsVar unRec1_RDR, ata_comp = \_ cnv -> (nlHsVar fmap_RDR `nlHsApp` cnv) `nlHsCompose` nlHsVar unComp1_RDR} -- Generates the L1/R1 sum pattern genLR_P :: Int -> Int -> LPat RdrName -> LPat RdrName genLR_P i n p | n == 0 = error "impossible" | n == 1 = p | i <= div n 2 = nlConPat l1DataCon_RDR [genLR_P i (div n 2) p] | otherwise = nlConPat r1DataCon_RDR [genLR_P (i-m) (n-m) p] where m = div n 2 -- Generates the L1/R1 sum expression genLR_E :: Int -> Int -> LHsExpr RdrName -> LHsExpr RdrName genLR_E i n e | n == 0 = error "impossible" | n == 1 = e | i <= div n 2 = nlHsVar l1DataCon_RDR `nlHsApp` genLR_E i (div n 2) e | otherwise = nlHsVar r1DataCon_RDR `nlHsApp` genLR_E (i-m) (n-m) e where m = div n 2 -------------------------------------------------------------------------------- -- Dealing with products -------------------------------------------------------------------------------- -- Build a product expression mkProd_E :: GenericKind_DC -- Generic or Generic1? 
-> US -- Base for unique names -> [(RdrName, Type)] -- List of variables matched on the lhs and their types -> LHsExpr RdrName -- Resulting product expression mkProd_E _ _ [] = mkM1_E (nlHsVar u1DataCon_RDR) mkProd_E gk_ _ varTys = mkM1_E (foldBal prod appVars) -- These M1s are meta-information for the constructor where appVars = map (wrapArg_E gk_) varTys prod a b = prodDataCon_RDR `nlHsApps` [a,b] wrapArg_E :: GenericKind_DC -> (RdrName, Type) -> LHsExpr RdrName wrapArg_E Gen0_DC (var, _) = mkM1_E (k1DataCon_RDR `nlHsVarApps` [var]) -- This M1 is meta-information for the selector wrapArg_E (Gen1_DC argVar) (var, ty) = mkM1_E $ converter ty `nlHsApp` nlHsVar var -- This M1 is meta-information for the selector where converter = argTyFold argVar $ ArgTyAlg {ata_rec0 = const $ nlHsVar k1DataCon_RDR, ata_par1 = nlHsVar par1DataCon_RDR, ata_rec1 = const $ nlHsVar rec1DataCon_RDR, ata_comp = \_ cnv -> nlHsVar comp1DataCon_RDR `nlHsCompose` (nlHsVar fmap_RDR `nlHsApp` cnv)} -- Build a product pattern mkProd_P :: GenericKind -- Gen0 or Gen1 -> US -- Base for unique names -> [RdrName] -- List of variables to match -> LPat RdrName -- Resulting product pattern mkProd_P _ _ [] = mkM1_P (nlNullaryConPat u1DataCon_RDR) mkProd_P gk _ vars = mkM1_P (foldBal prod appVars) -- These M1s are meta-information for the constructor where appVars = map (wrapArg_P gk) vars prod a b = prodDataCon_RDR `nlConPat` [a,b] wrapArg_P :: GenericKind -> RdrName -> LPat RdrName wrapArg_P Gen0 v = mkM1_P (k1DataCon_RDR `nlConVarPat` [v]) -- This M1 is meta-information for the selector wrapArg_P Gen1 v = m1DataCon_RDR `nlConVarPat` [v] mkGenericLocal :: US -> RdrName mkGenericLocal u = mkVarUnqual (mkFastString ("g" ++ show u)) mkM1_E :: LHsExpr RdrName -> LHsExpr RdrName mkM1_E e = nlHsVar m1DataCon_RDR `nlHsApp` e mkM1_P :: LPat RdrName -> LPat RdrName mkM1_P p = m1DataCon_RDR `nlConPat` [p] nlHsCompose :: LHsExpr RdrName -> LHsExpr RdrName -> LHsExpr RdrName nlHsCompose x y = compose_RDR `nlHsApps` [x, y] -- | Variant of foldr1 for producing balanced lists foldBal :: (a -> a -> a) -> [a] -> a foldBal op = foldBal' op (error "foldBal: empty list") foldBal' :: (a -> a -> a) -> a -> [a] -> a foldBal' _ x [] = x foldBal' _ _ [y] = y foldBal' op x l = let (a,b) = splitAt (length l `div` 2) l in foldBal' op x a `op` foldBal' op x b
ghc-android/ghc
compiler/typecheck/TcGenGenerics.hs
Haskell
bsd-3-clause
37,323
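As a minimal illustrative sketch of the user-facing feature that the TcGenGenerics.hs module above implements (this block is not taken from ghc-android/ghc; the names TreeExample, Tree and roundTrip are invented for illustration, and it assumes only a GHC with the DeriveGeneric extension), a data type satisfying the constraints listed in Note [Requirements for deriving Generic and Rep] can derive Generic and Generic1 like this:

    {-# LANGUAGE DeriveGeneric #-}
    module TreeExample where

    import GHC.Generics

    -- A vanilla data type: no datatype context, no existential or unlifted
    -- arguments, and its last parameter has kind *, so both canDoGenerics
    -- and canDoGenerics1 above would accept it.
    data Tree a = Leaf | Node (Tree a) a (Tree a)
      deriving (Show, Generic, Generic1)

    -- Round-trip a value through the generated representation type;
    -- from/to are the methods whose bindings mkBindsRep generates.
    roundTrip :: Tree Char -> Tree Char
    roundTrip = to . from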
<!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta charset="utf-8" /> <title>statsmodels.robust.scale &#8212; statsmodels v0.10.0 documentation</title> <link rel="stylesheet" href="../../../_static/nature.css" type="text/css" /> <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" /> <link rel="stylesheet" type="text/css" href="../../../_static/graphviz.css" /> <script type="text/javascript" id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script> <script type="text/javascript" src="../../../_static/jquery.js"></script> <script type="text/javascript" src="../../../_static/underscore.js"></script> <script type="text/javascript" src="../../../_static/doctools.js"></script> <script type="text/javascript" src="../../../_static/language_data.js"></script> <script async="async" type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script> <link rel="shortcut icon" href="../../../_static/statsmodels_hybi_favico.ico"/> <link rel="author" title="About these documents" href="../../../about.html" /> <link rel="index" title="Index" href="../../../genindex.html" /> <link rel="search" title="Search" href="../../../search.html" /> <link rel="stylesheet" href="../../../_static/examples.css" type="text/css" /> <link rel="stylesheet" href="../../../_static/facebox.css" type="text/css" /> <script type="text/javascript" src="../../../_static/scripts.js"> </script> <script type="text/javascript" src="../../../_static/facebox.js"> </script> <script type="text/javascript"> $.facebox.settings.closeImage = "../../../_static/closelabel.png" $.facebox.settings.loadingImage = "../../../_static/loading.gif" </script> <script> $(document).ready(function() { $.getJSON("../../../../versions.json", function(versions) { var dropdown = document.createElement("div"); dropdown.className = "dropdown"; var button = document.createElement("button"); button.className = "dropbtn"; button.innerHTML = "Other Versions"; var content = document.createElement("div"); content.className = "dropdown-content"; dropdown.appendChild(button); dropdown.appendChild(content); $(".header").prepend(dropdown); for (var i = 0; i < versions.length; i++) { if (versions[i].substring(0, 1) == "v") { versions[i] = [versions[i], versions[i].substring(1)]; } else { versions[i] = [versions[i], versions[i]]; }; }; for (var i = 0; i < versions.length; i++) { var a = document.createElement("a"); a.innerHTML = versions[i][1]; a.href = "../../../../" + versions[i][0] + "/index.html"; a.title = versions[i][1]; $(".dropdown-content").append(a); }; }); }); </script> </head><body> <div class="headerwrap"> <div class = "header"> <a href = "../../../index.html"> <img src="../../../_static/statsmodels_hybi_banner.png" alt="Logo" style="padding-left: 15px"/></a> </div> </div> <div class="related" role="navigation" aria-label="related navigation"> <h3>Navigation</h3> <ul> <li class="right" style="margin-right: 10px"> <a href="../../../genindex.html" title="General Index" accesskey="I">index</a></li> <li class="right" > <a href="../../../py-modindex.html" title="Python Module Index" >modules</a> |</li> <li><a href ="../../../install.html">Install</a></li> &nbsp;|&nbsp; <li><a href="https://groups.google.com/forum/?hl=en#!forum/pystatsmodels">Support</a></li> &nbsp;|&nbsp; <li><a href="https://github.com/statsmodels/statsmodels/issues">Bugs</a></li> &nbsp;|&nbsp; <li><a 
href="../../../dev/index.html">Develop</a></li> &nbsp;|&nbsp; <li><a href="../../../examples/index.html">Examples</a></li> &nbsp;|&nbsp; <li><a href="../../../faq.html">FAQ</a></li> &nbsp;|&nbsp; <li class="nav-item nav-item-1"><a href="../../index.html" accesskey="U">Module code</a> |</li> </ul> </div> <div class="document"> <div class="documentwrapper"> <div class="bodywrapper"> <div class="body" role="main"> <h1>Source code for statsmodels.robust.scale</h1><div class="highlight"><pre> <span></span><span class="sd">&quot;&quot;&quot;</span> <span class="sd">Support and standalone functions for Robust Linear Models</span> <span class="sd">References</span> <span class="sd">----------</span> <span class="sd">PJ Huber. &#39;Robust Statistics&#39; John Wiley and Sons, Inc., New York, 1981.</span> <span class="sd">R Venables, B Ripley. &#39;Modern Applied Statistics in S&#39;</span> <span class="sd"> Springer, New York, 2002.</span> <span class="sd">&quot;&quot;&quot;</span> <span class="kn">from</span> <span class="nn">statsmodels.compat.python</span> <span class="k">import</span> <span class="nb">range</span> <span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span> <span class="kn">from</span> <span class="nn">scipy.stats</span> <span class="k">import</span> <span class="n">norm</span> <span class="k">as</span> <span class="n">Gaussian</span> <span class="kn">from</span> <span class="nn">.</span> <span class="k">import</span> <span class="n">norms</span> <span class="kn">from</span> <span class="nn">statsmodels.tools</span> <span class="k">import</span> <span class="n">tools</span> <div class="viewcode-block" id="mad"><a class="viewcode-back" href="../../../generated/statsmodels.robust.scale.mad.html#statsmodels.robust.scale.mad">[docs]</a><span class="k">def</span> <span class="nf">mad</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">c</span><span class="o">=</span><span class="n">Gaussian</span><span class="o">.</span><span class="n">ppf</span><span class="p">(</span><span class="mi">3</span><span class="o">/</span><span class="mf">4.</span><span class="p">),</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">center</span><span class="o">=</span><span class="n">np</span><span class="o">.</span><span class="n">median</span><span class="p">):</span> <span class="c1"># c \approx .6745</span> <span class="sd">&quot;&quot;&quot;</span> <span class="sd"> The Median Absolute Deviation along given axis of an array</span> <span class="sd"> Parameters</span> <span class="sd"> ----------</span> <span class="sd"> a : array-like</span> <span class="sd"> Input array.</span> <span class="sd"> c : float, optional</span> <span class="sd"> The normalization constant. Defined as scipy.stats.norm.ppf(3/4.),</span> <span class="sd"> which is approximately .6745.</span> <span class="sd"> axis : int, optional</span> <span class="sd"> The defaul is 0. Can also be None.</span> <span class="sd"> center : callable or float</span> <span class="sd"> If a callable is provided, such as the default `np.median` then it</span> <span class="sd"> is expected to be called center(a). The axis argument will be applied</span> <span class="sd"> via np.apply_over_axes. 
Otherwise, provide a float.</span> <span class="sd"> Returns</span> <span class="sd"> -------</span> <span class="sd"> mad : float</span> <span class="sd"> `mad` = median(abs(`a` - center))/`c`</span> <span class="sd"> &quot;&quot;&quot;</span> <span class="n">a</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asarray</span><span class="p">(</span><span class="n">a</span><span class="p">)</span> <span class="k">if</span> <span class="n">callable</span><span class="p">(</span><span class="n">center</span><span class="p">):</span> <span class="n">center</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">apply_over_axes</span><span class="p">(</span><span class="n">center</span><span class="p">,</span> <span class="n">a</span><span class="p">,</span> <span class="n">axis</span><span class="p">)</span> <span class="k">return</span> <span class="n">np</span><span class="o">.</span><span class="n">median</span><span class="p">((</span><span class="n">np</span><span class="o">.</span><span class="n">fabs</span><span class="p">(</span><span class="n">a</span><span class="o">-</span><span class="n">center</span><span class="p">))</span><span class="o">/</span><span class="n">c</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">)</span></div> <div class="viewcode-block" id="Huber"><a class="viewcode-back" href="../../../generated/statsmodels.robust.scale.Huber.html#statsmodels.robust.scale.Huber">[docs]</a><span class="k">class</span> <span class="nc">Huber</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span> <span class="sd">&quot;&quot;&quot;</span> <span class="sd"> Huber&#39;s proposal 2 for estimating location and scale jointly.</span> <span class="sd"> Parameters</span> <span class="sd"> ----------</span> <span class="sd"> c : float, optional</span> <span class="sd"> Threshold used in threshold for chi=psi**2. Default value is 1.5.</span> <span class="sd"> tol : float, optional</span> <span class="sd"> Tolerance for convergence. Default value is 1e-08.</span> <span class="sd"> maxiter : int, optional0</span> <span class="sd"> Maximum number of iterations. Default value is 30.</span> <span class="sd"> norm : statsmodels.robust.norms.RobustNorm, optional</span> <span class="sd"> A robust norm used in M estimator of location. If None,</span> <span class="sd"> the location estimator defaults to a one-step</span> <span class="sd"> fixed point version of the M-estimator using Huber&#39;s T.</span> <span class="sd"> call</span> <span class="sd"> Return joint estimates of Huber&#39;s scale and location.</span> <span class="sd"> Examples</span> <span class="sd"> --------</span> <span class="sd"> &gt;&gt;&gt; import numpy as np</span> <span class="sd"> &gt;&gt;&gt; import statsmodels.api as sm</span> <span class="sd"> &gt;&gt;&gt; chem_data = np.array([2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03,</span> <span class="sd"> ... 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7,</span> <span class="sd"> ... 
3.77, 5.28, 28.95])</span> <span class="sd"> &gt;&gt;&gt; sm.robust.scale.huber(chem_data)</span> <span class="sd"> (array(3.2054980819923693), array(0.67365260010478967))</span> <span class="sd"> &quot;&quot;&quot;</span> <span class="k">def</span> <span class="nf">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">c</span><span class="o">=</span><span class="mf">1.5</span><span class="p">,</span> <span class="n">tol</span><span class="o">=</span><span class="mf">1.0e-08</span><span class="p">,</span> <span class="n">maxiter</span><span class="o">=</span><span class="mi">30</span><span class="p">,</span> <span class="n">norm</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> <span class="bp">self</span><span class="o">.</span><span class="n">c</span> <span class="o">=</span> <span class="n">c</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxiter</span> <span class="o">=</span> <span class="n">maxiter</span> <span class="bp">self</span><span class="o">.</span><span class="n">tol</span> <span class="o">=</span> <span class="n">tol</span> <span class="bp">self</span><span class="o">.</span><span class="n">norm</span> <span class="o">=</span> <span class="n">norm</span> <span class="n">tmp</span> <span class="o">=</span> <span class="mi">2</span> <span class="o">*</span> <span class="n">Gaussian</span><span class="o">.</span><span class="n">cdf</span><span class="p">(</span><span class="n">c</span><span class="p">)</span> <span class="o">-</span> <span class="mi">1</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">=</span> <span class="n">tmp</span> <span class="o">+</span> <span class="n">c</span><span class="o">**</span><span class="mi">2</span> <span class="o">*</span> <span class="p">(</span><span class="mi">1</span> <span class="o">-</span> <span class="n">tmp</span><span class="p">)</span> <span class="o">-</span> <span class="mi">2</span> <span class="o">*</span> <span class="n">c</span> <span class="o">*</span> <span class="n">Gaussian</span><span class="o">.</span><span class="n">pdf</span><span class="p">(</span><span class="n">c</span><span class="p">)</span> <div class="viewcode-block" id="Huber.__call__"><a class="viewcode-back" href="../../../generated/statsmodels.robust.scale.Huber.__call__.html#statsmodels.robust.scale.Huber.__call__">[docs]</a> <span class="k">def</span> <span class="nf">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">a</span><span class="p">,</span> <span class="n">mu</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">initscale</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span> <span class="sd">&quot;&quot;&quot;</span> <span class="sd"> Compute Huber&#39;s proposal 2 estimate of scale, using an optional</span> <span class="sd"> initial value of scale and an optional estimate of mu. 
If mu</span> <span class="sd"> is supplied, it is not reestimated.</span> <span class="sd"> Parameters</span> <span class="sd"> ----------</span> <span class="sd"> a : array</span> <span class="sd"> 1d array</span> <span class="sd"> mu : float or None, optional</span> <span class="sd"> If the location mu is supplied then it is not reestimated.</span> <span class="sd"> Default is None, which means that it is estimated.</span> <span class="sd"> initscale : float or None, optional</span> <span class="sd"> A first guess on scale. If initscale is None then the standardized</span> <span class="sd"> median absolute deviation of a is used.</span> <span class="sd"> Notes</span> <span class="sd"> -----</span> <span class="sd"> `Huber` minimizes the function</span> <span class="sd"> sum(psi((a[i]-mu)/scale)**2)</span> <span class="sd"> as a function of (mu, scale), where</span> <span class="sd"> psi(x) = np.clip(x, -self.c, self.c)</span> <span class="sd"> &quot;&quot;&quot;</span> <span class="n">a</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">asarray</span><span class="p">(</span><span class="n">a</span><span class="p">)</span> <span class="k">if</span> <span class="n">mu</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span> <span class="n">n</span> <span class="o">=</span> <span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">-</span> <span class="mi">1</span> <span class="n">mu</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">median</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">)</span> <span class="n">est_mu</span> <span class="o">=</span> <span class="kc">True</span> <span class="k">else</span><span class="p">:</span> <span class="n">n</span> <span class="o">=</span> <span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="n">mu</span> <span class="o">=</span> <span class="n">mu</span> <span class="n">est_mu</span> <span class="o">=</span> <span class="kc">False</span> <span class="k">if</span> <span class="n">initscale</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span> <span class="n">scale</span> <span class="o">=</span> <span class="n">mad</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">axis</span><span class="o">=</span><span class="n">axis</span><span class="p">)</span> <span class="k">else</span><span class="p">:</span> <span class="n">scale</span> <span class="o">=</span> <span class="n">initscale</span> <span class="n">scale</span> <span class="o">=</span> <span class="n">tools</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="n">scale</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="n">mu</span> <span class="o">=</span> <span class="n">tools</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="n">mu</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span 
class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_estimate_both</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">mu</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">est_mu</span><span class="p">,</span> <span class="n">n</span><span class="p">)</span></div> <span class="k">def</span> <span class="nf">_estimate_both</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">a</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">mu</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">est_mu</span><span class="p">,</span> <span class="n">n</span><span class="p">):</span> <span class="sd">&quot;&quot;&quot;</span> <span class="sd"> Estimate scale and location simultaneously with the following</span> <span class="sd"> pseudo_loop:</span> <span class="sd"> while not_converged:</span> <span class="sd"> mu, scale = estimate_location(a, scale, mu), estimate_scale(a, scale, mu)</span> <span class="sd"> where estimate_location is an M-estimator and estimate_scale implements</span> <span class="sd"> the check used in Section 5.5 of Venables &amp; Ripley</span> <span class="sd"> &quot;&quot;&quot;</span> <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">maxiter</span><span class="p">):</span> <span class="c1"># Estimate the mean along a given axis</span> <span class="k">if</span> <span class="n">est_mu</span><span class="p">:</span> <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">norm</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span> <span class="c1"># This is a one-step fixed-point estimator</span> <span class="c1"># if self.norm == norms.HuberT</span> <span class="c1"># It should be faster than using norms.HuberT</span> <span class="n">nmu</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">clip</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">mu</span><span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">c</span><span class="o">*</span><span class="n">scale</span><span class="p">,</span> <span class="n">mu</span><span class="o">+</span><span class="bp">self</span><span class="o">.</span><span class="n">c</span><span class="o">*</span><span class="n">scale</span><span class="p">)</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">axis</span><span class="p">)</span> <span class="o">/</span> <span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="k">else</span><span class="p">:</span> <span class="n">nmu</span> <span class="o">=</span> <span class="n">norms</span><span class="o">.</span><span class="n">estimate_location</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span 
class="bp">self</span><span class="o">.</span><span class="n">norm</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">mu</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxiter</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">tol</span><span class="p">)</span> <span class="k">else</span><span class="p">:</span> <span class="c1"># Effectively, do nothing</span> <span class="n">nmu</span> <span class="o">=</span> <span class="n">mu</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span> <span class="n">nmu</span> <span class="o">=</span> <span class="n">tools</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="n">nmu</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="n">subset</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">less_equal</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">fabs</span><span class="p">((</span><span class="n">a</span> <span class="o">-</span> <span class="n">mu</span><span class="p">)</span><span class="o">/</span><span class="n">scale</span><span class="p">),</span> <span class="bp">self</span><span class="o">.</span><span class="n">c</span><span class="p">)</span> <span class="n">card</span> <span class="o">=</span> <span class="n">subset</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">axis</span><span class="p">)</span> <span class="n">nscale</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">subset</span> <span class="o">*</span> <span class="p">(</span><span class="n">a</span> <span class="o">-</span> <span class="n">nmu</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span><span class="p">,</span> <span class="n">axis</span><span class="p">)</span> \ <span class="o">/</span> <span class="p">(</span><span class="n">n</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">-</span> <span class="p">(</span><span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="n">axis</span><span class="p">]</span> <span class="o">-</span> <span class="n">card</span><span class="p">)</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">c</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span> <span class="n">nscale</span> <span class="o">=</span> <span class="n">tools</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="n">nscale</span><span class="p">,</span> <span class="n">axis</span><span class="p">,</span> <span class="n">a</span><span class="o">.</span><span class="n">shape</span><span class="p">)</span> <span class="n">test1</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">alltrue</span><span class="p">(</span><span class="n">np</span><span 
class="o">.</span><span class="n">less_equal</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">fabs</span><span class="p">(</span><span class="n">scale</span> <span class="o">-</span> <span class="n">nscale</span><span class="p">),</span> <span class="n">nscale</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">tol</span><span class="p">))</span> <span class="n">test2</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">alltrue</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">less_equal</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">fabs</span><span class="p">(</span><span class="n">mu</span> <span class="o">-</span> <span class="n">nmu</span><span class="p">),</span> <span class="n">nscale</span><span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">tol</span><span class="p">))</span> <span class="k">if</span> <span class="ow">not</span> <span class="p">(</span><span class="n">test1</span> <span class="ow">and</span> <span class="n">test2</span><span class="p">):</span> <span class="n">mu</span> <span class="o">=</span> <span class="n">nmu</span> <span class="n">scale</span> <span class="o">=</span> <span class="n">nscale</span> <span class="k">else</span><span class="p">:</span> <span class="k">return</span> <span class="n">nmu</span><span class="o">.</span><span class="n">squeeze</span><span class="p">(),</span> <span class="n">nscale</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span> <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s1">&#39;joint estimation of location and scale failed to converge in </span><span class="si">%d</span><span class="s1"> iterations&#39;</span> <span class="o">%</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxiter</span><span class="p">)</span></div> <span class="n">huber</span> <span class="o">=</span> <span class="n">Huber</span><span class="p">()</span> <div class="viewcode-block" id="HuberScale"><a class="viewcode-back" href="../../../generated/statsmodels.robust.scale.HuberScale.html#statsmodels.robust.scale.HuberScale">[docs]</a><span class="k">class</span> <span class="nc">HuberScale</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span> <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span> <span class="sd"> Huber&#39;s scaling for fitting robust linear models.</span> <span class="sd"> Huber&#39;s scale is intended to be used as the scale estimate in the</span> <span class="sd"> IRLS algorithm and is slightly different than the `Huber` class.</span> <span class="sd"> Parameters</span> <span class="sd"> ----------</span> <span class="sd"> d : float, optional</span> <span class="sd"> d is the tuning constant for Huber&#39;s scale. Default is 2.5</span> <span class="sd"> tol : float, optional</span> <span class="sd"> The convergence tolerance</span> <span class="sd"> maxiter : int, optiona</span> <span class="sd"> The maximum number of iterations. 
The default is 30.</span> <span class="sd"> Methods</span> <span class="sd"> -------</span> <span class="sd"> call</span> <span class="sd"> Return&#39;s Huber&#39;s scale computed as below</span> <span class="sd"> Notes</span> <span class="sd"> --------</span> <span class="sd"> Huber&#39;s scale is the iterative solution to</span> <span class="sd"> scale_(i+1)**2 = 1/(n*h)*sum(chi(r/sigma_i)*sigma_i**2</span> <span class="sd"> where the Huber function is</span> <span class="sd"> chi(x) = (x**2)/2 for \|x\| &lt; d</span> <span class="sd"> chi(x) = (d**2)/2 for \|x\| &gt;= d</span> <span class="sd"> and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\</span> <span class="sd"> scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2)</span> <span class="sd"> &quot;&quot;&quot;</span> <span class="k">def</span> <span class="nf">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">d</span><span class="o">=</span><span class="mf">2.5</span><span class="p">,</span> <span class="n">tol</span><span class="o">=</span><span class="mf">1e-08</span><span class="p">,</span> <span class="n">maxiter</span><span class="o">=</span><span class="mi">30</span><span class="p">):</span> <span class="bp">self</span><span class="o">.</span><span class="n">d</span> <span class="o">=</span> <span class="n">d</span> <span class="bp">self</span><span class="o">.</span><span class="n">tol</span> <span class="o">=</span> <span class="n">tol</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxiter</span> <span class="o">=</span> <span class="n">maxiter</span> <div class="viewcode-block" id="HuberScale.__call__"><a class="viewcode-back" href="../../../generated/statsmodels.robust.scale.HuberScale.__call__.html#statsmodels.robust.scale.HuberScale.__call__">[docs]</a> <span class="k">def</span> <span class="nf">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">df_resid</span><span class="p">,</span> <span class="n">nobs</span><span class="p">,</span> <span class="n">resid</span><span class="p">):</span> <span class="n">h</span> <span class="o">=</span> <span class="p">(</span><span class="n">df_resid</span><span class="p">)</span><span class="o">/</span><span class="n">nobs</span><span class="o">*</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="o">**</span><span class="mi">2</span> <span class="o">+</span> <span class="p">(</span><span class="mi">1</span><span class="o">-</span><span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="o">**</span><span class="mi">2</span><span class="p">)</span><span class="o">*</span>\ <span class="n">Gaussian</span><span class="o">.</span><span class="n">cdf</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="p">)</span><span class="o">-.</span><span class="mi">5</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="o">/</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="mi">2</span><span class="o">*</span><span class="n">np</span><span class="o">.</span><span class="n">pi</span><span class="p">))</span><span class="o">*</span>\ <span class="n">np</span><span class="o">.</span><span class="n">exp</span><span 
class="p">(</span><span class="o">-.</span><span class="mi">5</span><span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span> <span class="n">s</span> <span class="o">=</span> <span class="n">mad</span><span class="p">(</span><span class="n">resid</span><span class="p">)</span> <span class="n">subset</span> <span class="o">=</span> <span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">np</span><span class="o">.</span><span class="n">less</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">fabs</span><span class="p">(</span><span class="n">resid</span><span class="o">/</span><span class="n">x</span><span class="p">),</span><span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="p">)</span> <span class="n">chi</span> <span class="o">=</span> <span class="k">lambda</span> <span class="n">s</span><span class="p">:</span> <span class="n">subset</span><span class="p">(</span><span class="n">s</span><span class="p">)</span><span class="o">*</span><span class="p">(</span><span class="n">resid</span><span class="o">/</span><span class="n">s</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span><span class="o">/</span><span class="mi">2</span><span class="o">+</span><span class="p">(</span><span class="mi">1</span><span class="o">-</span><span class="n">subset</span><span class="p">(</span><span class="n">s</span><span class="p">))</span><span class="o">*</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">d</span><span class="o">**</span><span class="mi">2</span><span class="o">/</span><span class="mi">2</span><span class="p">)</span> <span class="n">scalehist</span> <span class="o">=</span> <span class="p">[</span><span class="n">np</span><span class="o">.</span><span class="n">inf</span><span class="p">,</span><span class="n">s</span><span class="p">]</span> <span class="n">niter</span> <span class="o">=</span> <span class="mi">1</span> <span class="k">while</span> <span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">abs</span><span class="p">(</span><span class="n">scalehist</span><span class="p">[</span><span class="n">niter</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">-</span> <span class="n">scalehist</span><span class="p">[</span><span class="n">niter</span><span class="p">])</span><span class="o">&gt;</span><span class="bp">self</span><span class="o">.</span><span class="n">tol</span> \ <span class="ow">and</span> <span class="n">niter</span> <span class="o">&lt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxiter</span><span class="p">):</span> <span class="n">nscale</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">sqrt</span><span class="p">(</span><span class="mi">1</span><span class="o">/</span><span class="p">(</span><span class="n">nobs</span><span class="o">*</span><span class="n">h</span><span class="p">)</span><span class="o">*</span><span class="n">np</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">chi</span><span class="p">(</span><span class="n">scalehist</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]))</span><span 
class="o">*</span>\ <span class="n">scalehist</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span><span class="o">**</span><span class="mi">2</span><span class="p">)</span> <span class="n">scalehist</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">nscale</span><span class="p">)</span> <span class="n">niter</span> <span class="o">+=</span> <span class="mi">1</span> <span class="c1">#if niter == self.maxiter:</span> <span class="c1"># raise ValueError(&quot;Huber&#39;s scale failed to converge&quot;)</span> <span class="k">return</span> <span class="n">scalehist</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span></div></div> <span class="n">hubers_scale</span> <span class="o">=</span> <span class="n">HuberScale</span><span class="p">()</span> </pre></div> </div> </div> </div> <div class="sphinxsidebar" role="navigation" aria-label="main navigation"> <div class="sphinxsidebarwrapper"> <div id="searchbox" style="display: none" role="search"> <h3 id="searchlabel">Quick search</h3> <div class="searchformwrapper"> <form class="search" action="../../../search.html" method="get"> <input type="text" name="q" aria-labelledby="searchlabel" /> <input type="submit" value="Go" /> </form> </div> </div> <script type="text/javascript">$('#searchbox').show(0);</script> </div> </div> <div class="clearer"></div> </div> <div class="footer" role="contentinfo"> &#169; Copyright 2009-2018, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 2.1.2. </div> </body> </html>
statsmodels/statsmodels.github.io
v0.10.0/_modules/statsmodels/robust/scale.html
HTML
bsd-3-clause
37,935
/************************************************************************/
/*                                                                      */
/* PmodR2R.h -- Driver for the PmodR2R, a Pmod which uses GPIO          */
/*                                                                      */
/************************************************************************/
/* Author: Thomas Kappenman, Arthur Brown                               */
/* Copyright 2015, Digilent Inc.                                        */
/************************************************************************/
/*
  This library is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or (at
  your option) any later version.

  This library is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with this library; if not, write to the Free Software Foundation,
  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/************************************************************************/
/* File Description:                                                    */
/*                                                                      */
/* This file contains the type and function declarations for the       */
/* PmodR2R driver library.                                              */
/*                                                                      */
/************************************************************************/
/* Revision History:                                                    */
/*                                                                      */
/* 04/19/2016(TommyK): Created                                          */
/* 06/13/2016(ArtVVB): Edited for PmodR2R                               */
/*                                                                      */
/************************************************************************/

#ifndef PmodR2R_H
#define PmodR2R_H

/****************** Include Files ********************/

#include "xil_types.h"
#include "xstatus.h"

/* ------------------------------------------------------------ */
/*                      Definitions                              */
/* ------------------------------------------------------------ */

#define bool u8
#define true 1
#define false 0

typedef struct PmodR2R{
    u32 GPIO_addr;
}PmodR2R;

void R2R_begin(PmodR2R* InstancePtr, u32 GPIO_Address);
void R2R_writeVoltage(PmodR2R* InstancePtr, double voltage);
void R2R_delay(int millis);

#endif // PmodR2R_H
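The header only declares the three driver calls, so a minimal usage sketch may help. It is an illustration under stated assumptions, not part of the Digilent sources: the base-address macro name is hypothetical (the real one comes from the `xparameters.h` generated for your block design), and the 3.3 V output range is assumed.

```c
/* Hypothetical usage sketch for the PmodR2R driver declared above.       */
/* XPAR_PMODR2R_0_AXI_LITE_GPIO_BASEADDR is an assumed macro name; take   */
/* the real value from your project's generated xparameters.h.            */
#include "PmodR2R.h"

#define R2R_BASEADDR XPAR_PMODR2R_0_AXI_LITE_GPIO_BASEADDR /* assumed name */

int main(void)
{
    PmodR2R r2r;

    R2R_begin(&r2r, R2R_BASEADDR);     /* bind the driver struct to the GPIO block */

    while (1) {
        R2R_writeVoltage(&r2r, 1.65);  /* roughly half of an assumed 3.3 V swing */
        R2R_delay(500);
        R2R_writeVoltage(&r2r, 0.0);
        R2R_delay(500);
    }

    return 0;
}
```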
AEW2015/PYNQ_PR_Overlay
Pynq-Z1/vivado/ip/Pmods/PmodR2R_v1_0/drivers/PmodR2R_v1_0/src/PmodR2R.h
C
bsd-3-clause
2,337
// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ash/policy/networking/user_network_configuration_updater.h" #include <utility> #include "base/bind.h" #include "base/callback_helpers.h" #include "base/check_op.h" #include "base/task/bind_post_task.h" #include "base/threading/sequenced_task_runner_handle.h" #include "base/values.h" #include "chrome/browser/ash/login/session/user_session_manager.h" #include "chrome/browser/chrome_notification_types.h" #include "chrome/browser/net/nss_service.h" #include "chrome/browser/net/nss_service_factory.h" #include "chrome/browser/profiles/profile.h" #include "chromeos/network/managed_network_configuration_handler.h" #include "chromeos/network/network_cert_loader.h" #include "chromeos/network/onc/onc_certificate_importer_impl.h" #include "chromeos/network/onc/onc_parsed_certificates.h" #include "chromeos/network/onc/onc_utils.h" #include "components/policy/policy_constants.h" #include "components/user_manager/user.h" #include "content/public/browser/browser_task_traits.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/notification_source.h" namespace policy { namespace { void GetNssCertDatabaseOnIOThread( NssCertDatabaseGetter database_getter, base::OnceCallback<void(net::NSSCertDatabase*)> callback) { DCHECK_CURRENTLY_ON(content::BrowserThread::IO); auto split_callback = base::SplitOnceCallback(std::move(callback)); net::NSSCertDatabase* cert_db = std::move(database_getter).Run(std::move(split_callback.first)); if (cert_db) std::move(split_callback.second).Run(cert_db); } } // namespace UserNetworkConfigurationUpdater::~UserNetworkConfigurationUpdater() { // NetworkCertLoader may be not initialized in tests. if (chromeos::NetworkCertLoader::IsInitialized()) { chromeos::NetworkCertLoader::Get()->SetUserPolicyCertificateProvider( nullptr); } } // static std::unique_ptr<UserNetworkConfigurationUpdater> UserNetworkConfigurationUpdater::CreateForUserPolicy( Profile* profile, const user_manager::User& user, PolicyService* policy_service, chromeos::ManagedNetworkConfigurationHandler* network_config_handler) { std::unique_ptr<UserNetworkConfigurationUpdater> updater( new UserNetworkConfigurationUpdater(profile, user, policy_service, network_config_handler)); updater->Init(); return updater; } void UserNetworkConfigurationUpdater::SetClientCertificateImporterForTest( std::unique_ptr<chromeos::onc::CertificateImporter> client_certificate_importer) { SetClientCertificateImporter(std::move(client_certificate_importer)); } // static bool UserNetworkConfigurationUpdater::PolicyHasWebTrustedAuthorityCertificate( const PolicyMap& policy_map) { return NetworkConfigurationUpdater::PolicyHasWebTrustedAuthorityCertificate( policy_map, onc::ONC_SOURCE_USER_POLICY, key::kOpenNetworkConfiguration); } UserNetworkConfigurationUpdater::UserNetworkConfigurationUpdater( Profile* profile, const user_manager::User& user, PolicyService* policy_service, chromeos::ManagedNetworkConfigurationHandler* network_config_handler) : NetworkConfigurationUpdater(onc::ONC_SOURCE_USER_POLICY, key::kOpenNetworkConfiguration, policy_service, network_config_handler), user_(&user) { // The updater is created with |client_certificate_importer_| unset and is // responsible for creating it. This requires |GetNSSCertDatabaseForProfile| // call, which is not safe before the profile initialization is finalized. 
// Thus, listen for PROFILE_ADDED notification, on which |cert_importer_| // creation should start. https://crbug.com/171406 registrar_.Add(this, chrome::NOTIFICATION_PROFILE_ADDED, content::Source<Profile>(profile)); // Make sure that the |NetworkCertLoader| which makes certificates available // to the chromeos network code gets policy-pushed certificates from the // primary profile. This assumes that a |UserNetworkConfigurationUpdater| is // only created for the primary profile. NetworkCertLoader may be not // initialized in tests. if (chromeos::NetworkCertLoader::IsInitialized()) chromeos::NetworkCertLoader::Get()->SetUserPolicyCertificateProvider(this); } void UserNetworkConfigurationUpdater::ImportClientCertificates() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); // If certificate importer is not yet set, the import of client certificates // will be re-triggered in SetClientCertificateImporter. if (client_certificate_importer_) { client_certificate_importer_->ImportClientCertificates( GetClientCertificates(), base::DoNothing()); } } void UserNetworkConfigurationUpdater::ApplyNetworkPolicy( base::ListValue* network_configs_onc, base::DictionaryValue* global_network_config) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(user_); chromeos::onc::ExpandStringPlaceholdersInNetworksForUser(user_, network_configs_onc); // Call on UserSessionManager to send the user's password to session manager // if the password substitution variable exists in the ONC. bool save_password = chromeos::onc::HasUserPasswordSubsitutionVariable(network_configs_onc); ash::UserSessionManager::GetInstance()->VoteForSavingLoginPassword( ash::UserSessionManager::PasswordConsumingService::kNetwork, save_password); network_config_handler_->SetPolicy(onc_source_, user_->username_hash(), *network_configs_onc, *global_network_config); } void UserNetworkConfigurationUpdater::Observe( int type, const content::NotificationSource& source, const content::NotificationDetails& details) { DCHECK_EQ(type, chrome::NOTIFICATION_PROFILE_ADDED); Profile* profile = content::Source<Profile>(source).ptr(); // Note: This unsafely grabs a persistent reference to the `NssService`'s // `NSSCertDatabase`, which may be invalidated once `profile` is shut down. // TODO(https://crbug.com/1186373): Provide better lifetime guarantees and // pass the `NssCertDatabaseGetter` to the `CertificateImporterImpl`. 
content::GetIOThreadTaskRunner({})->PostTask( FROM_HERE, base::BindOnce( &GetNssCertDatabaseOnIOThread, NssServiceFactory::GetForContext(profile) ->CreateNSSCertDatabaseGetterForIOThread(), base::BindPostTask( base::SequencedTaskRunnerHandle::Get(), base::BindOnce(&UserNetworkConfigurationUpdater:: CreateAndSetClientCertificateImporter, weak_factory_.GetWeakPtr())))); } void UserNetworkConfigurationUpdater::CreateAndSetClientCertificateImporter( net::NSSCertDatabase* database) { DCHECK(database); SetClientCertificateImporter( std::make_unique<chromeos::onc::CertificateImporterImpl>( content::GetIOThreadTaskRunner({}), database)); } void UserNetworkConfigurationUpdater::SetClientCertificateImporter( std::unique_ptr<chromeos::onc::CertificateImporter> client_certificate_importer) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); bool initial_client_certificate_importer = client_certificate_importer_ == nullptr; client_certificate_importer_ = std::move(client_certificate_importer); if (initial_client_certificate_importer && !GetClientCertificates().empty()) { client_certificate_importer_->ImportClientCertificates( GetClientCertificates(), base::DoNothing()); } } } // namespace policy
scheib/chromium
chrome/browser/ash/policy/networking/user_network_configuration_updater.cc
C++
bsd-3-clause
7,921
/*jslint indent: 2 */ /*global document: false, $: false, localStorage: false, chrome:false*/ 'use strict'; var userData, offlineUser; offlineUser = localStorage.getItem('offline_users'); if (offlineUser) { userData = JSON.parse(localStorage.getItem('offline_users-' + offlineUser)); if (userData && userData.offlineData) { chrome.extension.sendMessage({ type: 'userToken', apiToken: userData.offlineData.api_token }); } } (function () { var version, source, s; version = chrome.runtime.getManifest().version; source = 'window.TogglButton = { version: "' + version + '" }'; s = document.createElement('script'); s.textContent = source; document.body.appendChild(s); }()); document.addEventListener('webkitvisibilitychange', function () { if (!document.webkitHidden) { chrome.extension.sendMessage({type: "sync"}, function () {return; }); } }); chrome.extension.sendMessage({type: "sync"}, function () {return; });
eatskolnikov/toggl-button
src/scripts/content/toggl.js
JavaScript
bsd-3-clause
966
<?php namespace Chippyash\Test\Validation\Common; use Chippyash\Validation\Common\ZFValidator as ZendBaseValidator; use Chippyash\Validation\Messenger; use PHPUnit\Framework\TestCase; /** * Generated by PHPUnit_SkeletonGenerator 1.2.1 on 2013-12-08 at 19:09:41. */ class ZendTest extends TestCase { /** * Default set of validators * * @var array */ /** * a set of validators * * @var array */ protected $invokableClasses = array( 'alnum' => 'Laminas\I18n\Validator\Alnum', 'alpha' => 'Laminas\I18n\Validator\Alpha', 'barcode' => 'Laminas\Validator\Barcode', 'between' => 'Laminas\Validator\Between', 'float' => 'Laminas\I18n\Validator\IsFloat', 'int' => 'Laminas\I18n\Validator\IsInt', 'postcode' => 'Laminas\I18n\Validator\PostCode', ); /** * @var Messenger */ protected $messenger; public function setUp(): void { $this->messenger = new Messenger(); } /** * @dataProvider zendValidators * @requires extension intl */ public function testWillReturnCorrectResponseForTestDataSetViaMagicInvokeMethod($baseName, $constructParams, $trueValue, $falseValue) { $className = $this->invokableClasses[$baseName]; $object = new ZendBaseValidator(new $className($constructParams)); $this->assertTrue($object($trueValue, $this->messenger)); $this->assertFalse($object($falseValue, $this->messenger)); } /** * @dataProvider zendValidators * @requires extension intl */ public function testWillReturnCorrectResponseForTestDataSetViaIsValidMethod($baseName, $constructParams, $trueValue, $falseValue) { $className = $this->invokableClasses[$baseName]; $object = new ZendBaseValidator(new $className($constructParams)); $this->assertTrue($object->isValid($trueValue)); $this->assertFalse($object->isValid($falseValue)); } public function testYouCanGetTheUnderlyingZendErrorMessages() { $object = new ZendBaseValidator(new \Laminas\I18n\Validator\Alnum()); $this->assertFalse($object->isValid('')); $this->assertEquals(array('The input is an empty string'), $object->getMessages()); } /** * Tests of Laminas validators * We are not testing all of them! * */ public function zendValidators() { return array( array('alnum', false,'hw1234',''), array('alpha', null,'abcd','1234'), array('barcode', array('adapter'=> 'Code25interleaved'),'1234567895','HHHHHH'), array('between', array('min'=>2,'max'=>5),3,6), array('float', array(),'1234.56','ah67.3'), array('int', array(),'12345','1d456'), array('postcode', array('locale'=>'en-GB'),'NN10 6AG','ZZ2 FY1'), ); } }
chippyash/Validation
test/Chippyash/Test/Validation/Common/ZendTest.php
PHP
bsd-3-clause
3,032
// SPDX-License-Identifier: BSD-3-Clause package org.xbill.DNS.dnssec; import static org.junit.jupiter.api.Assertions.assertEquals; import java.net.InetAddress; import java.net.UnknownHostException; import java.time.Instant; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.xbill.DNS.ARecord; import org.xbill.DNS.DClass; import org.xbill.DNS.ExtendedErrorCodeOption; import org.xbill.DNS.Message; import org.xbill.DNS.Name; import org.xbill.DNS.OPTRecord; import org.xbill.DNS.RRset; import org.xbill.DNS.Record; import org.xbill.DNS.Section; import org.xbill.DNS.Type; /** * These test run checks that are unable to occur during actual validations. * * @author Ingo Bauersachs */ class TestNormallyUnreachableCode { private InetAddress localhost; @BeforeEach void setUp() throws UnknownHostException { localhost = InetAddress.getByAddress(new byte[] {127, 0, 0, 1}); } @Test void testVerifyWithoutSignaturesIsBogus() { DnsSecVerifier verifier = new DnsSecVerifier(); ARecord record = new ARecord(Name.root, DClass.IN, 120, localhost); SRRset set = new SRRset(); set.addRR(record); RRset keys = new RRset(); JustifiedSecStatus res = verifier.verify(set, keys, Instant.now()); assertEquals(SecurityStatus.BOGUS, res.status); assertEquals(ExtendedErrorCodeOption.RRSIGS_MISSING, res.edeReason); } @Test void useAllEnumCode() { assertEquals( SecurityStatus.UNCHECKED, SecurityStatus.valueOf(SecurityStatus.values()[0].toString())); assertEquals( ResponseClassification.UNKNOWN, ResponseClassification.valueOf(ResponseClassification.values()[0].toString())); } @Test void testSmessageReturnsOptRecordOfOriginal() { int xrcode = 0xFED; Message m = Message.newQuery(Record.newRecord(Name.root, Type.NS, DClass.IN)); m.getHeader().setRcode(xrcode & 0xF); m.addRecord(new OPTRecord(1, xrcode >> 4, 1), Section.ADDITIONAL); SMessage sm = new SMessage(m); assertEquals(m.toString(), sm.getMessage().toString()); assertEquals(xrcode, sm.getRcode()); } @Test void testCopyMessageWithoutQuestion() { Message m = new Message(); m.addRecord(new ARecord(Name.root, DClass.IN, 120, localhost), Section.ANSWER); SMessage sm = new SMessage(m); assertEquals(m.toString(), sm.getMessage().toString()); } }
dnsjava/dnsjava
src/test/java/org/xbill/DNS/dnssec/TestNormallyUnreachableCode.java
Java
bsd-3-clause
2,380
## General Concepts

The following concepts are the same for every endpoint in the API except when it's noted explicitly.

### Auth

By default, users do not have access to private tables in CARTO. In order to instantiate a map from private table data, an API Key is required. Additionally, an API Key is also required to use some of the API endpoints (e.g. to create a Named Map).

To execute an authorized request, `api_key=YOURAPIKEY` should be added to the request URL. The parameter can also be passed as a POST param. Using HTTPS is mandatory when you are performing requests that include your `api_key`.

### Errors

Errors are reported using standard HTTP codes and extended information encoded in JSON with this format:

```javascript
{
  "errors": [
    "access forbidden to table TABLE"
  ]
}
```

If you use JSONP, the 200 HTTP code is always returned so the JavaScript client can receive errors from the JSON object.

### CORS Support

All the endpoints, which might be accessed using a web browser, add CORS headers and allow the OPTIONS method.

### Map Tile Rendering

Map tiles create the graphical representation of your map in a web browser. The rendering performance of map tiles depends on the type of geospatial data model (raster or vector) that you are using.

- **Raster**: Generates map tiles based on a grid of pixels to represent your data. Each cell is a fixed size and contains values for particular map features. On the server-side, each request queries a dataset to retrieve data for each map tile. The grid size of map tiles can often lead to graphic quality issues.

- **Vector**: Generates map tiles based on pre-defined coordinates to represent your data, similar to how basemap image tiles are rendered. On the client-side, map tiles represent real-world geometries of a map. Depending on the coordinates, vertices are used to connect the data and display points, lines, or polygons for the map tiles.

**Note:** By default, CARTO uses vector graphics for map rendering. Please [contact us](mailto:support@carto.com) if you need raster rendering enabled as part of your requirements.

### Mapbox Vector Tiles (MVT)

[Mapbox Vector Tiles (MVT)](https://www.mapbox.com/vector-tiles/specification/) are map tiles that store geographic vector data on the client-side. Browser performance is fast since you can pan and zoom without having to query the server.

CARTO uses the Web Graphics Library (WebGL) to process MVT files. This is useful since WebGL is compatible with most web browsers, supports multiple client-side mapping engines, and does not require additional information from the server, which makes it more efficient for rendering map tiles.

**Tip:** You can process MVT files with the [`ST_AsMVT` PostGIS function](https://postgis.net/docs/manual-dev/ST_AsMVT.html) with the [Maps API Windshaft renderer](https://github.com/CartoDB/Windshaft/blob/1000x/lib/windshaft/renderers/pg_mvt/renderer.js).
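As a concrete illustration of the `ST_AsMVT` tip above, a tile query along the following lines can be run directly in PostGIS. This is a sketch only, not part of the CARTO documentation: the table and column names (`mytable`, `the_geom_webmercator`) and the tile envelope (here, the whole Web Mercator extent, i.e. zoom 0) are placeholders.

```sql
-- Sketch: encode one layer of a single tile as MVT (names are placeholders).
SELECT ST_AsMVT(q, 'layer0', 4096, 'geom')
FROM (
  SELECT
    id,
    ST_AsMVTGeom(
      the_geom_webmercator,                          -- geometry in Web Mercator (EPSG:3857)
      ST_MakeEnvelope(-20037508.34, -20037508.34,    -- tile bounds: whole world at zoom 0
                       20037508.34,  20037508.34, 3857),
      4096, 256, true                                -- extent, buffer, clip
    ) AS geom
  FROM mytable
) AS q;
```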
CartoDB/Windshaft-cartodb
docs/guides/02-general-concepts.md
Markdown
bsd-3-clause
2,957
import Rx from 'rxjs/Rx'; import { createMessage, } from '../kernel/messaging'; import { COMM_OPEN, COMM_MESSAGE, COMM_ERROR, NEW_KERNEL, } from '../constants'; /** * creates a comm open message * @param {string} comm_id uuid * @param {string} target_name comm handler * @param {any} data up to the target handler * @param {string} target_module [Optional] used to select a module that is responsible for handling the target_name * @return {jmp.Message} Message ready to send on the shell channel */ export function createCommOpenMessage(comm_id, target_name, data = {}, target_module) { const msg = createMessage('comm_open', { content: { comm_id, target_name, data } }); if (target_module) { msg.content.target_module = target_module; } return msg; } /** * creates a comm message for sending to a kernel * @param {string} comm_id unique identifier for the comm * @param {Object} data any data to send for the comm * @param {Uint8Array} buffers arbitrary binary data to send on the comm * @return {jmp.Message} jupyter message for comm_msg */ export function createCommMessage(comm_id, data = {}, buffers = new Uint8Array()) { return createMessage('comm_msg', { content: { comm_id, data }, buffers }); } /** * creates a comm close message for sending to a kernel * @param {Object} parent_header header from a parent jupyter message * @param {string} comm_id unique identifier for the comm * @param {Object} data any data to send for the comm * @return {jmp.Message} jupyter message for comm_msg */ export function createCommCloseMessage(parent_header, comm_id, data = {}) { return createMessage('comm_close', { content: { comm_id, data }, parent_header }); } /** * creates a comm error action * @param {error} error any type of error to pass on * @return {Object} Flux standard error action */ export const createCommErrorAction = error => Rx.Observable.of({ type: COMM_ERROR, payload: error, error: true, }); /** * Action creator for comm_open messages * @param {jmp.Message} a comm_open message * @return {Object} COMM_OPEN action */ export function commOpenAction(message) { // invariant: expects a comm_open message return { type: COMM_OPEN, data: message.content.data, metadata: message.content.metadata, comm_id: message.content.comm_id, target_name: message.content.target_name, target_module: message.content.target_module, // Pass through the buffers buffers: message.blob || message.buffers, // NOTE: Naming inconsistent between jupyter notebook and jmp // see https://github.com/n-riesco/jmp/issues/14 // We just expect either one }; } /** * Action creator for comm_msg messages * @param {jmp.Message} a comm_msg * @return {Object} COMM_MESSAGE action */ export function commMessageAction(message) { return { type: COMM_MESSAGE, comm_id: message.content.comm_id, data: message.content.data, // Pass through the buffers buffers: message.blob || message.buffers, // NOTE: Naming inconsistent between jupyter notebook and jmp // see https://github.com/n-riesco/jmp/issues/14 // We just expect either one }; } /** * creates all comm related actions given a new kernel action * @param {Object} newKernelAction a NEW_KERNEL action * @return {ActionsObservable} all actions resulting from comm messages on this kernel */ export function commActionObservable(newKernelAction) { const commOpenAction$ = newKernelAction.channels.iopub .ofMessageType(['comm_open']) .map(commOpenAction); const commMessageAction$ = newKernelAction.channels.iopub .ofMessageType(['comm_msg']) .map(commMessageAction); return Rx.Observable.merge( commOpenAction$, 
commMessageAction$ ).retry(); } /** * An epic that emits comm actions from the backend kernel * @param {ActionsObservable} action$ Action Observable from redux-observable * @param {redux.Store} store the redux store * @return {ActionsObservable} Comm actions */ export const commListenEpic = action$ => action$.ofType(NEW_KERNEL) // We have a new channel .switchMap(commActionObservable);
0u812/nteract
src/notebook/epics/comm.js
JavaScript
bsd-3-clause
4,327
--
-- Table structure for table `item`
--

CREATE TABLE IF NOT EXISTS `item` (
  `id` smallint(6) NOT NULL,
  `title` varchar(255) NOT NULL,
  `data` tinyint(3) unsigned NOT NULL DEFAULT '0',
  PRIMARY KEY( `id`, `data` )
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- --------------------------------------------------------

--
-- Table structure for table `recipe`
--

CREATE TABLE IF NOT EXISTS `recipe` (
  `id` mediumint(8) unsigned NOT NULL AUTO_INCREMENT,
  `target_item_id` smallint(5) unsigned NOT NULL,
  `target_item_data` tinyint(3) unsigned NOT NULL DEFAULT '0',
  `target_count` tinyint(3) unsigned NOT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- --------------------------------------------------------

--
-- Table structure for table `recipe_ingredient`
--

CREATE TABLE IF NOT EXISTS `recipe_ingredient` (
  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
  `position_x` tinyint(3) unsigned NOT NULL,
  `position_y` tinyint(3) unsigned NOT NULL,
  `recipe_id` mediumint(8) unsigned NOT NULL,
  `item_id` smallint(5) unsigned NOT NULL,
  `item_data` tinyint(3) unsigned NOT NULL DEFAULT '0',
  `item_count` tinyint(3) unsigned NOT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
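The three tables form a small crafting schema: `recipe` points at the item it produces, and `recipe_ingredient` places input items on a crafting grid. As a rough illustration of how they join, the query below is a sketch; it assumes the implied foreign-key relationships, which the schema does not declare explicitly.

```sql
-- Illustrative only: list each recipe's target item together with its ingredients.
SELECT
  r.id             AS recipe_id,
  target.title     AS target_item,
  r.target_count,
  ingredient.title AS ingredient_item,
  ri.item_count,
  ri.position_x,
  ri.position_y
FROM recipe AS r
JOIN item AS target
  ON target.id = r.target_item_id AND target.data = r.target_item_data
JOIN recipe_ingredient AS ri
  ON ri.recipe_id = r.id
JOIN item AS ingredient
  ON ingredient.id = ri.item_id AND ingredient.data = ri.item_data
ORDER BY r.id, ri.position_y, ri.position_x;
```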
thelfensdrfer/Minecraft-Helper
migration/schema.sql
SQL
bsd-3-clause
1,256
# $FreeBSD: src/sys/modules/accf_data/Makefile,v 1.1.2.1 2000/07/28 04:03:47 alfred Exp $ .PATH: ${.CURDIR}/../../netinet KMOD = accf_data SRCS = accf_data.c .include <bsd.kmod.mk>
MarginC/kame
freebsd4/sys/modules/accf_data/Makefile
Makefile
bsd-3-clause
183
package edu.lu.uni.serval.idempotent.comm import com.google.gson.{Gson, JsonObject} import com.rabbitmq.client.AMQP.BasicProperties import com.rabbitmq.client._ /** * Created by darkrsw on 2016/November/22. */ object ResultSender { var host = "localhost" var QUEUE_NAME = "test" var ALARM_QUEUE_NAME = "js-alarms" var LOG_QUEUE_NAME = "js-logs" var PLOG_QUEUE_NAME = "js-prj-logs" var CLOG_QUEUE_NAME = "js-commit-logs" var TASK_QUEUE_NAME = "js-task-q" var RABBITMQ_USER = "" var RABBITMQ_PASSWD = "" val gson = new Gson() var conn: Connection = _ var channel: Channel = _ def init(): Unit = { val factory = new ConnectionFactory() factory.setHost(host) factory.setUsername(RABBITMQ_USER) factory.setPassword(RABBITMQ_PASSWD) conn = factory.newConnection() channel = conn.createChannel() } def getOneTask(): String = { val response = channel.basicGet(TASK_QUEUE_NAME, true) if( response == null ) return null val msg = new String(response.getBody, "UTF-8") return msg } def sendResult(queue: String, msg: String) = { channel.synchronized { channel.basicPublish("", queue, null, msg.getBytes()) } Console.println("To [%s] Sent '".format(queue) + msg + "'") } def sendCommitLog(project: String, commit: String, result: String, errmsg: String) = { val root = new JsonObject() root.addProperty("project", project) root.addProperty("commit", commit) root.addProperty("result", result) root.addProperty("errmsg", errmsg) sendResult(CLOG_QUEUE_NAME, gson.toJson(root)) } def sendProjectLog(project: String, result: String, errmsg: String) = { val root = new JsonObject() root.addProperty("project", project) root.addProperty("result", result) root.addProperty("errmsg", errmsg) sendResult(PLOG_QUEUE_NAME, gson.toJson(root)) } def sendLog(project: String, filepath: String, commit: String, result: String, errmsg: String) = { val root = new JsonObject() root.addProperty("project", project) root.addProperty("commit", commit) root.addProperty("filepath", filepath) root.addProperty("result", result) root.addProperty("errmsg", errmsg) sendResult(LOG_QUEUE_NAME, gson.toJson(root)) } def sendAlarm(project: String, commit: String, alarm: String) = { val root = new JsonObject() root.addProperty("project", project) root.addProperty("commit", commit) root.addProperty("alarm", alarm) sendResult(ALARM_QUEUE_NAME, gson.toJson(root)) } def close(): Unit = { channel.close() conn.close() } def main(args: Array[String]): Unit = { // TODO: just for testing. val alarm = """"/content/cn/steedos/buy.html","12","139:5038","139:5086","6","Conditional expression 'listprice.trim() == '' || listprice_rmb.trim() == ''' is always true."""".stripMargin val gson = new Gson() val root = new JsonObject() root.addProperty("project", "steedos$steedos") root.addProperty("commit", "ba40bb8429a81ecae3aae4e5acabaa90b122b2a8") root.addProperty("alarm", alarm) sendResult("js-alarms", gson.toJson(root)) val logroot = new JsonObject() logroot.addProperty("project", "steedos$steedos") logroot.addProperty("commit", "ba40bb8429a81ecae3aae4e5acabaa90b122b2a8") logroot.addProperty("log", "SUCCESS=>23748") sendResult("js-logs", gson.toJson(logroot)) close() Runtime.getRuntime.exit(0) } }
darkrsw/safe
src/main/scala/edu/lu/uni/serval/idempotent/comm/ResultSender.scala
Scala
bsd-3-clause
3,508
module Spree class DefaultTaxZoneValidator < ActiveModel::Validator def validate(record) if record.included_in_price record.errors.add(:included_in_price, I18n.t(:included_price_validation)) unless Zone.default_tax end end end end module Spree class TaxRate < ActiveRecord::Base belongs_to :zone, :class_name => "Spree::Zone" belongs_to :tax_category, :class_name => "Spree::TaxCategory" validates :amount, :presence => true, :numericality => true validates :tax_category_id, :presence => true validates_with DefaultTaxZoneValidator calculated_adjustments scope :by_zone, lambda { |zone| where(:zone_id => zone) } attr_accessible :amount, :tax_category_id, :calculator, :zone_id, :included_in_price # Gets the array of TaxRates appropriate for the specified order def self.match(order) return [] unless order.tax_zone all.select do |rate| rate.zone == order.tax_zone || rate.zone.contains?(order.tax_zone) || rate.zone.default_tax end end # For Vat the default rate is the rate that is configured for the default category # It is needed for every price calculation (as all customer facing prices include vat ) # The function returns the actual amount, which may be 0 in case of wrong setup, but is never nil def self.default category = TaxCategory.includes(:tax_rates).where(:is_default => true).first return 0 unless category address ||= Address.new(:country_id => Spree::Config[:default_country_id]) rate = category.tax_rates.detect { |rate| rate.zone.include? address }.try(:amount) rate || 0 end # Creates necessary tax adjustments for the order. def adjust(order) label = create_label if included_in_price if Zone.default_tax.contains? order.tax_zone order.line_items.each { |line_item| create_adjustment(label, line_item, line_item) } else amount = -1 * calculator.compute(order) label = I18n.t(:refund) + label order.adjustments.create({ :amount => amount, :source => order, :originator => self, :locked => true, :label => label }, :without_protection => true) end else create_adjustment(label, order, order) end end private def create_label "#{tax_category.name} #{amount * 100}%" end end end
NEWECX/spree_core
app/models/spree/tax_rate.rb
Ruby
bsd-3-clause
2,548
<!-- BEGIN: title -->User comments component<!-- END: title --> <!-- BEGIN: content --> {FILE "bo/node/default_top.html"} <fieldset> <div class="row"> <span class="label"><label for="component-allow_anonymouse_submit">Allow anonymous posts</label></span> <span class="field"><input type="checkbox" id="component-allow_anonymouse_submit" name="node[component][allow_anonymouse_submit]" {NODE.component.allow_anonymouse_submit} /></span> </div> </fieldset> {FILE "bo/node/default_bottom.html"} <!-- END: content -->
1nv4d3r5/onxshop
templates/bo/node/content/comment.html
HTML
bsd-3-clause
520
<?php
// Prevents direct access to this file
if ( ! defined('ABSPATH')) exit;
?>

<div class="wrap">

    <?php
    // Number of posts per page
    $modelo->posts_por_pagina = 10;

    // List news items
    $lista = $modelo->listar_noticias();
    ?>

    <?php foreach( $lista as $noticia ):?>

    <!-- Title -->
    <h1>
        <a href="<?php echo HOME_URI?>/noticias/index/<?php echo $noticia['noticia_id']?>">
            <?php echo $noticia['noticia_titulo']?>
        </a>
    </h1>

    <?php
    // Check whether we are viewing a single news item
    if ( is_numeric( chk_array( $modelo->parametros, 0 ) ) ): // single
    ?>

    <p>
        <?php echo $modelo->inverte_data( $noticia['noticia_data'] );?> |
        <?php echo $noticia['noticia_autor'];?>
    </p>

    <p>
        <img src="<?php echo HOME_URI . '/views/_uploads/' . $noticia['noticia_imagem']; ?>">
    </p>

    <?php echo $noticia['noticia_texto'];?>

    <?php endif; // single ?>

    <?php endforeach; ?>

    <?php $modelo->paginacao();?>

</div> <!-- .wrap -->
Hieracles/front-pem
views/noticias/noticias-view.php
PHP
bsd-3-clause
971
<?php namespace app\modules\loader\models; use Yii; /** * This is the model class for table "1c_load". * * @property resource $blob_data * @property integer $id */ class Loader extends \yii\db\ActiveRecord { /** * @inheritdoc */ public static function tableName() { return '1c_load'; } /** * @inheritdoc */ public function rules() { return [ [['blob_data'], 'string'] ]; } /** * @inheritdoc */ public function attributeLabels() { return [ 'blob_data' => 'Blob Data', 'id' => 'ID', ]; } }
kd-brinex/kd
modules/loader/models/Loader.php
PHP
bsd-3-clause
653
Installing Yii
==============

You can install Yii in two ways, using [Composer](http://getcomposer.org/) or by downloading an archive file.
The former is the preferred way, as it allows you to install new [extensions](extend-creating-extensions.md) or update Yii by simply running a single command.

> Note: Unlike with Yii 1, standard installations of Yii 2 result in both the framework and an application skeleton being downloaded and installed.


Installing via Composer <a name="installing-via-composer"></a>
-----------------------

If you do not already have Composer installed, you may do so by following the instructions at
[getcomposer.org](https://getcomposer.org/download/). On Linux and Mac OS X, you'll run the following commands:

    curl -s http://getcomposer.org/installer | php
    mv composer.phar /usr/local/bin/composer

On Windows, you'll download and run [Composer-Setup.exe](https://getcomposer.org/Composer-Setup.exe).

Please refer to the [Composer Documentation](https://getcomposer.org/doc/) if you encounter any
problems or want to learn more about Composer usage.

With Composer installed, you can install Yii by running the following commands under a Web-accessible folder:

    composer global require "fxp/composer-asset-plugin:1.0.0-beta1"
    composer create-project --prefer-dist yiisoft/yii2-app-basic basic

The first command installs the [composer asset plugin](https://github.com/francoispluchino/composer-asset-plugin/),
which allows managing Bower and npm package dependencies through Composer. You only need to run this command once.
The second command installs Yii in a directory named `basic`.

> Tip: If you want to install the latest development version of Yii, you may use the following command,
> which adds a [stability option](https://getcomposer.org/doc/04-schema.md#minimum-stability):
>
>     composer create-project --prefer-dist --stability=dev yiisoft/yii2-app-basic basic
>
> Note that the development version of Yii should not be used for production as it may break your running code.


Installing from an Archive File <a name="installing-from-archive-file"></a>
-------------------------------

Installing Yii from an archive file involves three steps:

1. Download the archive file from [yiiframework.com](http://www.yiiframework.com/download/yii2-basic).
2. Unpack the downloaded file to a Web-accessible folder.
3. Modify the `config/web.php` file by entering a secret key for the `cookieValidationKey` configuration item
   (this is done automatically if you are installing Yii using Composer):

   ```php
   // !!! insert a secret key in the following (if it is empty) - this is required by cookie validation
   'cookieValidationKey' => 'enter your secret key here',
   ```


Other Installation Options <a name="other-installation-options"></a>
--------------------------

The above installation instructions show how to install Yii, which also creates a basic Web application that works out of the box.
This approach is a good starting point for small projects, or for when you are just starting to learn Yii.

But there are other installation options available:

* If you only want to install the core framework and would like to build an entire application from scratch,
  you may follow the instructions as explained in [Building Application from Scratch](tutorial-start-from-scratch.md).
* If you want to start with a more sophisticated application, better suited to team development environments,
  you may consider installing the [Advanced Application Template](tutorial-advanced-app.md).
Verifying the Installation <a name="verifying-installation"></a>
--------------------------

After installation, you can use your browser to access the installed Yii application with the following URL:

```
http://localhost/basic/web/index.php
```

This URL assumes you have installed Yii in a directory named `basic`, directly under the Web server's document root directory, and that the Web server is running on your local machine (`localhost`). You may need to adjust it to your installation environment.

![Successful Installation of Yii](images/start-app-installed.png)

You should see the above "Congratulations!" page in your browser. If not, please check if your PHP installation satisfies Yii's requirements. You can check if the minimum requirements are met using one of the following approaches:

* Use a browser to access the URL `http://localhost/basic/requirements.php`
* Run the following commands:

  ```
  cd basic
  php requirements.php
  ```

You should configure your PHP installation so that it meets the minimum requirements of Yii. Most importantly, you should have PHP 5.4 or above. You should also install the [PDO PHP Extension](http://www.php.net/manual/en/pdo.installation.php) and a corresponding database driver (such as `pdo_mysql` for MySQL databases), if your application needs a database.


Configuring Web Servers <a name="configuring-web-servers"></a>
-----------------------

> Info: You may skip this subsection for now if you are just test driving Yii with no intention of deploying it to a production server.

The application installed according to the above instructions should work out of the box with either an [Apache HTTP server](http://httpd.apache.org/) or an [Nginx HTTP server](http://nginx.org/), on Windows, Mac OS X, or Linux running PHP 5.4 or higher. Yii 2.0 is also compatible with Facebook's [HHVM](http://hhvm.com/); however, there are some edge cases where HHVM behaves differently from native PHP, so you have to take some extra care when using HHVM.

On a production server, you may want to configure your Web server so that the application can be accessed via the URL `http://www.example.com/index.php` instead of `http://www.example.com/basic/web/index.php`. Such configuration requires pointing the document root of your Web server to the `basic/web` folder. You may also want to hide `index.php` from the URL, as described in the [URL Parsing and Generation](runtime-url-handling.md) section. In this subsection, you'll learn how to configure your Apache or Nginx server to achieve these goals.

> Info: By setting `basic/web` as the document root, you also prevent end users from accessing your private application code and sensitive data files that are stored in the sibling directories of `basic/web`. Denying access to those other folders is a security improvement.

> Info: If your application will run in a shared hosting environment where you do not have permission to modify its Web server configuration, you may still adjust the structure of your application for better security. Please refer to the [Shared Hosting Environment](tutorial-shared-hosting.md) section for more details.


### Recommended Apache Configuration <a name="recommended-apache-configuration"></a>

Use the following configuration in Apache's `httpd.conf` file or within a virtual host configuration. Note that you should replace `path/to/basic/web` with the actual path for `basic/web`.
```
# Set document root to be "basic/web"
DocumentRoot "path/to/basic/web"

<Directory "path/to/basic/web">
    RewriteEngine on

    # If a directory or a file exists, use the request directly
    RewriteCond %{REQUEST_FILENAME} !-f
    RewriteCond %{REQUEST_FILENAME} !-d
    # Otherwise forward the request to index.php
    RewriteRule . index.php

    # ...other settings...
</Directory>
```

### Recommended Nginx Configuration <a name="recommended-nginx-configuration"></a>

You should have installed PHP as an [FPM SAPI](http://php.net/install.fpm) to use [Nginx](http://wiki.nginx.org/). Use the following Nginx configuration, replacing `path/to/basic/web` with the actual path for `basic/web` and `mysite.local` with the actual hostname to serve.

```
server {
    charset utf-8;
    client_max_body_size 128M;

    listen 80; ## listen for ipv4
    #listen [::]:80 default_server ipv6only=on; ## listen for ipv6

    server_name mysite.local;
    root        /path/to/basic/web;
    index       index.php;

    access_log  /path/to/basic/log/access.log main;
    error_log   /path/to/basic/log/error.log;

    location / {
        # Redirect everything that isn't a real file to index.php
        try_files $uri $uri/ /index.php?$args;
    }

    # uncomment to avoid processing of calls to non-existing static files by Yii
    #location ~ \.(js|css|png|jpg|gif|swf|ico|pdf|mov|fla|zip|rar)$ {
    #    try_files $uri =404;
    #}
    #error_page 404 /404.html;

    location ~ \.php$ {
        include fastcgi.conf;
        fastcgi_pass 127.0.0.1:9000;
        #fastcgi_pass unix:/var/run/php5-fpm.sock;
        try_files $uri =404;
    }

    location ~ /\.(ht|svn|git) {
        deny all;
    }
}
```

When using this configuration, you should also set `cgi.fix_pathinfo=0` in the `php.ini` file in order to avoid many unnecessary system `stat()` calls.

Also note that when running an HTTPS server, you need to add `fastcgi_param HTTPS on;` so that Yii can properly detect if a connection is secure.
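As a hedged illustration of that last point (not part of the original guide): with `fastcgi_param HTTPS on;` in place, application code can ask Yii's request component whether the current connection is secure. A minimal sketch, assuming a standard Yii 2 application where `Yii::$app->request` is the `yii\web\Request` component:

```php
<?php
// Inside any controller action or view of a Yii 2 application.
// isSecureConnection reads the HTTPS server variable that the
// fastcgi_param directive above makes visible to PHP.
if (Yii::$app->request->isSecureConnection) {
    // the request arrived over HTTPS
} else {
    // plain HTTP
}
```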
nkovacs/yii2
docs/guide/start-installation.md
Markdown
bsd-3-clause
9,006
package abi41_0_0.expo.modules.ads.facebook;

import android.content.Context;

import com.facebook.ads.MediaView;

import abi41_0_0.org.unimodules.core.ViewManager;

public class MediaViewManager extends ViewManager<MediaView> {
  @Override
  public String getName() {
    return "MediaView";
  }

  @Override
  public MediaView createViewInstance(Context context) {
    return new MediaView(context);
  }

  @Override
  public ViewManagerType getViewManagerType() {
    return ViewManagerType.SIMPLE;
  }
}
exponent/exponent
android/versioned-abis/expoview-abi41_0_0/src/main/java/abi41_0_0/expo/modules/ads/facebook/MediaViewManager.java
Java
bsd-3-clause
508
<?php use yii\helpers\Inflector; use yii\helpers\StringHelper; /* @var $this yii\web\View */ /* @var $generator yii\gii\generators\crud\Generator */ $urlParams = $generator->generateUrlParams(); $nameAttribute = $generator->getNameAttribute(); echo "<?php\n"; ?> use yii\helpers\Html; use <?= $generator->indexWidgetType === 'grid' ? "yii\\grid\\GridView" : "yii\\widgets\\ListView" ?>; <?= $generator->enablePjax ? 'use yii\widgets\Pjax;' : '' ?> /* @var $this yii\web\View */ <?= !empty($generator->searchModelClass) ? "/* @var \$searchModel " . ltrim($generator->searchModelClass, '\\') . " */\n" : '' ?> /* @var $dataProvider yii\data\ActiveDataProvider */ $this->title = <?= $generator->generateString(Inflector::pluralize(Inflector::camel2words(StringHelper::basename($generator->modelClass)))) ?>; $this->params['breadcrumbs'][] = $this->title; ?> <div class="<?= Inflector::camel2id(StringHelper::basename($generator->modelClass)) ?>-index"> <?php if(!empty($generator->searchModelClass)): ?> <?= " <?php " . ($generator->indexWidgetType === 'grid' ? " " : "") ?>echo $this->render('_search', ['model' => $searchModel]); ?> <?php endif; ?> <div class="box box-primary"> <div class="box-header with-border"> <h3 class="box-title"><?= "<?= " ?>Html::encode($this->title) ?></h3> </div> <div class="box-body table-responsive no-padding"> <?= $generator->enablePjax ? '<?php Pjax::begin(); ?>' : '' ?> <?php if ($generator->indexWidgetType === 'grid'): ?> <?= "<?= " ?>app\modules\admin\components\AppGridView::widget([ 'dataProvider' => $dataProvider, <?= !empty($generator->searchModelClass) ? "//'filterModel' => \$searchModel,\n 'columns' => [\n" : "'columns' => [\n"; ?> ['class' => 'app\modules\admin\components\AppSerialColumn'], [ 'class' => 'app\modules\admin\components\AppActionColumn', 'template' => '<span class=\'tbl_operation\'>{view}{update}{delete}</span>', ], <?php $count = 0; if (($tableSchema = $generator->getTableSchema()) === false) { foreach ($generator->getColumnNames() as $name) { if (++$count < 6) { echo " '" . $name . "',\n"; } else { echo " // '" . $name . "',\n"; } } } else { foreach ($tableSchema->columns as $column) { $format = $generator->generateColumnFormat($column); if (++$count < 6) { echo " '" . $column->name . ($format === 'text' ? "" : ":" . $format) . "',\n"; } else { echo " // '" . $column->name . ($format === 'text' ? "" : ":" . $format) . "',\n"; } } } ?> ], ]); ?> <?php else: ?> <?= "<?= " ?>ListView::widget([ 'dataProvider' => $dataProvider, 'itemOptions' => ['class' => 'item'], 'itemView' => function ($model, $key, $index, $widget) { return Html::a(Html::encode($model-><?= $nameAttribute ?>), ['view', <?= $urlParams ?>]); }, ]) ?> <?php endif; ?> <?= $generator->enablePjax ? '<?php Pjax::end(); ?>' : '' ?> </div> </div> </div>
tigergithub01/b2b2c
common/generators/crud-mobile/default/views/index.php
PHP
bsd-3-clause
3,149
// mgo - MongoDB driver for Go // // Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net> // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package mgo import ( "crypto/md5" "crypto/sha1" "encoding/hex" "errors" "fmt" "sync" "github.com/kylemclaren/mongo-transporter/Godeps/_workspace/src/gopkg.in/mgo.v2/bson" "github.com/kylemclaren/mongo-transporter/Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram" ) type authCmd struct { Authenticate int Nonce string User string Key string } type startSaslCmd struct { StartSASL int `bson:"startSasl"` } type authResult struct { ErrMsg string Ok bool } type getNonceCmd struct { GetNonce int } type getNonceResult struct { Nonce string Err string "$err" Code int } type logoutCmd struct { Logout int } type saslCmd struct { Start int `bson:"saslStart,omitempty"` Continue int `bson:"saslContinue,omitempty"` ConversationId int `bson:"conversationId,omitempty"` Mechanism string `bson:"mechanism,omitempty"` Payload []byte } type saslResult struct { Ok bool `bson:"ok"` NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) 
Done bool ConversationId int `bson:"conversationId"` Payload []byte ErrMsg string } type saslStepper interface { Step(serverData []byte) (clientData []byte, done bool, err error) Close() } func (socket *mongoSocket) getNonce() (nonce string, err error) { socket.Lock() for socket.cachedNonce == "" && socket.dead == nil { debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) socket.gotNonce.Wait() } if socket.cachedNonce == "mongos" { socket.Unlock() return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") } debugf("Socket %p to %s: got nonce", socket, socket.addr) nonce, err = socket.cachedNonce, socket.dead socket.cachedNonce = "" socket.Unlock() if err != nil { nonce = "" } return } func (socket *mongoSocket) resetNonce() { debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) op := &queryOp{} op.query = &getNonceCmd{GetNonce: 1} op.collection = "admin.$cmd" op.limit = -1 op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { if err != nil { socket.kill(errors.New("getNonce: "+err.Error()), true) return } result := &getNonceResult{} err = bson.Unmarshal(docData, &result) if err != nil { socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) return } debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) if result.Code == 13390 { // mongos doesn't yet support auth (see http://j.mp/mongos-auth) result.Nonce = "mongos" } else if result.Nonce == "" { var msg string if result.Err != "" { msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) } else { msg = "Got an empty nonce" } socket.kill(errors.New(msg), true) return } socket.Lock() if socket.cachedNonce != "" { socket.Unlock() panic("resetNonce: nonce already cached") } socket.cachedNonce = result.Nonce socket.gotNonce.Signal() socket.Unlock() } err := socket.Query(op) if err != nil { socket.kill(errors.New("resetNonce: "+err.Error()), true) } } func (socket *mongoSocket) Login(cred Credential) error { socket.Lock() if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { cred.Mechanism = "SCRAM-SHA-1" } for _, sockCred := range socket.creds { if sockCred == cred { debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) socket.Unlock() return nil } } if socket.dropLogout(cred) { debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) socket.creds = append(socket.creds, cred) socket.Unlock() return nil } socket.Unlock() debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) var err error switch cred.Mechanism { case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. err = socket.loginClassic(cred) case "PLAIN": err = socket.loginPlain(cred) case "MONGODB-X509": err = socket.loginX509(cred) default: // Try SASL for everything else, if it is available. err = socket.loginSASL(cred) } if err != nil { debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) } else { debugf("Socket %p to %s: login successful", socket, socket.addr) } return err } func (socket *mongoSocket) loginClassic(cred Credential) error { // Note that this only works properly because this function is // synchronous, which means the nonce won't get reset while we're // using it and any other login requests will block waiting for a // new nonce provided in the defer call below. 
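	// The classic MONGODB-CR handshake below hashes the password twice:
	// key = md5hex(nonce + username + md5hex(username + ":mongo:" + password)),
	// so the clear-text password itself is never sent over the wire.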
nonce, err := socket.getNonce() if err != nil { return err } defer socket.resetNonce() psum := md5.New() psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) ksum := md5.New() ksum.Write([]byte(nonce + cred.Username)) ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) key := hex.EncodeToString(ksum.Sum(nil)) cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } type authX509Cmd struct { Authenticate int User string Mechanism string } func (socket *mongoSocket) loginX509(cred Credential) error { cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } func (socket *mongoSocket) loginPlain(cred Credential) error { cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} res := authResult{} return socket.loginRun(cred.Source, &cmd, &res, func() error { if !res.Ok { return errors.New(res.ErrMsg) } socket.Lock() socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) socket.Unlock() return nil }) } func (socket *mongoSocket) loginSASL(cred Credential) error { var sasl saslStepper var err error if cred.Mechanism == "SCRAM-SHA-1" { // SCRAM is handled without external libraries. sasl = saslNewScram(cred) } else if len(cred.ServiceHost) > 0 { sasl, err = saslNew(cred, cred.ServiceHost) } else { sasl, err = saslNew(cred, socket.Server().Addr) } if err != nil { return err } defer sasl.Close() // The goal of this logic is to carry a locked socket until the // local SASL step confirms the auth is valid; the socket needs to be // locked so that concurrent action doesn't leave the socket in an // auth state that doesn't reflect the operations that took place. // As a simple case, imagine inverting login=>logout to logout=>login. // // The logic below works because the lock func isn't called concurrently. locked := false lock := func(b bool) { if locked != b { locked = b if b { socket.Lock() } else { socket.Unlock() } } } lock(true) defer lock(false) start := 1 cmd := saslCmd{} res := saslResult{} for { payload, done, err := sasl.Step(res.Payload) if err != nil { return err } if done && res.Done { socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) break } lock(false) cmd = saslCmd{ Start: start, Continue: 1 - start, ConversationId: res.ConversationId, Mechanism: cred.Mechanism, Payload: payload, } start = 0 err = socket.loginRun(cred.Source, &cmd, &res, func() error { // See the comment on lock for why this is necessary. 
lock(true) if !res.Ok || res.NotOk { return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) } return nil }) if err != nil { return err } if done && res.Done { socket.dropAuth(cred.Source) socket.creds = append(socket.creds, cred) break } } return nil } func saslNewScram(cred Credential) *saslScram { credsum := md5.New() credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) return &saslScram{cred: cred, client: client} } type saslScram struct { cred Credential client *scram.Client } func (s *saslScram) Close() {} func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { more := s.client.Step(serverData) return s.client.Out(), !more, s.client.Err() } func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { var mutex sync.Mutex var replyErr error mutex.Lock() op := queryOp{} op.query = query op.collection = db + ".$cmd" op.limit = -1 op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { defer mutex.Unlock() if err != nil { replyErr = err return } err = bson.Unmarshal(docData, result) if err != nil { replyErr = err } else { // Must handle this within the read loop for the socket, so // that concurrent login requests are properly ordered. replyErr = f() } } err := socket.Query(&op) if err != nil { return err } mutex.Lock() // Wait. return replyErr } func (socket *mongoSocket) Logout(db string) { socket.Lock() cred, found := socket.dropAuth(db) if found { debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) socket.logout = append(socket.logout, cred) } socket.Unlock() } func (socket *mongoSocket) LogoutAll() { socket.Lock() if l := len(socket.creds); l > 0 { debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) socket.logout = append(socket.logout, socket.creds...) socket.creds = socket.creds[0:0] } socket.Unlock() } func (socket *mongoSocket) flushLogout() (ops []interface{}) { socket.Lock() if l := len(socket.logout); l > 0 { debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) for i := 0; i != l; i++ { op := queryOp{} op.query = &logoutCmd{1} op.collection = socket.logout[i].Source + ".$cmd" op.limit = -1 ops = append(ops, &op) } socket.logout = socket.logout[0:0] } socket.Unlock() return } func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { for i, sockCred := range socket.creds { if sockCred.Source == db { copy(socket.creds[i:], socket.creds[i+1:]) socket.creds = socket.creds[:len(socket.creds)-1] return sockCred, true } } return cred, false } func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { for i, sockCred := range socket.logout { if sockCred == cred { copy(socket.logout[i:], socket.logout[i+1:]) socket.logout = socket.logout[:len(socket.logout)-1] return true } } return false }
kylemclaren/mongo-transporter
Godeps/_workspace/src/gopkg.in/mgo.v2/auth.go
GO
bsd-3-clause
12,555
package spark.scheduler import java.net.URI import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.Future import java.util.concurrent.LinkedBlockingQueue import java.util.concurrent.TimeUnit import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Queue, Map} import spark._ import spark.partial.ApproximateActionListener import spark.partial.ApproximateEvaluator import spark.partial.PartialResult import spark.storage.BlockManagerMaster import spark.storage.BlockManagerId import util.{MetadataCleaner, TimeStampedHashMap} /** * A Scheduler subclass that implements stage-oriented scheduling. It computes a DAG of stages for * each job, keeps track of which RDDs and stage outputs are materialized, and computes a minimal * schedule to run the job. Subclasses only need to implement the code to send a task to the cluster * and to report fetch failures (the submitTasks method, and code to add CompletionEvents). */ private[spark] class DAGScheduler( taskSched: TaskScheduler, mapOutputTracker: MapOutputTracker, blockManagerMaster: BlockManagerMaster, env: SparkEnv) extends TaskSchedulerListener with Logging { def this(taskSched: TaskScheduler) { this(taskSched, SparkEnv.get.mapOutputTracker, SparkEnv.get.blockManager.master, SparkEnv.get) } taskSched.setListener(this) // Called by TaskScheduler to report task completions or failures. override def taskEnded( task: Task[_], reason: TaskEndReason, result: Any, accumUpdates: Map[Long, Any]) { eventQueue.put(CompletionEvent(task, reason, result, accumUpdates)) } // Called by TaskScheduler when an executor fails. override def executorLost(execId: String) { eventQueue.put(ExecutorLost(execId)) } // Called by TaskScheduler to cancel an entire TaskSet due to repeated failures. override def taskSetFailed(taskSet: TaskSet, reason: String) { eventQueue.put(TaskSetFailed(taskSet, reason)) } // The time, in millis, to wait for fetch failure events to stop coming in after one is detected; // this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one // as more failure events come in val RESUBMIT_TIMEOUT = 50L // The time, in millis, to wake up between polls of the completion queue in order to potentially // resubmit failed stages val POLL_TIMEOUT = 10L private val eventQueue = new LinkedBlockingQueue[DAGSchedulerEvent] val nextRunId = new AtomicInteger(0) val nextStageId = new AtomicInteger(0) val idToStage = new TimeStampedHashMap[Int, Stage] val shuffleToMapStage = new TimeStampedHashMap[Int, Stage] var cacheLocs = new HashMap[Int, Array[List[String]]] // For tracking failed nodes, we use the MapOutputTracker's generation number, which is // sent with every task. When we detect a node failing, we note the current generation number // and failed executor, increment it for new tasks, and use this to ignore stray ShuffleMapTask // results. // TODO: Garbage collect information about failure generations when we know there are no more // stray messages to detect. 
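  // Maps executor ID -> the generation number recorded when that executor was marked as failed.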
val failedGeneration = new HashMap[String, Long] val waiting = new HashSet[Stage] // Stages we need to run whose parents aren't done val running = new HashSet[Stage] // Stages we are running right now val failed = new HashSet[Stage] // Stages that must be resubmitted due to fetch failures val pendingTasks = new TimeStampedHashMap[Stage, HashSet[Task[_]]] // Missing tasks from each stage var lastFetchFailureTime: Long = 0 // Used to wait a bit to avoid repeated resubmits val activeJobs = new HashSet[ActiveJob] val resultStageToJob = new HashMap[Stage, ActiveJob] val metadataCleaner = new MetadataCleaner("DAGScheduler", this.cleanup) // Start a thread to run the DAGScheduler event loop def start() { new Thread("DAGScheduler") { setDaemon(true) override def run() { DAGScheduler.this.run() } }.start() } private def getCacheLocs(rdd: RDD[_]): Array[List[String]] = { if (!cacheLocs.contains(rdd.id)) { val blockIds = rdd.partitions.indices.map(index=> "rdd_%d_%d".format(rdd.id, index)).toArray cacheLocs(rdd.id) = blockManagerMaster.getLocations(blockIds).map { locations => locations.map(_.ip).toList }.toArray } cacheLocs(rdd.id) } private def clearCacheLocs() { cacheLocs.clear() } /** * Get or create a shuffle map stage for the given shuffle dependency's map side. * The priority value passed in will be used if the stage doesn't already exist with * a lower priority (we assume that priorities always increase across jobs for now). */ private def getShuffleMapStage(shuffleDep: ShuffleDependency[_,_], priority: Int): Stage = { shuffleToMapStage.get(shuffleDep.shuffleId) match { case Some(stage) => stage case None => val stage = newStage(shuffleDep.rdd, Some(shuffleDep), priority) shuffleToMapStage(shuffleDep.shuffleId) = stage stage } } /** * Create a Stage for the given RDD, either as a shuffle map stage (for a ShuffleDependency) or * as a result stage for the final RDD used directly in an action. The stage will also be given * the provided priority. */ private def newStage(rdd: RDD[_], shuffleDep: Option[ShuffleDependency[_,_]], priority: Int): Stage = { if (shuffleDep != None) { // Kind of ugly: need to register RDDs with the cache and map output tracker here // since we can't do it in the RDD constructor because # of partitions is unknown logInfo("Registering RDD " + rdd.id + " (" + rdd.origin + ")") mapOutputTracker.registerShuffle(shuffleDep.get.shuffleId, rdd.partitions.size) } val id = nextStageId.getAndIncrement() val stage = new Stage(id, rdd, shuffleDep, getParentStages(rdd, priority), priority) idToStage(id) = stage stage } /** * Get or create the list of parent stages for a given RDD. The stages will be assigned the * provided priority if they haven't already been created with a lower priority. 
*/ private def getParentStages(rdd: RDD[_], priority: Int): List[Stage] = { val parents = new HashSet[Stage] val visited = new HashSet[RDD[_]] def visit(r: RDD[_]) { if (!visited(r)) { visited += r // Kind of ugly: need to register RDDs with the cache here since // we can't do it in its constructor because # of partitions is unknown for (dep <- r.dependencies) { dep match { case shufDep: ShuffleDependency[_,_] => parents += getShuffleMapStage(shufDep, priority) case _ => visit(dep.rdd) } } } } visit(rdd) parents.toList } private def getMissingParentStages(stage: Stage): List[Stage] = { val missing = new HashSet[Stage] val visited = new HashSet[RDD[_]] def visit(rdd: RDD[_]) { if (!visited(rdd)) { visited += rdd if (getCacheLocs(rdd).contains(Nil)) { for (dep <- rdd.dependencies) { dep match { case shufDep: ShuffleDependency[_,_] => val mapStage = getShuffleMapStage(shufDep, stage.priority) if (!mapStage.isAvailable) { missing += mapStage } case narrowDep: NarrowDependency[_] => visit(narrowDep.rdd) } } } } } visit(stage.rdd) missing.toList } /** * Returns (and does not submit) a JobSubmitted event suitable to run a given job, and a * JobWaiter whose getResult() method will return the result of the job when it is complete. * * The job is assumed to have at least one partition; zero partition jobs should be handled * without a JobSubmitted event. */ private[scheduler] def prepareJob[T, U: ClassManifest]( finalRdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], callSite: String, allowLocal: Boolean, resultHandler: (Int, U) => Unit) : (JobSubmitted, JobWaiter[U]) = { assert(partitions.size > 0) val waiter = new JobWaiter(partitions.size, resultHandler) val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _] val toSubmit = JobSubmitted(finalRdd, func2, partitions.toArray, allowLocal, callSite, waiter) return (toSubmit, waiter) } def runJob[T, U: ClassManifest]( finalRdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], callSite: String, allowLocal: Boolean, resultHandler: (Int, U) => Unit) { if (partitions.size == 0) { return } val (toSubmit, waiter) = prepareJob( finalRdd, func, partitions, callSite, allowLocal, resultHandler) eventQueue.put(toSubmit) waiter.awaitResult() match { case JobSucceeded => {} case JobFailed(exception: Exception) => logInfo("Failed to run " + callSite) throw exception } } def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], callSite: String, timeout: Long) : PartialResult[R] = { val listener = new ApproximateActionListener(rdd, func, evaluator, timeout) val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _] val partitions = (0 until rdd.partitions.size).toArray eventQueue.put(JobSubmitted(rdd, func2, partitions, false, callSite, listener)) return listener.awaitResult() // Will throw an exception if the job fails } /** * Process one event retrieved from the event queue. * Returns true if we should stop the event loop. 
*/ private[scheduler] def processEvent(event: DAGSchedulerEvent): Boolean = { event match { case JobSubmitted(finalRDD, func, partitions, allowLocal, callSite, listener) => val runId = nextRunId.getAndIncrement() val finalStage = newStage(finalRDD, None, runId) val job = new ActiveJob(runId, finalStage, func, partitions, callSite, listener) clearCacheLocs() logInfo("Got job " + job.runId + " (" + callSite + ") with " + partitions.length + " output partitions (allowLocal=" + allowLocal + ")") logInfo("Final stage: " + finalStage + " (" + finalStage.origin + ")") logInfo("Parents of final stage: " + finalStage.parents) logInfo("Missing parents: " + getMissingParentStages(finalStage)) if (allowLocal && finalStage.parents.size == 0 && partitions.length == 1) { // Compute very short actions like first() or take() with no parent stages locally. runLocally(job) } else { activeJobs += job resultStageToJob(finalStage) = job submitStage(finalStage) } case ExecutorLost(execId) => handleExecutorLost(execId) case completion: CompletionEvent => handleTaskCompletion(completion) case TaskSetFailed(taskSet, reason) => abortStage(idToStage(taskSet.stageId), reason) case StopDAGScheduler => // Cancel any active jobs for (job <- activeJobs) { val error = new SparkException("Job cancelled because SparkContext was shut down") job.listener.jobFailed(error) } return true } return false } /** * Resubmit any failed stages. Ordinarily called after a small amount of time has passed since * the last fetch failure. */ private[scheduler] def resubmitFailedStages() { logInfo("Resubmitting failed stages") clearCacheLocs() val failed2 = failed.toArray failed.clear() for (stage <- failed2.sortBy(_.priority)) { submitStage(stage) } } /** * Check for waiting or failed stages which are now eligible for resubmission. * Ordinarily run on every iteration of the event loop. */ private[scheduler] def submitWaitingStages() { // TODO: We might want to run this less often, when we are sure that something has become // runnable that wasn't before. logTrace("Checking for newly runnable parent stages") logTrace("running: " + running) logTrace("waiting: " + waiting) logTrace("failed: " + failed) val waiting2 = waiting.toArray waiting.clear() for (stage <- waiting2.sortBy(_.priority)) { submitStage(stage) } } /** * The main event loop of the DAG scheduler, which waits for new-job / task-finished / failure * events and responds by launching tasks. This runs in a dedicated thread and receives events * via the eventQueue. */ private def run() { SparkEnv.set(env) while (true) { val event = eventQueue.poll(POLL_TIMEOUT, TimeUnit.MILLISECONDS) if (event != null) { logDebug("Got event of type " + event.getClass.getName) } if (event != null) { if (processEvent(event)) { return } } val time = System.currentTimeMillis() // TODO: use a pluggable clock for testability // Periodically resubmit failed stages if some map output fetches have failed and we have // waited at least RESUBMIT_TIMEOUT. We wait for this short time because when a node fails, // tasks on many other nodes are bound to get a fetch failure, and they won't all get it at // the same time, so we want to make sure we've identified all the reduce tasks that depend // on the failed node. if (failed.size > 0 && time > lastFetchFailureTime + RESUBMIT_TIMEOUT) { resubmitFailedStages() } else { submitWaitingStages() } } } /** * Run a job on an RDD locally, assuming it has only a single partition and no dependencies. 
* We run the operation in a separate thread just in case it takes a bunch of time, so that we * don't block the DAGScheduler event loop or other concurrent jobs. */ private def runLocally(job: ActiveJob) { logInfo("Computing the requested partition locally") new Thread("Local computation of job " + job.runId) { override def run() { try { SparkEnv.set(env) val rdd = job.finalStage.rdd val split = rdd.partitions(job.partitions(0)) val taskContext = new TaskContext(job.finalStage.id, job.partitions(0), 0) try { val result = job.func(taskContext, rdd.iterator(split, taskContext)) job.listener.taskSucceeded(0, result) } finally { taskContext.executeOnCompleteCallbacks() } } catch { case e: Exception => job.listener.jobFailed(e) } } }.start() } /** Submits stage, but first recursively submits any missing parents. */ private def submitStage(stage: Stage) { logDebug("submitStage(" + stage + ")") if (!waiting(stage) && !running(stage) && !failed(stage)) { val missing = getMissingParentStages(stage).sortBy(_.id) logDebug("missing: " + missing) if (missing == Nil) { logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents") submitMissingTasks(stage) running += stage } else { for (parent <- missing) { submitStage(parent) } waiting += stage } } } /** Called when stage's parents are available and we can now do its task. */ private def submitMissingTasks(stage: Stage) { logDebug("submitMissingTasks(" + stage + ")") // Get our pending tasks and remember them in our pendingTasks entry val myPending = pendingTasks.getOrElseUpdate(stage, new HashSet) myPending.clear() var tasks = ArrayBuffer[Task[_]]() if (stage.isShuffleMap) { for (p <- 0 until stage.numPartitions if stage.outputLocs(p) == Nil) { val locs = getPreferredLocs(stage.rdd, p) tasks += new ShuffleMapTask(stage.id, stage.rdd, stage.shuffleDep.get, p, locs) } } else { // This is a final stage; figure out its job's missing partitions val job = resultStageToJob(stage) for (id <- 0 until job.numPartitions if (!job.finished(id))) { val partition = job.partitions(id) val locs = getPreferredLocs(stage.rdd, partition) tasks += new ResultTask(stage.id, stage.rdd, job.func, partition, locs, id) } } if (tasks.size > 0) { logInfo("Submitting " + tasks.size + " missing tasks from " + stage + " (" + stage.rdd + ")") myPending ++= tasks logDebug("New pending tasks: " + myPending) taskSched.submitTasks( new TaskSet(tasks.toArray, stage.id, stage.newAttemptId(), stage.priority)) if (!stage.submissionTime.isDefined) { stage.submissionTime = Some(System.currentTimeMillis()) } } else { logDebug("Stage " + stage + " is actually done; %b %d %d".format( stage.isAvailable, stage.numAvailableOutputs, stage.numPartitions)) running -= stage } } /** * Responds to a task finishing. This is called inside the event loop so it assumes that it can * modify the scheduler's internal state. Use taskEnded() to post a task end event from outside. 
*/ private def handleTaskCompletion(event: CompletionEvent) { val task = event.task val stage = idToStage(task.stageId) def markStageAsFinished(stage: Stage) = { val serviceTime = stage.submissionTime match { case Some(t) => "%.03f".format((System.currentTimeMillis() - t) / 1000.0) case _ => "Unkown" } logInfo("%s (%s) finished in %s s".format(stage, stage.origin, serviceTime)) running -= stage } event.reason match { case Success => logInfo("Completed " + task) if (event.accumUpdates != null) { Accumulators.add(event.accumUpdates) // TODO: do this only if task wasn't resubmitted } pendingTasks(stage) -= task task match { case rt: ResultTask[_, _] => resultStageToJob.get(stage) match { case Some(job) => if (!job.finished(rt.outputId)) { job.finished(rt.outputId) = true job.numFinished += 1 // If the whole job has finished, remove it if (job.numFinished == job.numPartitions) { activeJobs -= job resultStageToJob -= stage markStageAsFinished(stage) } job.listener.taskSucceeded(rt.outputId, event.result) } case None => logInfo("Ignoring result from " + rt + " because its job has finished") } case smt: ShuffleMapTask => val stage = idToStage(smt.stageId) val status = event.result.asInstanceOf[MapStatus] val execId = status.location.executorId logDebug("ShuffleMapTask finished on " + execId) if (failedGeneration.contains(execId) && smt.generation <= failedGeneration(execId)) { logInfo("Ignoring possibly bogus ShuffleMapTask completion from " + execId) } else { stage.addOutputLoc(smt.partition, status) } if (running.contains(stage) && pendingTasks(stage).isEmpty) { markStageAsFinished(stage) logInfo("looking for newly runnable stages") logInfo("running: " + running) logInfo("waiting: " + waiting) logInfo("failed: " + failed) if (stage.shuffleDep != None) { // We supply true to increment the generation number here in case this is a // recomputation of the map outputs. In that case, some nodes may have cached // locations with holes (from when we detected the error) and will need the // generation incremented to refetch them. // TODO: Only increment the generation number if this is not the first time // we registered these map outputs. 
mapOutputTracker.registerMapOutputs( stage.shuffleDep.get.shuffleId, stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray, true) } clearCacheLocs() if (stage.outputLocs.count(_ == Nil) != 0) { // Some tasks had failed; let's resubmit this stage // TODO: Lower-level scheduler should also deal with this logInfo("Resubmitting " + stage + " (" + stage.origin + ") because some of its tasks had failed: " + stage.outputLocs.zipWithIndex.filter(_._1 == Nil).map(_._2).mkString(", ")) submitStage(stage) } else { val newlyRunnable = new ArrayBuffer[Stage] for (stage <- waiting) { logInfo("Missing parents for " + stage + ": " + getMissingParentStages(stage)) } for (stage <- waiting if getMissingParentStages(stage) == Nil) { newlyRunnable += stage } waiting --= newlyRunnable running ++= newlyRunnable for (stage <- newlyRunnable.sortBy(_.id)) { logInfo("Submitting " + stage + " (" + stage.rdd + "), which is now runnable") submitMissingTasks(stage) } } } } case Resubmitted => logInfo("Resubmitted " + task + ", so marking it as still running") pendingTasks(stage) += task case FetchFailed(bmAddress, shuffleId, mapId, reduceId) => // Mark the stage that the reducer was in as unrunnable val failedStage = idToStage(task.stageId) running -= failedStage failed += failedStage // TODO: Cancel running tasks in the stage logInfo("Marking " + failedStage + " (" + failedStage.origin + ") for resubmision due to a fetch failure") // Mark the map whose fetch failed as broken in the map stage val mapStage = shuffleToMapStage(shuffleId) if (mapId != -1) { mapStage.removeOutputLoc(mapId, bmAddress) mapOutputTracker.unregisterMapOutput(shuffleId, mapId, bmAddress) } logInfo("The failed fetch was from " + mapStage + " (" + mapStage.origin + "); marking it for resubmission") failed += mapStage // Remember that a fetch failed now; this is used to resubmit the broken // stages later, after a small wait (to give other tasks the chance to fail) lastFetchFailureTime = System.currentTimeMillis() // TODO: Use pluggable clock // TODO: mark the executor as failed only if there were lots of fetch failures on it if (bmAddress != null) { handleExecutorLost(bmAddress.executorId, Some(task.generation)) } case other => // Non-fetch failure -- probably a bug in user code; abort all jobs depending on this stage abortStage(idToStage(task.stageId), task + " failed: " + other) } } /** * Responds to an executor being lost. This is called inside the event loop, so it assumes it can * modify the scheduler's internal state. Use executorLost() to post a loss event from outside. * * Optionally the generation during which the failure was caught can be passed to avoid allowing * stray fetch failures from possibly retriggering the detection of a node as lost. 
*/ private def handleExecutorLost(execId: String, maybeGeneration: Option[Long] = None) { val currentGeneration = maybeGeneration.getOrElse(mapOutputTracker.getGeneration) if (!failedGeneration.contains(execId) || failedGeneration(execId) < currentGeneration) { failedGeneration(execId) = currentGeneration logInfo("Executor lost: %s (generation %d)".format(execId, currentGeneration)) blockManagerMaster.removeExecutor(execId) // TODO: This will be really slow if we keep accumulating shuffle map stages for ((shuffleId, stage) <- shuffleToMapStage) { stage.removeOutputsOnExecutor(execId) val locs = stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray mapOutputTracker.registerMapOutputs(shuffleId, locs, true) } if (shuffleToMapStage.isEmpty) { mapOutputTracker.incrementGeneration() } clearCacheLocs() } else { logDebug("Additional executor lost message for " + execId + "(generation " + currentGeneration + ")") } } /** * Aborts all jobs depending on a particular Stage. This is called in response to a task set * being cancelled by the TaskScheduler. Use taskSetFailed() to inject this event from outside. */ private def abortStage(failedStage: Stage, reason: String) { val dependentStages = resultStageToJob.keys.filter(x => stageDependsOn(x, failedStage)).toSeq for (resultStage <- dependentStages) { val job = resultStageToJob(resultStage) job.listener.jobFailed(new SparkException("Job failed: " + reason)) activeJobs -= job resultStageToJob -= resultStage } if (dependentStages.isEmpty) { logInfo("Ignoring failure of " + failedStage + " because all jobs depending on it are done") } } /** * Return true if one of stage's ancestors is target. */ private def stageDependsOn(stage: Stage, target: Stage): Boolean = { if (stage == target) { return true } val visitedRdds = new HashSet[RDD[_]] val visitedStages = new HashSet[Stage] def visit(rdd: RDD[_]) { if (!visitedRdds(rdd)) { visitedRdds += rdd for (dep <- rdd.dependencies) { dep match { case shufDep: ShuffleDependency[_,_] => val mapStage = getShuffleMapStage(shufDep, stage.priority) if (!mapStage.isAvailable) { visitedStages += mapStage visit(mapStage.rdd) } // Otherwise there's no need to follow the dependency back case narrowDep: NarrowDependency[_] => visit(narrowDep.rdd) } } } } visit(stage.rdd) visitedRdds.contains(target.rdd) } private def getPreferredLocs(rdd: RDD[_], partition: Int): List[String] = { // If the partition is cached, return the cache locations val cached = getCacheLocs(rdd)(partition) if (cached != Nil) { return cached } // If the RDD has some placement preferences (as is the case for input RDDs), get those val rddPrefs = rdd.preferredLocations(rdd.partitions(partition)).toList if (rddPrefs != Nil) { return rddPrefs } // If the RDD has narrow dependencies, pick the first partition of the first narrow dep // that has any placement preferences. Ideally we would choose based on transfer sizes, // but this will do for now. 
rdd.dependencies.foreach(_ match { case n: NarrowDependency[_] => for (inPart <- n.getParents(partition)) { val locs = getPreferredLocs(n.rdd, inPart) if (locs != Nil) return locs } case _ => }) return Nil } private def cleanup(cleanupTime: Long) { var sizeBefore = idToStage.size idToStage.clearOldValues(cleanupTime) logInfo("idToStage " + sizeBefore + " --> " + idToStage.size) sizeBefore = shuffleToMapStage.size shuffleToMapStage.clearOldValues(cleanupTime) logInfo("shuffleToMapStage " + sizeBefore + " --> " + shuffleToMapStage.size) sizeBefore = pendingTasks.size pendingTasks.clearOldValues(cleanupTime) logInfo("pendingTasks " + sizeBefore + " --> " + pendingTasks.size) } def stop() { eventQueue.put(StopDAGScheduler) metadataCleaner.cancel() taskSched.stop() } }
hobinyoon/spark-0.7.0
core/src/main/scala/spark/scheduler/DAGScheduler.scala
Scala
bsd-3-clause
28,031
require 'json' require 'rest-client' module Ebi class OlsClient def all_descendants(ontology_id, term_iri) Rails.cache.fetch("ebi_ontology_terms_#{ontology_id}_#{term_iri}") do url = "https://www.ebi.ac.uk/ols/api/ontologies/#{ontology_id}/terms/#{double_url_encode(term_iri)}" self_json = JSON.parse(RestClient.get(url, accept: :json)) @collected_iris = [] all_children(self_json) end end def all_children(term_json, parent_iri = nil) @collected_iris << term_json['iri'] term = { iri: term_json['iri'], label: term_json['label'] } term[:parent_iri] = parent_iri if parent_iri url = "https://www.ebi.ac.uk/ols/api/ontologies/#{term_json['ontology_name']}/terms/#{double_url_encode(term_json['iri'])}/children" child_terms = [] if term_json['has_children'] loop do Rails.logger.debug("[OLS] Fetching #{url}...") j = JSON.parse(RestClient.get(url, accept: :json)) child_terms += (j.dig('_embedded', 'terms') || []) url = j.dig('_links', 'next', 'href') break unless url end end terms = [term] child_terms.each do |child_json| next if @collected_iris.include?(child_json['iri']) terms += all_children(child_json, term_json['iri']) end terms end def self.ontologies return @ontologies if @ontologies ontology_list = begin Rails.cache.fetch('ebi_ontology_options') do JSON.parse(RestClient.get('https://www.ebi.ac.uk/ols/api/ontologies?size=1000', accept: :json)) end rescue StandardError nil end ontology_list ||= JSON.parse(File.read(Rails.root.join('config', 'ontologies', 'ebi_ontologies.json'))) @ontologies = ontology_list.dig('_embedded', 'ontologies') end def self.ontology_keys @ontology_keys ||= ontologies.map { |ontology| ontology.dig('config', 'namespace') }.sort.compact end private def double_url_encode(id) CGI.escape(CGI.escape(id)) # Yes this is correct end end end
seek4science/seek
lib/ebi/ols_client.rb
Ruby
bsd-3-clause
2,169
package com.fsck.k9.mailstore; import java.util.ArrayList; import java.util.List; import java.util.Locale; import android.content.ContentValues; import android.content.SharedPreferences; import android.database.Cursor; import android.database.sqlite.SQLiteDatabase; import android.database.sqlite.SQLiteException; import android.util.Log; import com.fsck.k9.Account; import com.fsck.k9.K9; import com.fsck.k9.R; import com.fsck.k9.helper.Utility; import com.fsck.k9.mail.Flag; import com.fsck.k9.mail.Folder; import com.fsck.k9.mail.Message; import com.fsck.k9.provider.AttachmentProvider; class StoreSchemaDefinition implements LockableDatabase.SchemaDefinition { /** * */ private final LocalStore localStore; /** * @param localStore */ StoreSchemaDefinition(LocalStore localStore) { this.localStore = localStore; } @Override public int getVersion() { return LocalStore.DB_VERSION; } @Override public void doDbUpgrade(final SQLiteDatabase db) { try { upgradeDatabase(db); } catch (Exception e) { Log.e(K9.LOG_TAG, "Exception while upgrading database. Resetting the DB to v0", e); db.setVersion(0); upgradeDatabase(db); } } private void upgradeDatabase(final SQLiteDatabase db) { Log.i(K9.LOG_TAG, String.format(Locale.US, "Upgrading database from version %d to version %d", db.getVersion(), LocalStore.DB_VERSION)); AttachmentProvider.clear(this.localStore.context); db.beginTransaction(); try { // schema version 29 was when we moved to incremental updates // in the case of a new db or a < v29 db, we blow away and start from scratch if (db.getVersion() < 29) { db.execSQL("DROP TABLE IF EXISTS folders"); db.execSQL("CREATE TABLE folders (id INTEGER PRIMARY KEY, name TEXT, " + "last_updated INTEGER, unread_count INTEGER, visible_limit INTEGER, status TEXT, " + "push_state TEXT, last_pushed INTEGER, flagged_count INTEGER default 0, " + "integrate INTEGER, top_group INTEGER, poll_class TEXT, push_class TEXT, display_class TEXT, notify_class TEXT" + ")"); db.execSQL("CREATE INDEX IF NOT EXISTS folder_name ON folders (name)"); db.execSQL("DROP TABLE IF EXISTS messages"); db.execSQL("CREATE TABLE messages (" + "id INTEGER PRIMARY KEY, " + "deleted INTEGER default 0, " + "folder_id INTEGER, " + "uid TEXT, " + "subject TEXT, " + "date INTEGER, " + "flags TEXT, " + "sender_list TEXT, " + "to_list TEXT, " + "cc_list TEXT, " + "bcc_list TEXT, " + "reply_to_list TEXT, " + "html_content TEXT, " + "text_content TEXT, " + "attachment_count INTEGER, " + "internal_date INTEGER, " + "message_id TEXT, " + "preview TEXT, " + "mime_type TEXT, "+ "normalized_subject_hash INTEGER, " + "empty INTEGER, " + "read INTEGER default 0, " + "flagged INTEGER default 0, " + "answered INTEGER default 0, " + "forwarded INTEGER default 0" + ")"); db.execSQL("DROP TABLE IF EXISTS headers"); db.execSQL("CREATE TABLE headers (id INTEGER PRIMARY KEY, message_id INTEGER, name TEXT, value TEXT)"); db.execSQL("CREATE INDEX IF NOT EXISTS header_folder ON headers (message_id)"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_uid ON messages (uid, folder_id)"); db.execSQL("DROP INDEX IF EXISTS msg_folder_id"); db.execSQL("DROP INDEX IF EXISTS msg_folder_id_date"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_folder_id_deleted_date ON messages (folder_id,deleted,internal_date)"); db.execSQL("DROP INDEX IF EXISTS msg_empty"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_empty ON messages (empty)"); db.execSQL("DROP INDEX IF EXISTS msg_read"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_read ON messages (read)"); db.execSQL("DROP INDEX IF EXISTS msg_flagged"); 
db.execSQL("CREATE INDEX IF NOT EXISTS msg_flagged ON messages (flagged)"); db.execSQL("DROP INDEX IF EXISTS msg_composite"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_composite ON messages (deleted, empty,folder_id,flagged,read)"); db.execSQL("DROP TABLE IF EXISTS threads"); db.execSQL("CREATE TABLE threads (" + "id INTEGER PRIMARY KEY, " + "message_id INTEGER, " + "root INTEGER, " + "parent INTEGER" + ")"); db.execSQL("DROP INDEX IF EXISTS threads_message_id"); db.execSQL("CREATE INDEX IF NOT EXISTS threads_message_id ON threads (message_id)"); db.execSQL("DROP INDEX IF EXISTS threads_root"); db.execSQL("CREATE INDEX IF NOT EXISTS threads_root ON threads (root)"); db.execSQL("DROP INDEX IF EXISTS threads_parent"); db.execSQL("CREATE INDEX IF NOT EXISTS threads_parent ON threads (parent)"); db.execSQL("DROP TRIGGER IF EXISTS set_thread_root"); db.execSQL("CREATE TRIGGER set_thread_root " + "AFTER INSERT ON threads " + "BEGIN " + "UPDATE threads SET root=id WHERE root IS NULL AND ROWID = NEW.ROWID; " + "END"); db.execSQL("DROP TABLE IF EXISTS attachments"); db.execSQL("CREATE TABLE attachments (id INTEGER PRIMARY KEY, message_id INTEGER," + "store_data TEXT, content_uri TEXT, size INTEGER, name TEXT," + "mime_type TEXT, content_id TEXT, content_disposition TEXT)"); db.execSQL("DROP TABLE IF EXISTS pending_commands"); db.execSQL("CREATE TABLE pending_commands " + "(id INTEGER PRIMARY KEY, command TEXT, arguments TEXT)"); db.execSQL("DROP TRIGGER IF EXISTS delete_folder"); db.execSQL("CREATE TRIGGER delete_folder BEFORE DELETE ON folders BEGIN DELETE FROM messages WHERE old.id = folder_id; END;"); db.execSQL("DROP TRIGGER IF EXISTS delete_message"); db.execSQL("CREATE TRIGGER delete_message BEFORE DELETE ON messages BEGIN DELETE FROM attachments WHERE old.id = message_id; " + "DELETE FROM headers where old.id = message_id; END;"); } else { // in the case that we're starting out at 29 or newer, run all the needed updates if (db.getVersion() < 30) { try { db.execSQL("ALTER TABLE messages ADD deleted INTEGER default 0"); } catch (SQLiteException e) { if (! e.toString().startsWith("duplicate column name: deleted")) { throw e; } } } if (db.getVersion() < 31) { db.execSQL("DROP INDEX IF EXISTS msg_folder_id_date"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_folder_id_deleted_date ON messages (folder_id,deleted,internal_date)"); } if (db.getVersion() < 32) { db.execSQL("UPDATE messages SET deleted = 1 WHERE flags LIKE '%DELETED%'"); } if (db.getVersion() < 33) { try { db.execSQL("ALTER TABLE messages ADD preview TEXT"); } catch (SQLiteException e) { if (! e.toString().startsWith("duplicate column name: preview")) { throw e; } } } if (db.getVersion() < 34) { try { db.execSQL("ALTER TABLE folders ADD flagged_count INTEGER default 0"); } catch (SQLiteException e) { if (! 
e.getMessage().startsWith("duplicate column name: flagged_count")) { throw e; } } } if (db.getVersion() < 35) { try { db.execSQL("update messages set flags = replace(flags, 'X_NO_SEEN_INFO', 'X_BAD_FLAG')"); } catch (SQLiteException e) { Log.e(K9.LOG_TAG, "Unable to get rid of obsolete flag X_NO_SEEN_INFO", e); } } if (db.getVersion() < 36) { try { db.execSQL("ALTER TABLE attachments ADD content_id TEXT"); } catch (SQLiteException e) { Log.e(K9.LOG_TAG, "Unable to add content_id column to attachments"); } } if (db.getVersion() < 37) { try { db.execSQL("ALTER TABLE attachments ADD content_disposition TEXT"); } catch (SQLiteException e) { Log.e(K9.LOG_TAG, "Unable to add content_disposition column to attachments"); } } // Database version 38 is solely to prune cached attachments now that we clear them better if (db.getVersion() < 39) { try { db.execSQL("DELETE FROM headers WHERE id in (SELECT headers.id FROM headers LEFT JOIN messages ON headers.message_id = messages.id WHERE messages.id IS NULL)"); } catch (SQLiteException e) { Log.e(K9.LOG_TAG, "Unable to remove extra header data from the database"); } } // V40: Store the MIME type for a message. if (db.getVersion() < 40) { try { db.execSQL("ALTER TABLE messages ADD mime_type TEXT"); } catch (SQLiteException e) { Log.e(K9.LOG_TAG, "Unable to add mime_type column to messages"); } } if (db.getVersion() < 41) { try { db.execSQL("ALTER TABLE folders ADD integrate INTEGER"); db.execSQL("ALTER TABLE folders ADD top_group INTEGER"); db.execSQL("ALTER TABLE folders ADD poll_class TEXT"); db.execSQL("ALTER TABLE folders ADD push_class TEXT"); db.execSQL("ALTER TABLE folders ADD display_class TEXT"); } catch (SQLiteException e) { if (! e.getMessage().startsWith("duplicate column name:")) { throw e; } } Cursor cursor = null; try { SharedPreferences prefs = this.localStore.getPreferences(); cursor = db.rawQuery("SELECT id, name FROM folders", null); while (cursor.moveToNext()) { try { int id = cursor.getInt(0); String name = cursor.getString(1); update41Metadata(db, prefs, id, name); } catch (Exception e) { Log.e(K9.LOG_TAG, " error trying to ugpgrade a folder class", e); } } } catch (SQLiteException e) { Log.e(K9.LOG_TAG, "Exception while upgrading database to v41. folder classes may have vanished", e); } finally { Utility.closeQuietly(cursor); } } if (db.getVersion() == 41) { try { long startTime = System.currentTimeMillis(); SharedPreferences.Editor editor = this.localStore.getPreferences().edit(); List <? 
extends Folder > folders = this.localStore.getPersonalNamespaces(true); for (Folder folder : folders) { if (folder instanceof LocalFolder) { LocalFolder lFolder = (LocalFolder)folder; lFolder.save(editor); } } editor.commit(); long endTime = System.currentTimeMillis(); Log.i(K9.LOG_TAG, "Putting folder preferences for " + folders.size() + " folders back into Preferences took " + (endTime - startTime) + " ms"); } catch (Exception e) { Log.e(K9.LOG_TAG, "Could not replace Preferences in upgrade from DB_VERSION 41", e); } } if (db.getVersion() < 43) { try { // If folder "OUTBOX" (old, v3.800 - v3.802) exists, rename it to // "K9MAIL_INTERNAL_OUTBOX" (new) LocalFolder oldOutbox = new LocalFolder(this.localStore, "OUTBOX"); if (oldOutbox.exists()) { ContentValues cv = new ContentValues(); cv.put("name", Account.OUTBOX); db.update("folders", cv, "name = ?", new String[] { "OUTBOX" }); Log.i(K9.LOG_TAG, "Renamed folder OUTBOX to " + Account.OUTBOX); } // Check if old (pre v3.800) localized outbox folder exists String localizedOutbox = localStore.context.getString(R.string.special_mailbox_name_outbox); LocalFolder obsoleteOutbox = new LocalFolder(this.localStore, localizedOutbox); if (obsoleteOutbox.exists()) { // Get all messages from the localized outbox ... List<? extends Message> messages = obsoleteOutbox.getMessages(null, false); if (messages.size() > 0) { // ... and move them to the drafts folder (we don't want to // surprise the user by sending potentially very old messages) LocalFolder drafts = new LocalFolder(this.localStore, this.localStore.getAccount().getDraftsFolderName()); obsoleteOutbox.moveMessages(messages, drafts); } // Now get rid of the localized outbox obsoleteOutbox.delete(); obsoleteOutbox.delete(true); } } catch (Exception e) { Log.e(K9.LOG_TAG, "Error trying to fix the outbox folders", e); } } if (db.getVersion() < 44) { try { db.execSQL("ALTER TABLE messages ADD thread_root INTEGER"); db.execSQL("ALTER TABLE messages ADD thread_parent INTEGER"); db.execSQL("ALTER TABLE messages ADD normalized_subject_hash INTEGER"); db.execSQL("ALTER TABLE messages ADD empty INTEGER"); } catch (SQLiteException e) { if (! e.getMessage().startsWith("duplicate column name:")) { throw e; } } } if (db.getVersion() < 45) { try { db.execSQL("DROP INDEX IF EXISTS msg_empty"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_empty ON messages (empty)"); db.execSQL("DROP INDEX IF EXISTS msg_thread_root"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_thread_root ON messages (thread_root)"); db.execSQL("DROP INDEX IF EXISTS msg_thread_parent"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_thread_parent ON messages (thread_parent)"); } catch (SQLiteException e) { if (! 
e.getMessage().startsWith("duplicate column name:")) { throw e; } } } if (db.getVersion() < 46) { db.execSQL("ALTER TABLE messages ADD read INTEGER default 0"); db.execSQL("ALTER TABLE messages ADD flagged INTEGER default 0"); db.execSQL("ALTER TABLE messages ADD answered INTEGER default 0"); db.execSQL("ALTER TABLE messages ADD forwarded INTEGER default 0"); String[] projection = { "id", "flags" }; ContentValues cv = new ContentValues(); List<Flag> extraFlags = new ArrayList<Flag>(); Cursor cursor = db.query("messages", projection, null, null, null, null, null); try { while (cursor.moveToNext()) { long id = cursor.getLong(0); String flagList = cursor.getString(1); boolean read = false; boolean flagged = false; boolean answered = false; boolean forwarded = false; if (flagList != null && flagList.length() > 0) { String[] flags = flagList.split(","); for (String flagStr : flags) { try { Flag flag = Flag.valueOf(flagStr); switch (flag) { case ANSWERED: { answered = true; break; } case DELETED: { // Don't store this in column 'flags' break; } case FLAGGED: { flagged = true; break; } case FORWARDED: { forwarded = true; break; } case SEEN: { read = true; break; } case DRAFT: case RECENT: case X_DESTROYED: case X_DOWNLOADED_FULL: case X_DOWNLOADED_PARTIAL: case X_GOT_ALL_HEADERS: case X_REMOTE_COPY_STARTED: case X_SEND_FAILED: case X_SEND_IN_PROGRESS: { extraFlags.add(flag); break; } } } catch (Exception e) { // Ignore bad flags } } } cv.put("flags", this.localStore.serializeFlags(extraFlags)); cv.put("read", read); cv.put("flagged", flagged); cv.put("answered", answered); cv.put("forwarded", forwarded); db.update("messages", cv, "id = ?", new String[] { Long.toString(id) }); cv.clear(); extraFlags.clear(); } } finally { cursor.close(); } db.execSQL("CREATE INDEX IF NOT EXISTS msg_read ON messages (read)"); db.execSQL("CREATE INDEX IF NOT EXISTS msg_flagged ON messages (flagged)"); } if (db.getVersion() < 47) { // Create new 'threads' table db.execSQL("DROP TABLE IF EXISTS threads"); db.execSQL("CREATE TABLE threads (" + "id INTEGER PRIMARY KEY, " + "message_id INTEGER, " + "root INTEGER, " + "parent INTEGER" + ")"); // Create indices for new table db.execSQL("DROP INDEX IF EXISTS threads_message_id"); db.execSQL("CREATE INDEX IF NOT EXISTS threads_message_id ON threads (message_id)"); db.execSQL("DROP INDEX IF EXISTS threads_root"); db.execSQL("CREATE INDEX IF NOT EXISTS threads_root ON threads (root)"); db.execSQL("DROP INDEX IF EXISTS threads_parent"); db.execSQL("CREATE INDEX IF NOT EXISTS threads_parent ON threads (parent)"); // Create entries for all messages in 'threads' table db.execSQL("INSERT INTO threads (message_id) SELECT id FROM messages"); // Copy thread structure from 'messages' table to 'threads' Cursor cursor = db.query("messages", new String[] { "id", "thread_root", "thread_parent" }, null, null, null, null, null); try { ContentValues cv = new ContentValues(); while (cursor.moveToNext()) { cv.clear(); long messageId = cursor.getLong(0); if (!cursor.isNull(1)) { long threadRootMessageId = cursor.getLong(1); db.execSQL("UPDATE threads SET root = (SELECT t.id FROM " + "threads t WHERE t.message_id = ?) " + "WHERE message_id = ?", new String[] { Long.toString(threadRootMessageId), Long.toString(messageId) }); } if (!cursor.isNull(2)) { long threadParentMessageId = cursor.getLong(2); db.execSQL("UPDATE threads SET parent = (SELECT t.id FROM " + "threads t WHERE t.message_id = ?) 
" + "WHERE message_id = ?", new String[] { Long.toString(threadParentMessageId), Long.toString(messageId) }); } } } finally { cursor.close(); } // Remove indices for old thread-related columns in 'messages' table db.execSQL("DROP INDEX IF EXISTS msg_thread_root"); db.execSQL("DROP INDEX IF EXISTS msg_thread_parent"); // Clear out old thread-related columns in 'messages' ContentValues cv = new ContentValues(); cv.putNull("thread_root"); cv.putNull("thread_parent"); db.update("messages", cv, null, null); } if (db.getVersion() < 48) { db.execSQL("UPDATE threads SET root=id WHERE root IS NULL"); db.execSQL("CREATE TRIGGER set_thread_root " + "AFTER INSERT ON threads " + "BEGIN " + "UPDATE threads SET root=id WHERE root IS NULL AND ROWID = NEW.ROWID; " + "END"); } if (db.getVersion() < 49) { db.execSQL("CREATE INDEX IF NOT EXISTS msg_composite ON messages (deleted, empty,folder_id,flagged,read)"); } if (db.getVersion() < 50) { try { db.execSQL("ALTER TABLE folders ADD notify_class TEXT default '" + Folder.FolderClass.INHERITED.name() + "'"); } catch (SQLiteException e) { if (! e.getMessage().startsWith("duplicate column name:")) { throw e; } } ContentValues cv = new ContentValues(); cv.put("notify_class", Folder.FolderClass.FIRST_CLASS.name()); db.update("folders", cv, "name = ?", new String[] { this.localStore.getAccount().getInboxFolderName() }); } } db.setVersion(LocalStore.DB_VERSION); db.setTransactionSuccessful(); } finally { db.endTransaction(); } if (db.getVersion() != LocalStore.DB_VERSION) { throw new RuntimeException("Database upgrade failed!"); } } private void update41Metadata(final SQLiteDatabase db, SharedPreferences prefs, int id, String name) { Folder.FolderClass displayClass = Folder.FolderClass.NO_CLASS; Folder.FolderClass syncClass = Folder.FolderClass.INHERITED; Folder.FolderClass pushClass = Folder.FolderClass.SECOND_CLASS; boolean inTopGroup = false; boolean integrate = false; if (this.localStore.getAccount().getInboxFolderName().equals(name)) { displayClass = Folder.FolderClass.FIRST_CLASS; syncClass = Folder.FolderClass.FIRST_CLASS; pushClass = Folder.FolderClass.FIRST_CLASS; inTopGroup = true; integrate = true; } try { displayClass = Folder.FolderClass.valueOf(prefs.getString(this.localStore.uUid + "." + name + ".displayMode", displayClass.name())); syncClass = Folder.FolderClass.valueOf(prefs.getString(this.localStore.uUid + "." + name + ".syncMode", syncClass.name())); pushClass = Folder.FolderClass.valueOf(prefs.getString(this.localStore.uUid + "." + name + ".pushMode", pushClass.name())); inTopGroup = prefs.getBoolean(this.localStore.uUid + "." + name + ".inTopGroup", inTopGroup); integrate = prefs.getBoolean(this.localStore.uUid + "." + name + ".integrate", integrate); } catch (Exception e) { Log.e(K9.LOG_TAG, " Throwing away an error while trying to upgrade folder metadata", e); } if (displayClass == Folder.FolderClass.NONE) { displayClass = Folder.FolderClass.NO_CLASS; } if (syncClass == Folder.FolderClass.NONE) { syncClass = Folder.FolderClass.INHERITED; } if (pushClass == Folder.FolderClass.NONE) { pushClass = Folder.FolderClass.INHERITED; } db.execSQL("UPDATE folders SET integrate = ?, top_group = ?, poll_class=?, push_class =?, display_class = ? WHERE id = ?", new Object[] { integrate, inTopGroup, syncClass, pushClass, displayClass, id }); } }
Valodim/k-9
k9mail/src/main/java/com/fsck/k9/mailstore/StoreSchemaDefinition.java
Java
bsd-3-clause
29,566
// Copyright 2014 PDFium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com #ifndef XFA_FWL_CFX_BARCODE_H_ #define XFA_FWL_CFX_BARCODE_H_ #include <memory> #include "core/fxcrt/fx_coordinates.h" #include "core/fxcrt/fx_string.h" #include "core/fxcrt/fx_system.h" #include "core/fxge/fx_dib.h" #include "xfa/fxbarcode/BC_Library.h" class CBC_CodeBase; class CFX_Font; class CFX_RenderDevice; class CFX_Matrix; class CFX_Barcode { public: CFX_Barcode(); ~CFX_Barcode(); bool Create(BC_TYPE type); BC_TYPE GetType(); bool Encode(const CFX_WideStringC& contents, bool isDevice, int32_t& e); bool RenderDevice(CFX_RenderDevice* device, const CFX_Matrix* matrix, int32_t& e); bool SetCharEncoding(BC_CHAR_ENCODING encoding); bool SetModuleHeight(int32_t moduleHeight); bool SetModuleWidth(int32_t moduleWidth); bool SetHeight(int32_t height); bool SetWidth(int32_t width); bool SetPrintChecksum(bool checksum); bool SetDataLength(int32_t length); bool SetCalChecksum(bool state); bool SetFont(CFX_Font* pFont); bool SetFontSize(FX_FLOAT size); bool SetFontColor(FX_ARGB color); bool SetTextLocation(BC_TEXT_LOC location); bool SetWideNarrowRatio(int32_t ratio); bool SetStartChar(FX_CHAR start); bool SetEndChar(FX_CHAR end); bool SetVersion(int32_t version); bool SetErrorCorrectionLevel(int32_t level); bool SetTruncated(bool truncated); private: std::unique_ptr<CBC_CodeBase> m_pBCEngine; }; #endif // XFA_FWL_CFX_BARCODE_H_
DrAlexx/pdfium
xfa/fwl/cfx_barcode.h
C
bsd-3-clause
1,696
Title: Inselect v0.1.35 released You can [download v0.1.35](https://github.com/NaturalHistoryMuseum/inselect/releases/tag/v0.1.35). - [425](https://github.com/NaturalHistoryMuseum/inselect/issues/425) Warnings - [420](https://github.com/NaturalHistoryMuseum/inselect/issues/420) zbar decoder on Mac OS X - [418](https://github.com/NaturalHistoryMuseum/inselect/issues/418) Latest gouda and pylibdmtx - [416](https://github.com/NaturalHistoryMuseum/inselect/issues/416) User testing / Python 3 - error message when opening document on 32-bit Windows - [414](https://github.com/NaturalHistoryMuseum/inselect/issues/414) User testing / Python 3 - error message when starting from shortcut - [408](https://github.com/NaturalHistoryMuseum/inselect/issues/408) Installer bloat - [407](https://github.com/NaturalHistoryMuseum/inselect/issues/407) Performance impact of 2to3 - [406](https://github.com/NaturalHistoryMuseum/inselect/issues/406) Show Boxes view on creating new document - [405](https://github.com/NaturalHistoryMuseum/inselect/issues/405) Remove qtpy - [401](https://github.com/NaturalHistoryMuseum/inselect/issues/401) Support new pyzbar and pylibdmtx - [397](https://github.com/NaturalHistoryMuseum/inselect/issues/397) Couple of problems with keyboard shortcuts box - [395](https://github.com/NaturalHistoryMuseum/inselect/issues/395) PyQt5 - [393](https://github.com/NaturalHistoryMuseum/inselect/issues/393) Warning from libpng when running tests - [391](https://github.com/NaturalHistoryMuseum/inselect/issues/391) OpenCV3 - [389](https://github.com/NaturalHistoryMuseum/inselect/issues/389) PyQt4 - [386](https://github.com/NaturalHistoryMuseum/inselect/issues/386) qtpy - [382](https://github.com/NaturalHistoryMuseum/inselect/issues/382) About box too tall - [379](https://github.com/NaturalHistoryMuseum/inselect/issues/379) Gouda v0.1.10 - [377](https://github.com/NaturalHistoryMuseum/inselect/issues/377) Windows non-latin username test - [374](https://github.com/NaturalHistoryMuseum/inselect/issues/374) Support pyzbar - [372](https://github.com/NaturalHistoryMuseum/inselect/issues/372) Support pydmtxlib - [371](https://github.com/NaturalHistoryMuseum/inselect/issues/371) Ugly error message when opening file with non-ascii characters in name - [369](https://github.com/NaturalHistoryMuseum/inselect/issues/369) Keyboard shortcuts box too tall on some displays - [368](https://github.com/NaturalHistoryMuseum/inselect/issues/368) Ugly error message when creating a new inselect document on Mac OS X - [367](https://github.com/NaturalHistoryMuseum/inselect/issues/367) Reveal file doesn't work on Windows - [356](https://github.com/NaturalHistoryMuseum/inselect/issues/356) Error on running subsegment on Ubuntu 14.04 - [344](https://github.com/NaturalHistoryMuseum/inselect/issues/344) Not enough room for toolbar text on Mac retina display - [343](https://github.com/NaturalHistoryMuseum/inselect/issues/343) Missing resource compiler on Mac - [324](https://github.com/NaturalHistoryMuseum/inselect/issues/324) Drag-and-drop to open files doesn't work on Mac OS X - [309](https://github.com/NaturalHistoryMuseum/inselect/issues/309) Put web URL in prominent place - [292](https://github.com/NaturalHistoryMuseum/inselect/issues/292) Mac OS X installer bloat - [273](https://github.com/NaturalHistoryMuseum/inselect/issues/273) Inselect as a package - [223](https://github.com/NaturalHistoryMuseum/inselect/issues/223) Error creating new Inselect document from a jpg - 
[95](https://github.com/NaturalHistoryMuseum/inselect/issues/95) Python 3 - [83](https://github.com/NaturalHistoryMuseum/inselect/issues/83) Architecture and code organisation
NaturalHistoryMuseum/inselect
website/content/news/2017-01-16-v0.1.35.md
Markdown
bsd-3-clause
3,672
### 11.2.6. Avoiding Brittle Tests

A program that frequently fails on new but valid inputs is buggy (not robust); likewise, a test that fails after only a minor change to the program is called brittle. Just as a non-robust program frustrates its users, a brittle test exasperates its maintainers. The most brittle tests produce different results even when the program has not changed at all, passing one moment and failing the next; dealing with them consumes a great deal of time and brings no benefit.

When a test function produces complex output such as a long string, an elaborately constructed data structure, or a file, it is tempting to write down a fixed set of golden reference values to compare against. But as the project evolves, some of that output is bound to change, quite possibly because of an improved implementation. And it is not only the output: the function's complex inputs may change along with it, so the inputs the test relies on are no longer valid either.

The way to avoid brittle tests is to check only the properties you truly care about. Keep the test code concise and its internal structure stable, and be especially selective with assertions. Do not match strings in their entirety; instead match substrings that are likely to remain stable as the project evolves. It is often worth the effort to write a function that extracts just the information needed for the assertion from complex output; although this means more work up front, it pays off by making it quick to repair tests that would otherwise fail for no good reason as the project evolves.
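To make this concrete, here is a minimal test sketch in Go (the `buildReport`, `errorTotal`, and `TestErrorTotal` names are hypothetical, invented for illustration rather than taken from the book). Instead of comparing the whole report against a fixed golden string, the test extracts the one property it cares about (the error count) and asserts only on that, so cosmetic changes to the report's layout do not break it.

```go
package report

import (
	"regexp"
	"strconv"
	"testing"
)

// buildReport stands in for a function whose full output is long and whose
// exact layout (headings, ordering, whitespace) is likely to change.
func buildReport(errors []string) string {
	s := "Summary\n=======\n"
	for _, e := range errors {
		s += " - " + e + "\n"
	}
	return s + "total errors: " + strconv.Itoa(len(errors)) + "\n"
}

// totalRE and errorTotal extract the single property the test cares about,
// so the assertion does not depend on cosmetic details of the report.
var totalRE = regexp.MustCompile(`total errors: (\d+)`)

func errorTotal(report string) (int, bool) {
	m := totalRE.FindStringSubmatch(report)
	if m == nil {
		return 0, false
	}
	n, err := strconv.Atoi(m[1])
	return n, err == nil
}

// TestErrorTotal asserts on the extracted count instead of comparing the
// whole report against a fixed golden string.
func TestErrorTotal(t *testing.T) {
	report := buildReport([]string{"disk full", "timeout"})
	got, ok := errorTotal(report)
	if !ok {
		t.Fatalf("report has no total line:\n%s", report)
	}
	if got != 2 {
		t.Errorf("error total = %d, want 2", got)
	}
}
```

If the report's wording or formatting changes later, only the small extraction helper needs updating, not every assertion that depends on the report.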
gopl-zh/gopl-zh.github.com
ch11/ch11-02-6.md
Markdown
bsd-3-clause
1,535
# proxy module
from pyface.qt.QtScript import *
enthought/etsproxy
enthought/qt/QtScript.py
Python
bsd-3-clause
48
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================= ## @file ostap/frames/tree_reduce.py # Helper module to "Reduce" tree using frames # @see Ostap::DataFrame # @see ROOT::RDataFrame # @author Vanya BELYAEV Ivan.Belyaev@itep.ru # @date 2018-06-16 # ============================================================================= """Helper module to ``reduce'' tree using frames - see Ostap.DataFrame - see ROOT.ROOT.RDataFrame """ # ============================================================================= __version__ = "$Revision$" __author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru" __date__ = "2011-06-07" __all__ = ( 'ReduceTree' , 'reduce' , ) # ============================================================================= import ROOT, os # ============================================================================= # logging # ============================================================================= from ostap.logger.logger import getLogger if '__main__' == __name__ : logger = getLogger( 'ostap.frames.tree_reduce' ) else : logger = getLogger( __name__ ) # ============================================================================= logger.debug ( "``Reduce'' TTree using ROOT::RDataFrame object") # ============================================================================= import ostap.trees.trees from ostap.core.core import cpp, Ostap from ostap.utils.cleanup import CleanUp # ============================================================================= ## @class ReduceTree # Reduce TTree object using intermediate (temporary # @code # tree = ... # r = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] , 'new_file.root' ) # reduced = t.tree # @endcode class ReduceTree(CleanUp): """Reduce ROOT.TTree object >>> tree = ... >>> r = ReduceTree ( tree , cuts , [ 'px', 'py', 'pz' ] >>> reduced = r.tree """ def __init__ ( self , chain , ## input TChain/TTree selection = {} , ## selection/cuts save_vars = () , ## list of variables to save new_vars = {} , ## new variables no_vars = () , ## exclude these variables ## output = '' , ## output file name name = '' , ## the name addselvars = False , ## add varibles from selections? 
tmp_keep = False , ## keep the temporary file silent = False ): ## silent processing from ostap.frames.frames import DataFrame frame = DataFrame ( chain ) report = None self.__frame_main = frame if not silent : pbar = frame.ProgressBar ( len ( chain ) ) nvars = [] ## new variables for nv in new_vars : frame = frame.Define ( nv , new_vars [ nv] ) nvars.append ( nv ) from ostap.core.ostap_types import ( string_types , listlike_types , dictlike_types ) cut_types = string_types + ( ROOT.TCut , ) Lmax = 30 selections = [] if selection and isinstance ( selection , cut_types ) : ss = str ( selection ).strip() if len ( ss ) < Lmax : filter_name = ss else : filter_name = 'SELECTION' frame = frame.Filter ( ss , filter_name ) selections.append ( ss ) elif selection and isinstance ( selection , dictlike_types ) : for filter_name in selection : s = selection [ filter_name ] assert isinstance ( s , cut_types ),\ 'Invalid selection type %s/%s' % ( s , type ( s ) ) ss = str ( s ).strip() frame = frame.Filter ( ss , str ( filter_name ) ) selections.append ( ss ) elif selection and isinstance ( selection , listlike_types ) : for i , s in enumerate ( selection ) : assert isinstance ( s , cut_types ),\ 'Invalid selection type %s/%s' % ( s , type ( s ) ) ss = str( s ).strip() ## if len ( ss ) < Lmax : filter_name = ss else : filter_name = 'SELECTION%d' % i # frame = frame.Filter ( ss , filter_name ) selections.append ( ss ) elif selection : raise TypeError('Invalid selection type %s/%s' % ( selection , type ( selection ) ) ) if not output : output = self.tempfile ( prefix = 'ostap-frame-' , suffix = '.root' ) ## logger.debug ( 'ReduceTree: output file is %s' % output ) if not tmp_keep : self.trash.add ( output ) ## if selections : report = frame.Report() if selections and addselvars : bvars = chain.the_variables ( selections ) save_vars = list ( bvars ) + [ v for v in save_vars if not v in bvars ] save_vars = tuple ( save_vars ) ## exclude some variables if no_vars and not save_vars : bvars = list ( chain.branches () ) all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ] save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] ) elif no_vars : bvars = chain.the_variables ( *save_vars ) all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ] save_vars = tuple ( [ v for v in all_vars if not v in no_vars ] ) nb_ = len ( chain.branches () ) ne_ = len ( chain ) ## chain name: ## FIXME! 
# cname = chain.GetName() ## produces ROOT error if not name : _ , _ , cname = chain.GetName().rpartition ( '/' ) name = '%s_reduced' % cname self.__name = name if not save_vars : snapshot = frame.Snapshot ( name , output ) else : bvars = chain.the_variables ( *save_vars ) all_vars = list ( bvars ) + [ v for v in nvars if not v in bvars ] from ostap.core.core import strings as _strings all_vars = _strings ( all_vars ) snapshot = frame.Snapshot ( name , output , all_vars ) assert os.path.exists ( output ) and\ os.path.isfile ( output ) , 'Invalid file %s' % fname self.__chain = ROOT.TChain ( name ) self.__chain.Add ( output ) self.__output = output self.__report = 'Tree -> Frame -> Tree filter/transformation' self.__table = [] if report : from ostap.frames.frames import report_print, report_as_table title = self.__report self.__report += '\n%s' % report_print ( report , title , '# ') self.__table = report_as_table ( report ) fs = os.path.getsize ( self.__output ) gb , r = divmod ( fs , 1024 * 1024 * 1024 ) mb , r = divmod ( r , 1024 * 1024 ) kb , r = divmod ( r , 1024 ) if gb : fs = '%.1fGB' % ( float ( fs ) / 1024 / 1024 / 1024 ) elif mb : fs = '%.1fMB' % ( float ( fs ) / 1024 / 1024 ) elif kb : fs = '%.1fkB' % ( float ( fs ) / 1024 ) else : fs = '%sB' % fs nb = len ( self.__chain.branches () ) ne = len ( self.__chain ) self.__report += '\n# Reduce %d -> %d branches, %d -> %d entries' % ( nb_ , nb , ne_ , ne ) self.__report += '\n# Output:%s size:%s' % ( self.__output , fs ) self.__report += '\n# %s' % str ( self.__chain ) del self.__frame_main def __str__ ( self ) : return self.__report def __repr__ ( self ) : return self.__report @property def output ( self ) : """``output'' : the output file name""" return self.__output @property def chain ( self ) : """``chain'': the reduced chain/tree (same as tree)""" return self.__chain @property def name ( self ) : """``name'' : the output chain name""" return self.__name @property def tree ( self ) : """``tree'': the reduced chain/tree (same as chain)""" return self.__chain @property def table ( self ) : """``table'' : get the statitics as table""" return self.__table @property def report ( self ) : """``report'' : get the statitics report""" return self.__report # =============================================================================== ## Powerful method to reduce/tranform the tree/chain. # It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows # - filter entries from TTree/TChain # - add new colums # - remove unnesessary columns # @code # tree = .... # reduced1 = tree.reduce ( 'pt>1' ) # reduced2 = tree.reduce ( 'pt>1' , save_vars = [ 'p', 'pt' ,'q' ] ) # reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] ) # reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } ) # reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' ) # @endcode # @see Ostap::DataFrame # @see ROOT::RDataFrame def reduce ( tree , selection , save_vars = () , new_vars = {} , no_vars = () , output = '' , name = '' , addselvars = False , silent = False ) : """ Powerful method to reduce/tranform the tree/chain. It relies on Ostap.DataFrame ( alias for ROOT.ROOT.DataFrame) and allows - filter entries from TTree/TChain - add new colums - remove unnesessary columns >>> tree = .... 
>>> reduced1 = tree.reduce ( 'pt>1' ) >>> reduced2 = tree.reduce ( 'pt>1' , vars = [ 'p', 'pt' ,'q' ] ) >>> reduced3 = tree.reduce ( 'pt>1' , no_vars = [ 'Q', 'z' ,'x' ] ) >>> reduced4 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } ) >>> reduced5 = tree.reduce ( 'pt>1' , new_vars = { 'pt2' : 'pt*pt' } , output = 'OUTPUT.root' ) """ nb0 = len ( tree.branches() ) ne0 = len ( tree ) reduced = ReduceTree ( tree , selection = selection , save_vars = save_vars , new_vars = new_vars , no_vars = no_vars , output = output , name = name , addselvars = addselvars , tmp_keep = True , silent = silent ) from ostap.trees.trees import Chain result = Chain ( reduced.chain ) if not output : result.trash.add ( reduced.output ) if silent : nb = len ( result.chain.branches() ) ne = len ( result.chain ) f = float ( nb0 * ne0 ) / ( nb * ne ) logger.info ( 'reduce: (%dx%d) -> (%dx%d) %.1f (branches x entries) ' % ( nb0 , ne0 , nb , ne , f ) ) return result ROOT.TTree. reduce = reduce # ============================================================================= _decorated_classes_ = ( ROOT.TTree , ) _new_methods_ = ( ROOT.TTree.reduce , ) # ============================================================================= if '__main__' == __name__ : from ostap.utils.docme import docme docme ( __name__ , logger = logger ) # ============================================================================= # The END # =============================================================================
OstapHEP/ostap
ostap/frames/tree_reduce.py
Python
bsd-3-clause
12,435
<?php namespace SalesObjects\Model; use Zend\InputFilter\Factory as InputFactory; // <-- Add this import use Zend\InputFilter\InputFilter; // <-- Add this import use Zend\InputFilter\InputFilterAwareInterface; // <-- Add this import use Zend\InputFilter\InputFilterInterface; // <-- Add this import class Resorts extends \Base\Model\AvpModel implements InputFilterAwareInterface{ protected $inputFilter; protected $_maxImageSize = 2097152; //2*1024*1024 = 2Mb public function getImagePath(){ return \Base\Model\Plugins\Imagine::$imagesBaseUrl.'resort/thumbnails_80x80/'; } protected $_imageOptions = array( //img 80x80 array( 'options' => array('width'=>80, 'height'=>80), 'destination' => 'resort/thumbnails_80x80/' ), //img 91x65 array( 'options' => array('width'=>91, 'height'=>65), 'destination' => 'resort/thumbnails_91x65' ), //img 150x150 array( 'options' => array('width'=>150, 'height'=>150), 'destination' => 'resort/thumbnails_150x150/' ), //img 158x106 array( 'options' => array('width'=>158, 'height'=>106), 'destination' => 'resort/thumbnails_158x106/' ), //img 202x144 array( 'options' => array('width'=>202, 'height'=>144), 'destination' => 'resort/thumbnails_202x144/' ), //img 288x196 array( 'options' => array('width'=>288, 'height'=>196), 'destination' => 'resort/thumbnails_288x196/' ), //img 288x161 array( 'options' => array('width'=>288, 'height'=>161), 'destination' => 'resort/thumbnails_288x161/' ), //img 957x381 array( 'options' => array('width'=>957, 'height'=>381), 'destination' => 'resort/slider_957x381/' ), //img 701x456 array( 'options' => array('width'=>701, 'height'=>456), 'destination' => 'resort/slider_701x456/' ), //img small 250x250 array( 'options' => array('width'=>250, 'height'=>250), 'destination' => 'resort/small/' ), //img large 800x600 array( 'options' => array('width'=>800, 'height'=>600), 'destination' => 'resort/large/' ), //img array( 'options' => null, 'destination' => 'resort/' ) ); public function setInputFilter(InputFilterInterface $inputFilter) { throw new \Exception("Not used"); } public function getInputFilter() { if (!$this->inputFilter) { $inputFilter = new InputFilter(); $factory = new InputFactory(); //General Information $inputFilter->add($factory->createInput(array( 'name' => 'title', 'required' => true, 'filters' => array( array('name' => 'StripTags'), array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, 'max' => 255, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'pageHeading', 'required' => false, 'filters' => array( array('name' => 'StripTags'), array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, 'max' => 255, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'categoryId', 'required' => false, ))); $inputFilter->add($factory->createInput(array( 'name' => 'countryId', 'required' => false, ))); $inputFilter->add($factory->createInput(array( 'name' => 'overview', 'required' => true, 'filters' => array( array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'amenities', 'required' => false, 'filters' => array( array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, ), ), ), ))); 
$inputFilter->add($factory->createInput(array( 'name' => 'entertainment', 'required' => false, 'filters' => array( array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'image', 'required' => false, 'filters' => array( array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'max' => 244, ), ), ), ))); //Popover data $inputFilter->add($factory->createInput(array( 'name' => 'popoverTitle', 'required' => false, 'filters' => array( array('name' => 'StripTags'), array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, 'max' => 64, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'popoverContent', 'required' => false, 'filters' => array( array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, ), ), ), ))); //SEO Information $inputFilter->add($factory->createInput(array( 'name' => 'metaTitle', 'required' => false, 'filters' => array( array('name' => 'StripTags'), array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, 'max' => 255, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'metaDescription', 'required' => false, 'filters' => array( array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, ), ), ), ))); $inputFilter->add($factory->createInput(array( 'name' => 'metaKeywords', 'required' => false, 'filters' => array( array('name' => 'StripTags'), array('name' => 'StringTrim'), ), 'validators' => array( array( 'name' => 'StringLength', 'options' => array( 'encoding' => 'UTF-8', 'min' => 2, 'max' => 255, ), ), ), ))); $this->inputFilter = $inputFilter; } return $this->inputFilter; } protected function isValidTitle($form){ if($this->checkItem(array('title'=>$form->getData()->getTitle()))){ $form->get('title')->setMessages(array('The title already exists')); return false; } return true; } public function isValidModel($form){ return $this->isValidTitle($form); } public function isValidModelOnEdit($form){ if($this->checkItem(array( 'id'=>$form->getData()->getId(), 'title'=>$form->getData()->getTitle())) ){ return true; } return $this->isValidTitle($form); } public function isValidImage($form, $files){ if(empty($files['image']['name'])) return true; $size = (int)$files['image']['size']; if($size == 0 || $this->_maxImageSize < $size){ $form->get('image')->setMessages(array('Max size 2 Mb')); return false; } $types = array('image/gif', 'image/png', 'image/jpeg', 'image/pjpeg'); if (!in_array($files['image']['type'], $types)){ $form->get('image')->setMessages(array('Invalid file type. 
Upload images: *.gif, *.png, *.jpg')); return false; } return true; } protected function saveImages($tmpName, $imgName){ foreach($this->_imageOptions as $imgOption){ \Base\Model\Plugins\Imagine::uploadImage($tmpName, $imgName, $imgOption['options'], $imgOption['destination']); } } protected function deleteImages($imageName){ foreach($this->_imageOptions as $imgOption){ $file = \Base\Model\Plugins\Imagine::$srcDestination.$imgOption['destination'].$imageName; if(file_exists($file)){ unlink($file); } } } public function update($object){ $obj = $object['object']; $tmpImage = $object['tmpImage']; if($obj->getImage()!== $tmpImage){ $this->deleteImages($tmpImage); $this->save($object); }else{ parent::save($obj); } } public function save($object){ $obj = $object['object']; $date = getdate(); $obj->setImage($date[0].'-'.$obj->getImage()); parent::save($obj); $this->saveImages($object['files']['image']['tmp_name'], $obj->getImage()); } public function delete($id){ $em = $this->_entityManager; $entity = $em->getReference($this->_targetEntity, (int)$id); //delete images $this->deleteImages($entity->getImage()); //delete resort rooms $query = $em->createQuery('SELECT r.image AS image FROM \Base\Entity\Avp\ResortRooms r WHERE r.resortId = :id'); $query->setParameter('id', $id); $result = $query->getResult(); $rooms = new Rooms($this->_serviceManager); foreach($result as $item){ $rooms->deleteImages($item['image']); } $rooms = null; $em = $this->getEntityManager(); $qb = $em->createQueryBuilder(); $qb->delete('Base\Entity\Avp\Resorts', 'u') ->where('u.id=:Id') ->setParameter('Id', $id); $query = $qb->getQuery(); $collection = $query->getResult(); $em = $this->getEntityManager(); $qb = $em->createQueryBuilder(); $qb->delete('Base\Entity\Avp\ResortRooms', 'u') ->where('u.resortId=:Id') ->setParameter('Id', $id); $query = $qb->getQuery(); $collection = $query->getResult(); //$query = $em->createQuery('DELETE FROM \Base\Entity\Avp\ResortRooms r WHERE r.resortId = :id'); //$query->setParameter('id', $id); //$query->getResult(); //delete resort //$em->remove($entity); $em->flush(); return true; } }
Jhonnorton/vivekvtc
module/SalesObjects/src/SalesObjects/Model/Resorts_09-10-2014.php
PHP
bsd-3-clause
12,567
-- Copyright (c) 2016-present, Facebook, Inc. -- All rights reserved. -- -- This source code is licensed under the BSD-style license found in the -- LICENSE file in the root directory of this source tree. An additional grant -- of patent rights can be found in the PATENTS file in the same directory. {-# LANGUAGE OverloadedStrings #-} module Duckling.Email.EN.Corpus ( corpus ) where import Data.String import Prelude import Duckling.Email.Types import Duckling.Testing.Types corpus :: Corpus corpus = (testContext, allExamples) allExamples :: [Example] allExamples = concat [ examples (EmailData "alice@exAmple.io") [ "alice at exAmple.io" ] , examples (EmailData "yo+yo@blah.org") [ "yo+yo at blah.org" ] , examples (EmailData "1234+abc@x.net") [ "1234+abc at x.net" ] , examples (EmailData "jean-jacques@stuff.co.uk") [ "jean-jacques at stuff.co.uk" ] ]
rfranek/duckling
Duckling/Email/EN/Corpus.hs
Haskell
bsd-3-clause
983
<?php /** * The Service module provides basic configuration validation during service instantiation. * * Basic Usage: * <code> * use Foundry\Core\Service; * class SomeService { * public static $required_options = array("hostname", * "username", * "password"); * __construct($options) { * // Validate that all the required options are present * $valid = Service::validate($options, self::$required_options); * if (!$valid) { registerError("Unable to load SomeService: configuration options not set."); * } * } * } * </code> * * @category Foundry-Core * @package Foundry\Core * @author John Roepke <john@justjohn.us> * @copyright 2010-2011 John Roepke * @license http://phpfoundry.com/license/bsd New BSD license * @version 1.0.0 */ namespace Foundry\Core; use Foundry\Core\Exceptions\ServiceValidationException; /** * The Service validator. * * @category Foundry-Core * @package Foundry\Core * @author John Roepke <john@justjohn.us> * @copyright 2010-2011 John Roepke * @license http://phpfoundry.com/license/bsd New BSD license * @since 1.0.0 */ class Service { /** * Validate all required optoins are present in options. * @param array $options * @param array $required_options * @throws ServiceValidationException All required options are not present. */ public static function validate($options, $required_options) { if (!is_array($options) || !is_array($required_options)) { throw new ServiceValidationException("Passed options are not in expected format (array) got " . get_a($options, false) . ", check that the options have been set"); } if (empty($options) && !empty($required_options)) { throw new ServiceValidationException("No options set, required: " . get_a($required_options, false)); } $option_keys = array_keys($options); /** * If all the options are present the intersection will * be the same size as the required options array. */ $used_options = array_intersect($option_keys, $required_options); if(count($used_options) != count($required_options)) { throw new ServiceValidationException("Not all required options are present, found " . get_a($option_keys, false) . " required: " . get_a($required_options, false)); } } } ?>
justjohn/foundry-core
lib/Foundry/Core/Service.php
PHP
bsd-3-clause
2,730
<?php namespace frontend\controllers; use Yii; use backend\models\Comment; use backend\models\CommentSearch; use yii\web\Controller; use yii\web\NotFoundHttpException; use yii\filters\VerbFilter; /** * CommentController implements the CRUD actions for Comment model. */ class CommentController extends Controller { public function behaviors() { return [ 'verbs' => [ 'class' => VerbFilter::className(), 'actions' => [ 'delete' => ['post'], ], ], ]; } /** * Lists all Comment models. * @return mixed */ public function actionIndex() { $searchModel = new CommentSearch(); $dataProvider = $searchModel->search(Yii::$app->request->queryParams); return $this->render('index', [ 'searchModel' => $searchModel, 'dataProvider' => $dataProvider, ]); } /** * Displays a single Comment model. * @param integer $id * @return mixed */ public function actionView($id) { return $this->render('view', [ 'model' => $this->findModel($id), ]); } /** * Creates a new Comment model. * If creation is successful, the browser will be redirected to the 'view' page. * @return mixed */ public function actionCreate() { $model = new Comment(); if ($model->load(Yii::$app->request->post()) && $model->save()) { return $this->redirect(['view', 'id' => $model->id]); } else { return $this->render('create', [ 'model' => $model, ]); } } private function getASaying(){ //saying $saying = Yii::$app->db->createCommand('SELECT id, saying FROM saying ORDER BY rand() LIMIT 1') ->queryAll(); $this->view->params['saying'] = $saying[0]['saying']; } /** * Updates an existing Comment model. * If update is successful, the browser will be redirected to the 'view' page. * @param integer $id * @return mixed */ public function actionUpdate($id,$current) { $this->getASaying(); $this->layout = '/index.php'; $model = $this->findModel($id); $posts = Yii::$app->db->createCommand('select id,lvl,name,case when lvl = 0 then 0 else root end as root,name from tbl_product where root = (select root from tbl_product where id = '.$current.' ) and status = 0 order by lft') ->queryAll(); $content = Yii::$app->db->createCommand('select name,content,key_word,font_size from tbl_product where id = '.$current.' and status = 0 order by lft') ->queryAll(); $this->view->params['key'] = $content[0]['key_word']; $this->view->params['current_id'] = $id; $this->view->params['title'] = $content[0]['name']; $this->view->params['root_title'] = $posts[0]['name']; if ($model->load(Yii::$app->request->post()) && $model->save()) { return $this->redirect(['view', 'id' => $model->id]); } else { return $this->render('update', [ 'model' => $model, ]); } } /** * Deletes an existing Comment model. * If deletion is successful, the browser will be redirected to the 'index' page. * @param integer $id * @return mixed */ public function actionDelete($id) { $this->findModel($id)->delete(); return $this->redirect(['index']); } /** * Finds the Comment model based on its primary key value. * If the model is not found, a 404 HTTP exception will be thrown. * @param integer $id * @return Comment the loaded model * @throws NotFoundHttpException if the model cannot be found */ protected function findModel($id) { if (($model = Comment::findOne($id)) !== null) { return $model; } else { throw new NotFoundHttpException('The requested page does not exist.'); } } }
mianbao726/sunny
_protected/frontend/controllers/CommentController.php
PHP
bsd-3-clause
4,123
/* $NetBSD: bootconfig.h,v 1.2 2002/04/12 18:01:17 bjh21 Exp $ */ /* * Copyright (c) 1994 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * boot configuration structures * * Created : 12/09/94 * * Based on kate/boot/bootconfig.h */ typedef struct _PhysMem { u_int address; u_int pages; } PhysMem; #define DRAM_BLOCKS 1 typedef struct _BootConfig { PhysMem dram[DRAM_BLOCKS]; u_int dramblocks; } BootConfig; extern BootConfig bootconfig; #define MAX_BOOT_STRING 255 #ifdef _KERNEL #define BOOTOPT_TYPE_BOOLEAN 0 #define BOOTOPT_TYPE_STRING 1 #define BOOTOPT_TYPE_INT 2 #define BOOTOPT_TYPE_BININT 3 #define BOOTOPT_TYPE_HEXINT 4 #define BOOTOPT_TYPE_MASK 7 int get_bootconf_option __P((char *string, char *option, int type, void *result)); extern char *boot_args; extern char *boot_file; #endif /* _KERNEL */ /* End of bootconfig.h */
MarginC/kame
netbsd/sys/arch/netwinder/include/bootconfig.h
C
bsd-3-clause
2,635
/* $NetBSD: linux_machdep.c,v 1.46 1999/01/08 11:59:38 kleink Exp $ */ /*- * Copyright (c) 1995 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Frank van der Linden. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "opt_vm86.h" #include "opt_user_ldt.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/signalvar.h> #include <sys/kernel.h> #include <sys/map.h> #include <sys/proc.h> #include <sys/user.h> #include <sys/buf.h> #include <sys/reboot.h> #include <sys/conf.h> #include <sys/exec.h> #include <sys/file.h> #include <sys/callout.h> #include <sys/malloc.h> #include <sys/mbuf.h> #include <sys/msgbuf.h> #include <sys/mount.h> #include <sys/vnode.h> #include <sys/device.h> #include <sys/syscallargs.h> #include <sys/filedesc.h> #include <sys/exec_elf.h> #include <compat/linux/common/linux_types.h> #include <compat/linux/common/linux_signal.h> #include <compat/linux/common/linux_util.h> #include <compat/linux/common/linux_ioctl.h> #include <compat/linux/common/linux_exec.h> #include <compat/linux/common/linux_machdep.h> #include <compat/linux/linux_syscallargs.h> #include <machine/cpu.h> #include <machine/cpufunc.h> #include <machine/psl.h> #include <machine/reg.h> #include <machine/segments.h> #include <machine/specialreg.h> #include <machine/sysarch.h> #include <machine/vm86.h> #include <machine/vmparam.h> /* * To see whether pcvt is configured (for virtual console ioctl calls). 
*/ #ifndef NVT #include "vt.h" #endif #if NVT > 0 #include <arch/i386/isa/pcvt/pcvt_ioctl.h> #endif #include "wsdisplay.h" #if (NWSDISPLAY > 0) #include <sys/ioctl.h> #include <dev/wscons/wsdisplay_usl_io.h> #include "opt_xserver.h" #endif #ifdef USER_LDT #include <machine/cpu.h> int linux_read_ldt __P((struct proc *, struct linux_sys_modify_ldt_args *, register_t *)); int linux_write_ldt __P((struct proc *, struct linux_sys_modify_ldt_args *, register_t *)); #endif /* * Deal with some i386-specific things in the Linux emulation code. */ void linux_setregs(p, epp, stack) struct proc *p; struct exec_package *epp; u_long stack; { register struct pcb *pcb = &p->p_addr->u_pcb; setregs(p, epp, stack); pcb->pcb_savefpu.sv_env.en_cw = __Linux_NPXCW__; } /* * Send an interrupt to process. * * Stack is set up to allow sigcode stored * in u. to call routine, followed by kcall * to sigreturn routine below. After sigreturn * resets the signal mask, the stack, and the * frame pointer, it returns to the user * specified pc, psl. */ void linux_sendsig(catcher, sig, mask, code) sig_t catcher; int sig; sigset_t *mask; u_long code; { register struct proc *p = curproc; register struct trapframe *tf; struct linux_sigframe *fp, frame; struct sigacts *psp = p->p_sigacts; tf = p->p_md.md_regs; /* Allocate space for the signal handler context. */ /* XXX Linux doesn't support the signal stack. */ fp = (struct linux_sigframe *)tf->tf_esp; fp--; /* Build stack frame for signal trampoline. */ frame.sf_handler = catcher; frame.sf_sig = native_to_linux_sig[sig]; /* Save register context. */ #ifdef VM86 if (tf->tf_eflags & PSL_VM) { frame.sf_sc.sc_gs = tf->tf_vm86_gs; frame.sf_sc.sc_fs = tf->tf_vm86_fs; frame.sf_sc.sc_es = tf->tf_vm86_es; frame.sf_sc.sc_ds = tf->tf_vm86_ds; frame.sf_sc.sc_eflags = get_vflags(p); } else #endif { __asm("movl %%gs,%w0" : "=r" (frame.sf_sc.sc_gs)); __asm("movl %%fs,%w0" : "=r" (frame.sf_sc.sc_fs)); frame.sf_sc.sc_es = tf->tf_es; frame.sf_sc.sc_ds = tf->tf_ds; frame.sf_sc.sc_eflags = tf->tf_eflags; } frame.sf_sc.sc_edi = tf->tf_edi; frame.sf_sc.sc_esi = tf->tf_esi; frame.sf_sc.sc_ebp = tf->tf_ebp; frame.sf_sc.sc_ebx = tf->tf_ebx; frame.sf_sc.sc_edx = tf->tf_edx; frame.sf_sc.sc_ecx = tf->tf_ecx; frame.sf_sc.sc_eax = tf->tf_eax; frame.sf_sc.sc_eip = tf->tf_eip; frame.sf_sc.sc_cs = tf->tf_cs; frame.sf_sc.sc_esp_at_signal = tf->tf_esp; frame.sf_sc.sc_ss = tf->tf_ss; frame.sf_sc.sc_err = tf->tf_err; frame.sf_sc.sc_trapno = tf->tf_trapno; /* Save signal stack. */ /* XXX Linux doesn't support the signal stack. */ /* Save signal mask. */ native_to_linux_sigset(mask, &frame.sf_sc.sc_mask); if (copyout(&frame, fp, sizeof(frame)) != 0) { /* * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. */ sigexit(p, SIGILL); /* NOTREACHED */ } /* * Build context to run handler in. */ tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL); tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL); tf->tf_eip = (int)psp->ps_sigcode; tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL); tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC); tf->tf_esp = (int)fp; tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL); /* Remember that we're now on the signal stack. */ /* XXX Linux doesn't support the signal stack. */ } /* * System call to cleanup state after a signal * has been taken. Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc and psl as specified by * context left by sendsig. Check carefully to * make sure that the user has not modified the * psl to gain improper privileges or to cause * a machine fault. 
*/ int linux_sys_rt_sigreturn(p, v, retval) struct proc *p; void *v; register_t *retval; { /* XXX XAX write me */ return(ENOSYS); } int linux_sys_sigreturn(p, v, retval) struct proc *p; void *v; register_t *retval; { struct linux_sys_sigreturn_args /* { syscallarg(struct linux_sigcontext *) scp; } */ *uap = v; struct linux_sigcontext *scp, context; register struct trapframe *tf; sigset_t mask; /* * The trampoline code hands us the context. * It is unsafe to keep track of it ourselves, in the event that a * program jumps out of a signal handler. */ scp = SCARG(uap, scp); if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0) return (EFAULT); /* Restore register context. */ tf = p->p_md.md_regs; #ifdef VM86 if (context.sc_eflags & PSL_VM) { tf->tf_vm86_gs = context.sc_gs; tf->tf_vm86_fs = context.sc_fs; tf->tf_vm86_es = context.sc_es; tf->tf_vm86_ds = context.sc_ds; set_vflags(p, context.sc_eflags); } else #endif { /* * Check for security violations. If we're returning to * protected mode, the CPU will validate the segment registers * automatically and generate a trap on violations. We handle * the trap, rather than doing all of the checking here. */ if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 || !USERMODE(context.sc_cs, context.sc_eflags)) return (EINVAL); /* %fs and %gs were restored by the trampoline. */ tf->tf_es = context.sc_es; tf->tf_ds = context.sc_ds; tf->tf_eflags = context.sc_eflags; } tf->tf_edi = context.sc_edi; tf->tf_esi = context.sc_esi; tf->tf_ebp = context.sc_ebp; tf->tf_ebx = context.sc_ebx; tf->tf_edx = context.sc_edx; tf->tf_ecx = context.sc_ecx; tf->tf_eax = context.sc_eax; tf->tf_eip = context.sc_eip; tf->tf_cs = context.sc_cs; tf->tf_esp = context.sc_esp_at_signal; tf->tf_ss = context.sc_ss; /* Restore signal stack. */ p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK; /* Restore signal mask. 
*/ linux_to_native_sigset(&context.sc_mask, &mask); (void) sigprocmask1(p, SIG_SETMASK, &mask, 0); return (EJUSTRETURN); } #ifdef USER_LDT int linux_read_ldt(p, uap, retval) struct proc *p; struct linux_sys_modify_ldt_args /* { syscallarg(int) func; syscallarg(void *) ptr; syscallarg(size_t) bytecount; } */ *uap; register_t *retval; { struct i386_get_ldt_args gl; int error; caddr_t sg; char *parms; sg = stackgap_init(p->p_emul); gl.start = 0; gl.desc = SCARG(uap, ptr); gl.num = SCARG(uap, bytecount) / sizeof(union descriptor); parms = stackgap_alloc(&sg, sizeof(gl)); if ((error = copyout(&gl, parms, sizeof(gl))) != 0) return (error); if ((error = i386_get_ldt(p, parms, retval)) != 0) return (error); *retval *= sizeof(union descriptor); return (0); } struct linux_ldt_info { u_int entry_number; u_long base_addr; u_int limit; u_int seg_32bit:1; u_int contents:2; u_int read_exec_only:1; u_int limit_in_pages:1; u_int seg_not_present:1; }; int linux_write_ldt(p, uap, retval) struct proc *p; struct linux_sys_modify_ldt_args /* { syscallarg(int) func; syscallarg(void *) ptr; syscallarg(size_t) bytecount; } */ *uap; register_t *retval; { struct linux_ldt_info ldt_info; struct segment_descriptor sd; struct i386_set_ldt_args sl; int error; caddr_t sg; char *parms; if (SCARG(uap, bytecount) != sizeof(ldt_info)) return (EINVAL); if ((error = copyin(SCARG(uap, ptr), &ldt_info, sizeof(ldt_info))) != 0) return error; if (ldt_info.contents == 3) return (EINVAL); sg = stackgap_init(p->p_emul); sd.sd_lobase = ldt_info.base_addr & 0xffffff; sd.sd_hibase = (ldt_info.base_addr >> 24) & 0xff; sd.sd_lolimit = ldt_info.limit & 0xffff; sd.sd_hilimit = (ldt_info.limit >> 16) & 0xf; sd.sd_type = 16 | (ldt_info.contents << 2) | (!ldt_info.read_exec_only << 1); sd.sd_dpl = SEL_UPL; sd.sd_p = !ldt_info.seg_not_present; sd.sd_def32 = ldt_info.seg_32bit; sd.sd_gran = ldt_info.limit_in_pages; sl.start = ldt_info.entry_number; sl.desc = stackgap_alloc(&sg, sizeof(sd)); sl.num = 1; #if 0 printf("linux_write_ldt: idx=%d, base=%x, limit=%x\n", ldt_info.entry_number, ldt_info.base_addr, ldt_info.limit); #endif parms = stackgap_alloc(&sg, sizeof(sl)); if ((error = copyout(&sd, sl.desc, sizeof(sd))) != 0) return (error); if ((error = copyout(&sl, parms, sizeof(sl))) != 0) return (error); if ((error = i386_set_ldt(p, parms, retval)) != 0) return (error); *retval = 0; return (0); } #endif /* USER_LDT */ int linux_sys_modify_ldt(p, v, retval) struct proc *p; void *v; register_t *retval; { struct linux_sys_modify_ldt_args /* { syscallarg(int) func; syscallarg(void *) ptr; syscallarg(size_t) bytecount; } */ *uap = v; switch (SCARG(uap, func)) { #ifdef USER_LDT case 0: return (linux_read_ldt(p, uap, retval)); case 1: return (linux_write_ldt(p, uap, retval)); #endif /* USER_LDT */ default: return (ENOSYS); } } /* * XXX Pathetic hack to make svgalib work. This will fake the major * device number of an opened VT so that svgalib likes it. grmbl. * Should probably do it 'wrong the right way' and use a mapping * array for all major device numbers, and map linux_mknod too. */ dev_t linux_fakedev(dev) dev_t dev; { #if (NVT > 0) if (major(dev) == NETBSD_PCCONS_MAJOR) return makedev(LINUX_CONS_MAJOR, (minor(dev) + 1)); #endif #if (NWSDISPLAY > 0) if (major(dev) == NETBSD_WSCONS_MAJOR) return makedev(LINUX_CONS_MAJOR, (minor(dev) + 1)); #endif return dev; } #if (NWSDISPLAY > 0) && defined(XSERVER) /* * That's not complete, but enough to get an X server running. 
*/ #define NR_KEYS 128 static u_short plain_map[NR_KEYS] = { 0x0200, 0x001b, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x0030, 0x002d, 0x003d, 0x007f, 0x0009, 0x0b71, 0x0b77, 0x0b65, 0x0b72, 0x0b74, 0x0b79, 0x0b75, 0x0b69, 0x0b6f, 0x0b70, 0x005b, 0x005d, 0x0201, 0x0702, 0x0b61, 0x0b73, 0x0b64, 0x0b66, 0x0b67, 0x0b68, 0x0b6a, 0x0b6b, 0x0b6c, 0x003b, 0x0027, 0x0060, 0x0700, 0x005c, 0x0b7a, 0x0b78, 0x0b63, 0x0b76, 0x0b62, 0x0b6e, 0x0b6d, 0x002c, 0x002e, 0x002f, 0x0700, 0x030c, 0x0703, 0x0020, 0x0207, 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107, 0x0108, 0x0109, 0x0208, 0x0209, 0x0307, 0x0308, 0x0309, 0x030b, 0x0304, 0x0305, 0x0306, 0x030a, 0x0301, 0x0302, 0x0303, 0x0300, 0x0310, 0x0206, 0x0200, 0x003c, 0x010a, 0x010b, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x030e, 0x0702, 0x030d, 0x001c, 0x0701, 0x0205, 0x0114, 0x0603, 0x0118, 0x0601, 0x0602, 0x0117, 0x0600, 0x0119, 0x0115, 0x0116, 0x011a, 0x010c, 0x010d, 0x011b, 0x011c, 0x0110, 0x0311, 0x011d, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, }, shift_map[NR_KEYS] = { 0x0200, 0x001b, 0x0021, 0x0040, 0x0023, 0x0024, 0x0025, 0x005e, 0x0026, 0x002a, 0x0028, 0x0029, 0x005f, 0x002b, 0x007f, 0x0009, 0x0b51, 0x0b57, 0x0b45, 0x0b52, 0x0b54, 0x0b59, 0x0b55, 0x0b49, 0x0b4f, 0x0b50, 0x007b, 0x007d, 0x0201, 0x0702, 0x0b41, 0x0b53, 0x0b44, 0x0b46, 0x0b47, 0x0b48, 0x0b4a, 0x0b4b, 0x0b4c, 0x003a, 0x0022, 0x007e, 0x0700, 0x007c, 0x0b5a, 0x0b58, 0x0b43, 0x0b56, 0x0b42, 0x0b4e, 0x0b4d, 0x003c, 0x003e, 0x003f, 0x0700, 0x030c, 0x0703, 0x0020, 0x0207, 0x010a, 0x010b, 0x010c, 0x010d, 0x010e, 0x010f, 0x0110, 0x0111, 0x0112, 0x0113, 0x0213, 0x0203, 0x0307, 0x0308, 0x0309, 0x030b, 0x0304, 0x0305, 0x0306, 0x030a, 0x0301, 0x0302, 0x0303, 0x0300, 0x0310, 0x0206, 0x0200, 0x003e, 0x010a, 0x010b, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x030e, 0x0702, 0x030d, 0x0200, 0x0701, 0x0205, 0x0114, 0x0603, 0x020b, 0x0601, 0x0602, 0x0117, 0x0600, 0x020a, 0x0115, 0x0116, 0x011a, 0x010c, 0x010d, 0x011b, 0x011c, 0x0110, 0x0311, 0x011d, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, }, altgr_map[NR_KEYS] = { 0x0200, 0x0200, 0x0200, 0x0040, 0x0200, 0x0024, 0x0200, 0x0200, 0x007b, 0x005b, 0x005d, 0x007d, 0x005c, 0x0200, 0x0200, 0x0200, 0x0b71, 0x0b77, 0x0918, 0x0b72, 0x0b74, 0x0b79, 0x0b75, 0x0b69, 0x0b6f, 0x0b70, 0x0200, 0x007e, 0x0201, 0x0702, 0x0914, 0x0b73, 0x0917, 0x0919, 0x0b67, 0x0b68, 0x0b6a, 0x0b6b, 0x0b6c, 0x0200, 0x0200, 0x0200, 0x0700, 0x0200, 0x0b7a, 0x0b78, 0x0916, 0x0b76, 0x0915, 0x0b6e, 0x0b6d, 0x0200, 0x0200, 0x0200, 0x0700, 0x030c, 0x0703, 0x0200, 0x0207, 0x050c, 0x050d, 0x050e, 0x050f, 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0208, 0x0202, 0x0911, 0x0912, 0x0913, 0x030b, 0x090e, 0x090f, 0x0910, 0x030a, 0x090b, 0x090c, 0x090d, 0x090a, 0x0310, 0x0206, 0x0200, 0x007c, 0x0516, 0x0517, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x030e, 0x0702, 0x030d, 0x0200, 0x0701, 0x0205, 0x0114, 0x0603, 0x0118, 0x0601, 0x0602, 0x0117, 0x0600, 0x0119, 0x0115, 0x0116, 0x011a, 0x010c, 0x010d, 0x011b, 0x011c, 0x0110, 0x0311, 0x011d, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, }, ctrl_map[NR_KEYS] = { 0x0200, 0x0200, 0x0200, 0x0000, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, 0x007f, 0x0200, 0x0200, 0x001f, 0x0200, 0x0008, 0x0200, 0x0011, 0x0017, 0x0005, 0x0012, 0x0014, 0x0019, 0x0015, 0x0009, 0x000f, 0x0010, 0x001b, 0x001d, 0x0201, 0x0702, 0x0001, 0x0013, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b, 0x000c, 0x0200, 0x0007, 0x0000, 0x0700, 
0x001c, 0x001a, 0x0018, 0x0003, 0x0016, 0x0002, 0x000e, 0x000d, 0x0200, 0x020e, 0x007f, 0x0700, 0x030c, 0x0703, 0x0000, 0x0207, 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107, 0x0108, 0x0109, 0x0208, 0x0204, 0x0307, 0x0308, 0x0309, 0x030b, 0x0304, 0x0305, 0x0306, 0x030a, 0x0301, 0x0302, 0x0303, 0x0300, 0x0310, 0x0206, 0x0200, 0x0200, 0x010a, 0x010b, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x030e, 0x0702, 0x030d, 0x001c, 0x0701, 0x0205, 0x0114, 0x0603, 0x0118, 0x0601, 0x0602, 0x0117, 0x0600, 0x0119, 0x0115, 0x0116, 0x011a, 0x010c, 0x010d, 0x011b, 0x011c, 0x0110, 0x0311, 0x011d, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, 0x0200, }; u_short *linux_keytabs[] = { plain_map, shift_map, altgr_map, altgr_map, ctrl_map }; #endif /* * We come here in a last attempt to satisfy a Linux ioctl() call */ int linux_machdepioctl(p, v, retval) struct proc *p; void *v; register_t *retval; { struct linux_sys_ioctl_args /* { syscallarg(int) fd; syscallarg(u_long) com; syscallarg(caddr_t) data; } */ *uap = v; struct sys_ioctl_args bia; u_long com; #if (NVT > 0) || (NWSDISPLAY > 0) int error; struct vt_mode lvt; caddr_t bvtp, sg; #endif #if (NWSDISPLAY > 0) && defined(XSERVER) struct kbentry kbe; #endif SCARG(&bia, fd) = SCARG(uap, fd); SCARG(&bia, data) = SCARG(uap, data); com = SCARG(uap, com); switch (com) { #if (NVT > 0) || (NWSDISPLAY > 0) case LINUX_KDGKBMODE: com = KDGKBMODE; break; case LINUX_KDSKBMODE: com = KDSKBMODE; if ((unsigned)SCARG(uap, data) == LINUX_K_MEDIUMRAW) SCARG(&bia, data) = (caddr_t)K_RAW; break; case LINUX_KDMKTONE: com = KDMKTONE; break; case LINUX_KDSETMODE: com = KDSETMODE; break; case LINUX_KDENABIO: com = KDENABIO; break; case LINUX_KDDISABIO: com = KDDISABIO; break; case LINUX_KDGETLED: com = KDGETLED; break; case LINUX_KDSETLED: com = KDSETLED; break; case LINUX_VT_OPENQRY: com = VT_OPENQRY; break; case LINUX_VT_GETMODE: SCARG(&bia, com) = VT_GETMODE; if ((error = sys_ioctl(p, &bia, retval))) return error; if ((error = copyin(SCARG(uap, data), (caddr_t)&lvt, sizeof (struct vt_mode)))) return error; lvt.relsig = native_to_linux_sig[lvt.relsig]; lvt.acqsig = native_to_linux_sig[lvt.acqsig]; lvt.frsig = native_to_linux_sig[lvt.frsig]; return copyout((caddr_t)&lvt, SCARG(uap, data), sizeof (struct vt_mode)); case LINUX_VT_SETMODE: com = VT_SETMODE; if ((error = copyin(SCARG(uap, data), (caddr_t)&lvt, sizeof (struct vt_mode)))) return error; lvt.relsig = linux_to_native_sig[lvt.relsig]; lvt.acqsig = linux_to_native_sig[lvt.acqsig]; lvt.frsig = linux_to_native_sig[lvt.frsig]; sg = stackgap_init(p->p_emul); bvtp = stackgap_alloc(&sg, sizeof (struct vt_mode)); if ((error = copyout(&lvt, bvtp, sizeof (struct vt_mode)))) return error; SCARG(&bia, data) = bvtp; break; case LINUX_VT_RELDISP: com = VT_RELDISP; break; case LINUX_VT_ACTIVATE: com = VT_ACTIVATE; break; case LINUX_VT_WAITACTIVE: com = VT_WAITACTIVE; break; #endif #if (NWSDISPLAY > 0) case LINUX_VT_GETSTATE: com = VT_GETSTATE; break; #ifdef XSERVER case LINUX_KDGKBTYPE: /* This is what Linux does. */ return (subyte(SCARG(uap, data), KB_101)); case LINUX_KDGKBENT: /* * The Linux KDGKBENT ioctl is different from the * SYSV original. So we handle it in machdep code. * XXX We should use keyboard mapping information * from wsdisplay, but this would be expensive. 
*/ if ((error = copyin(SCARG(uap, data), &kbe, sizeof(struct kbentry)))) return (error); if (kbe.kb_table >= sizeof(linux_keytabs) / sizeof(u_short *) || kbe.kb_index >= NR_KEYS) return (EINVAL); kbe.kb_value = linux_keytabs[kbe.kb_table][kbe.kb_index]; return (copyout(&kbe, SCARG(uap, data), sizeof(struct kbentry))); #endif #endif default: printf("linux_machdepioctl: invalid ioctl %08lx\n", com); return EINVAL; } SCARG(&bia, com) = com; return sys_ioctl(p, &bia, retval); } /* * Set I/O permissions for a process. Just set the maximum level * right away (ignoring the argument), otherwise we would have * to rely on I/O permission maps, which are not implemented. */ int linux_sys_iopl(p, v, retval) struct proc *p; void *v; register_t *retval; { #if 0 struct linux_sys_iopl_args /* { syscallarg(int) level; } */ *uap = v; #endif struct trapframe *fp = p->p_md.md_regs; if (suser(p->p_ucred, &p->p_acflag) != 0) return EPERM; fp->tf_eflags |= PSL_IOPL; *retval = 0; return 0; } /* * See above. If a root process tries to set access to an I/O port, * just let it have the whole range. */ int linux_sys_ioperm(p, v, retval) struct proc *p; void *v; register_t *retval; { struct linux_sys_ioperm_args /* { syscallarg(unsigned int) lo; syscallarg(unsigned int) hi; syscallarg(int) val; } */ *uap = v; struct trapframe *fp = p->p_md.md_regs; if (suser(p->p_ucred, &p->p_acflag) != 0) return EPERM; if (SCARG(uap, val)) fp->tf_eflags |= PSL_IOPL; *retval = 0; return 0; }
MarginC/kame
netbsd/sys/compat/linux/arch/i386/linux_machdep.c
C
bsd-3-clause
21,192
module SimpleLang.Syntax where

import Data.Functor.Identity
import Data.Maybe (fromMaybe)

import qualified SimpleLang.Parser as P
import Text.Parsec
import qualified Text.Parsec.Expr as Ex
import qualified Text.Parsec.Token as Tok

data Expr
  = Tr
  | Fl
  | Zero
  | IsZero Expr
  | Succ Expr
  | Pred Expr
  | If Expr Expr Expr
  deriving (Eq, Show)

-----------------
--  Parsing    --
-----------------

prefixOp :: String -> (a -> a) -> Ex.Operator String () Identity a
prefixOp s f = Ex.Prefix (P.reservedOp s >> return f)

-- table of operations for our language
table :: Ex.OperatorTable String () Identity Expr
table =
  [ [ prefixOp "succ" Succ
    , prefixOp "pred" Pred
    , prefixOp "iszero" IsZero
    ]
  ]

-- Constants :
true, false, zero :: P.Parser Expr
true  = P.reserved "true"  >> return Tr
false = P.reserved "false" >> return Fl
zero  = P.reserved "0"     >> return Zero

ifthen :: P.Parser Expr
ifthen = do
  P.reserved "if"
  cond <- expr
  P.reservedOp "then"
  tr <- expr
  P.reserved "else"
  fl <- expr
  return (If cond tr fl)

factor :: P.Parser Expr
factor = true
     <|> false
     <|> zero
     <|> ifthen
     <|> P.parens expr

expr :: P.Parser Expr
expr = Ex.buildExpressionParser table factor

contents :: P.Parser a -> P.Parser a
contents p = do
  Tok.whiteSpace P.lexer
  r <- p
  eof
  return r

-- The toplevel function we'll expose from our Parse module is parseExpr
-- which will be called as the entry point in our REPL.
parseExpr :: String -> Either ParseError Expr
parseExpr = parse (contents expr) "<stdin>"

-----------------
--  Evaluation --
-----------------

isNum :: Expr -> Bool
isNum Zero     = True
isNum (Succ t) = isNum t
isNum _        = False

isVal :: Expr -> Bool
isVal Tr = True
isVal Fl = True
isVal t | isNum t = True
isVal _ = False

eval' :: Expr -> Maybe Expr
eval' x = case x of
  IsZero Zero               -> Just Tr
  IsZero (Succ t) | isNum t -> Just Fl
  IsZero t                  -> IsZero <$> eval' t
  Succ t                    -> Succ <$> eval' t
  Pred Zero                 -> Just Zero
  Pred (Succ t) | isNum t   -> Just t
  Pred t                    -> Pred <$> eval' t
  If Tr c _                 -> Just c
  If Fl _ a                 -> Just a
  If t c a                  -> (\t' -> If t' c a) <$> eval' t
  _                         -> Nothing

-- we need that function to be able to evaluate multiple times
nf :: Expr -> Expr
nf x = fromMaybe x (nf <$> eval' x)

eval :: Expr -> Maybe Expr
eval t = case nf t of
  nft | isVal nft -> Just nft
      | otherwise -> Nothing -- term is "stuck"
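A short driver for the parser and evaluator above. parseExpr and eval are the functions defined in this module; the example term and the main wrapper are illustrative assumptions, not part of the original file.

-- Sketch only: parse a small boolean/arithmetic term and reduce it to a value.
main :: IO ()
main =
  case parseExpr "if iszero (pred (succ 0)) then true else false" of
    Left err -> print err        -- Parsec parse error
    Right e  -> print (eval e)   -- prints: Just Tr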
AlphaMarc/WYAH
src/SimpleLang/Syntax.hs
Haskell
bsd-3-clause
2,692
/*- * Copyright (c) 1982, 1986, 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)in.h 8.3 (Berkeley) 1/3/94 * $FreeBSD$ */ #ifndef _NETINET_IN_H_ #define _NETINET_IN_H_ #include <sys/cdefs.h> #include <sys/_types.h> #include <machine/endian.h> /* Protocols common to RFC 1700, POSIX, and X/Open. */ #define IPPROTO_IP 0 /* dummy for IP */ #define IPPROTO_ICMP 1 /* control message protocol */ #define IPPROTO_TCP 6 /* tcp */ #define IPPROTO_UDP 17 /* user datagram protocol */ #define INADDR_ANY (u_int32_t)0x00000000 #define INADDR_BROADCAST (u_int32_t)0xffffffff /* must be masked */ #ifndef _UINT8_T_DECLARED typedef __uint8_t uint8_t; #define _UINT8_T_DECLARED #endif #ifndef _UINT16_T_DECLARED typedef __uint16_t uint16_t; #define _UINT16_T_DECLARED #endif #ifndef _UINT32_T_DECLARED typedef __uint32_t uint32_t; #define _UINT32_T_DECLARED #endif #ifndef _IN_ADDR_T_DECLARED typedef uint32_t in_addr_t; #define _IN_ADDR_T_DECLARED #endif #ifndef _IN_PORT_T_DECLARED typedef uint16_t in_port_t; #define _IN_PORT_T_DECLARED #endif #ifndef _SA_FAMILY_T_DECLARED typedef __sa_family_t sa_family_t; #define _SA_FAMILY_T_DECLARED #endif /* Internet address (a structure for historical reasons). */ #ifndef _STRUCT_IN_ADDR_DECLARED struct in_addr { in_addr_t s_addr; }; #define _STRUCT_IN_ADDR_DECLARED #endif #ifndef _SOCKLEN_T_DECLARED typedef __socklen_t socklen_t; #define _SOCKLEN_T_DECLARED #endif #include <sys/_sockaddr_storage.h> /* Socket address, internet style. 
*/ struct sockaddr_in { uint8_t sin_len; sa_family_t sin_family; in_port_t sin_port; struct in_addr sin_addr; char sin_zero[8]; }; #if !defined(_KERNEL) && __BSD_VISIBLE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS uint32_t htonl(uint32_t); uint16_t htons(uint16_t); uint32_t ntohl(uint32_t); uint16_t ntohs(uint16_t); __END_DECLS #endif #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif #endif /* !_KERNEL && __BSD_VISIBLE */ #if __POSIX_VISIBLE >= 200112 #define IPPROTO_IPV6 41 /* IP6 header */ #define IPPROTO_RAW 255 /* raw IP packet */ #define INET_ADDRSTRLEN 16 #endif #if __BSD_VISIBLE /* * Constants and structures defined by the internet system, * Per RFC 790, September 1981, and numerous additions. */ /* * Protocols (RFC 1700) */ #define IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */ #define IPPROTO_IGMP 2 /* group mgmt protocol */ #define IPPROTO_GGP 3 /* gateway^2 (deprecated) */ #define IPPROTO_IPV4 4 /* IPv4 encapsulation */ #define IPPROTO_IPIP IPPROTO_IPV4 /* for compatibility */ #define IPPROTO_ST 7 /* Stream protocol II */ #define IPPROTO_EGP 8 /* exterior gateway protocol */ #define IPPROTO_PIGP 9 /* private interior gateway */ #define IPPROTO_RCCMON 10 /* BBN RCC Monitoring */ #define IPPROTO_NVPII 11 /* network voice protocol*/ #define IPPROTO_PUP 12 /* pup */ #define IPPROTO_ARGUS 13 /* Argus */ #define IPPROTO_EMCON 14 /* EMCON */ #define IPPROTO_XNET 15 /* Cross Net Debugger */ #define IPPROTO_CHAOS 16 /* Chaos*/ #define IPPROTO_MUX 18 /* Multiplexing */ #define IPPROTO_MEAS 19 /* DCN Measurement Subsystems */ #define IPPROTO_HMP 20 /* Host Monitoring */ #define IPPROTO_PRM 21 /* Packet Radio Measurement */ #define IPPROTO_IDP 22 /* xns idp */ #define IPPROTO_TRUNK1 23 /* Trunk-1 */ #define IPPROTO_TRUNK2 24 /* Trunk-2 */ #define IPPROTO_LEAF1 25 /* Leaf-1 */ #define IPPROTO_LEAF2 26 /* Leaf-2 */ #define IPPROTO_RDP 27 /* Reliable Data */ #define IPPROTO_IRTP 28 /* Reliable Transaction */ #define IPPROTO_TP 29 /* tp-4 w/ class negotiation */ #define IPPROTO_BLT 30 /* Bulk Data Transfer */ #define IPPROTO_NSP 31 /* Network Services */ #define IPPROTO_INP 32 /* Merit Internodal */ #define IPPROTO_SEP 33 /* Sequential Exchange */ #define IPPROTO_3PC 34 /* Third Party Connect */ #define IPPROTO_IDPR 35 /* InterDomain Policy Routing */ #define IPPROTO_XTP 36 /* XTP */ #define IPPROTO_DDP 37 /* Datagram Delivery */ #define IPPROTO_CMTP 38 /* Control Message Transport */ #define IPPROTO_TPXX 39 /* TP++ Transport */ #define IPPROTO_IL 40 /* IL transport protocol */ #define IPPROTO_SDRP 42 /* Source Demand Routing */ #define IPPROTO_ROUTING 43 /* IP6 routing header */ #define IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */ #define IPPROTO_IDRP 45 /* InterDomain Routing*/ #define IPPROTO_RSVP 46 /* resource reservation */ #define IPPROTO_GRE 47 /* General Routing Encap. */ #define IPPROTO_MHRP 48 /* Mobile Host Routing */ #define IPPROTO_BHA 49 /* BHA */ #define IPPROTO_ESP 50 /* IP6 Encap Sec. Payload */ #define IPPROTO_AH 51 /* IP6 Auth Header */ #define IPPROTO_INLSP 52 /* Integ. 
Net Layer Security */ #define IPPROTO_SWIPE 53 /* IP with encryption */ #define IPPROTO_NHRP 54 /* Next Hop Resolution */ #define IPPROTO_MOBILE 55 /* IP Mobility */ #define IPPROTO_TLSP 56 /* Transport Layer Security */ #define IPPROTO_SKIP 57 /* SKIP */ #define IPPROTO_ICMPV6 58 /* ICMP6 */ #define IPPROTO_NONE 59 /* IP6 no next header */ #define IPPROTO_DSTOPTS 60 /* IP6 destination option */ #define IPPROTO_AHIP 61 /* any host internal protocol */ #define IPPROTO_CFTP 62 /* CFTP */ #define IPPROTO_HELLO 63 /* "hello" routing protocol */ #define IPPROTO_SATEXPAK 64 /* SATNET/Backroom EXPAK */ #define IPPROTO_KRYPTOLAN 65 /* Kryptolan */ #define IPPROTO_RVD 66 /* Remote Virtual Disk */ #define IPPROTO_IPPC 67 /* Pluribus Packet Core */ #define IPPROTO_ADFS 68 /* Any distributed FS */ #define IPPROTO_SATMON 69 /* Satnet Monitoring */ #define IPPROTO_VISA 70 /* VISA Protocol */ #define IPPROTO_IPCV 71 /* Packet Core Utility */ #define IPPROTO_CPNX 72 /* Comp. Prot. Net. Executive */ #define IPPROTO_CPHB 73 /* Comp. Prot. HeartBeat */ #define IPPROTO_WSN 74 /* Wang Span Network */ #define IPPROTO_PVP 75 /* Packet Video Protocol */ #define IPPROTO_BRSATMON 76 /* BackRoom SATNET Monitoring */ #define IPPROTO_ND 77 /* Sun net disk proto (temp.) */ #define IPPROTO_WBMON 78 /* WIDEBAND Monitoring */ #define IPPROTO_WBEXPAK 79 /* WIDEBAND EXPAK */ #define IPPROTO_EON 80 /* ISO cnlp */ #define IPPROTO_VMTP 81 /* VMTP */ #define IPPROTO_SVMTP 82 /* Secure VMTP */ #define IPPROTO_VINES 83 /* Banyon VINES */ #define IPPROTO_TTP 84 /* TTP */ #define IPPROTO_IGP 85 /* NSFNET-IGP */ #define IPPROTO_DGP 86 /* dissimilar gateway prot. */ #define IPPROTO_TCF 87 /* TCF */ #define IPPROTO_IGRP 88 /* Cisco/GXS IGRP */ #define IPPROTO_OSPFIGP 89 /* OSPFIGP */ #define IPPROTO_SRPC 90 /* Strite RPC protocol */ #define IPPROTO_LARP 91 /* Locus Address Resoloution */ #define IPPROTO_MTP 92 /* Multicast Transport */ #define IPPROTO_AX25 93 /* AX.25 Frames */ #define IPPROTO_IPEIP 94 /* IP encapsulated in IP */ #define IPPROTO_MICP 95 /* Mobile Int.ing control */ #define IPPROTO_SCCSP 96 /* Semaphore Comm. security */ #define IPPROTO_ETHERIP 97 /* Ethernet IP encapsulation */ #define IPPROTO_ENCAP 98 /* encapsulation header */ #define IPPROTO_APES 99 /* any private encr. scheme */ #define IPPROTO_GMTP 100 /* GMTP*/ #define IPPROTO_IPCOMP 108 /* payload compression (IPComp) */ #define IPPROTO_SCTP 132 /* SCTP */ #define IPPROTO_MH 135 /* IPv6 Mobility Header */ /* 101-254: Partly Unassigned */ #define IPPROTO_PIM 103 /* Protocol Independent Mcast */ #define IPPROTO_CARP 112 /* CARP */ #define IPPROTO_PGM 113 /* PGM */ #define IPPROTO_MPLS 137 /* MPLS-in-IP */ #define IPPROTO_PFSYNC 240 /* PFSYNC */ /* 255: Reserved */ /* BSD Private, local use, namespace incursion, no longer used */ #define IPPROTO_OLD_DIVERT 254 /* OLD divert pseudo-proto */ #define IPPROTO_MAX 256 /* last return value of *_input(), meaning "all job for this pkt is done". */ #define IPPROTO_DONE 257 /* Only used internally, so can be outside the range of valid IP protocols. */ #define IPPROTO_DIVERT 258 /* divert pseudo-protocol */ #define IPPROTO_SEND 259 /* SeND pseudo-protocol */ /* * Defined to avoid confusion. The master value is defined by * PROTO_SPACER in sys/protosw.h. */ #define IPPROTO_SPACER 32767 /* spacer for loadable protos */ /* * Local port number conventions: * * When a user does a bind(2) or connect(2) with a port number of zero, * a non-conflicting local port address is chosen. 
* The default range is IPPORT_HIFIRSTAUTO through * IPPORT_HILASTAUTO, although that is settable by sysctl. * * A user may set the IPPROTO_IP option IP_PORTRANGE to change this * default assignment range. * * The value IP_PORTRANGE_DEFAULT causes the default behavior. * * The value IP_PORTRANGE_HIGH changes the range of candidate port numbers * into the "high" range. These are reserved for client outbound connections * which do not want to be filtered by any firewalls. * * The value IP_PORTRANGE_LOW changes the range to the "low" are * that is (by convention) restricted to privileged processes. This * convention is based on "vouchsafe" principles only. It is only secure * if you trust the remote host to restrict these ports. * * The default range of ports and the high range can be changed by * sysctl(3). (net.inet.ip.port{hi,low}{first,last}_auto) * * Changing those values has bad security implications if you are * using a stateless firewall that is allowing packets outside of that * range in order to allow transparent outgoing connections. * * Such a firewall configuration will generally depend on the use of these * default values. If you change them, you may find your Security * Administrator looking for you with a heavy object. * * For a slightly more orthodox text view on this: * * ftp://ftp.isi.edu/in-notes/iana/assignments/port-numbers * * port numbers are divided into three ranges: * * 0 - 1023 Well Known Ports * 1024 - 49151 Registered Ports * 49152 - 65535 Dynamic and/or Private Ports * */ /* * Ports < IPPORT_RESERVED are reserved for * privileged processes (e.g. root). (IP_PORTRANGE_LOW) */ #define IPPORT_RESERVED 1024 /* * Default local port range, used by IP_PORTRANGE_DEFAULT */ #define IPPORT_EPHEMERALFIRST 10000 #define IPPORT_EPHEMERALLAST 65535 /* * Dynamic port range, used by IP_PORTRANGE_HIGH. */ #define IPPORT_HIFIRSTAUTO 49152 #define IPPORT_HILASTAUTO 65535 /* * Scanning for a free reserved port return a value below IPPORT_RESERVED, * but higher than IPPORT_RESERVEDSTART. Traditionally the start value was * 512, but that conflicts with some well-known-services that firewalls may * have a fit if we use. */ #define IPPORT_RESERVEDSTART 600 #define IPPORT_MAX 65535 /* * Definitions of bits in internet address integers. * On subnets, the decomposition of addresses to host and net parts * is done according to subnet mask, not the masks here. */ #define IN_CLASSA(i) (((u_int32_t)(i) & 0x80000000) == 0) #define IN_CLASSA_NET 0xff000000 #define IN_CLASSA_NSHIFT 24 #define IN_CLASSA_HOST 0x00ffffff #define IN_CLASSA_MAX 128 #define IN_CLASSB(i) (((u_int32_t)(i) & 0xc0000000) == 0x80000000) #define IN_CLASSB_NET 0xffff0000 #define IN_CLASSB_NSHIFT 16 #define IN_CLASSB_HOST 0x0000ffff #define IN_CLASSB_MAX 65536 #define IN_CLASSC(i) (((u_int32_t)(i) & 0xe0000000) == 0xc0000000) #define IN_CLASSC_NET 0xffffff00 #define IN_CLASSC_NSHIFT 8 #define IN_CLASSC_HOST 0x000000ff #define IN_CLASSD(i) (((u_int32_t)(i) & 0xf0000000) == 0xe0000000) #define IN_CLASSD_NET 0xf0000000 /* These ones aren't really */ #define IN_CLASSD_NSHIFT 28 /* net and host fields, but */ #define IN_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ #define IN_MULTICAST(i) IN_CLASSD(i) #define IN_EXPERIMENTAL(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) #define IN_BADCLASS(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) #define IN_LINKLOCAL(i) (((u_int32_t)(i) & 0xffff0000) == 0xa9fe0000) #define IN_LOOPBACK(i) (((u_int32_t)(i) & 0xff000000) == 0x7f000000) #define IN_ZERONET(i) (((u_int32_t)(i) & 0xff000000) == 0) #define IN_PRIVATE(i) ((((u_int32_t)(i) & 0xff000000) == 0x0a000000) || \ (((u_int32_t)(i) & 0xfff00000) == 0xac100000) || \ (((u_int32_t)(i) & 0xffff0000) == 0xc0a80000)) #define IN_LOCAL_GROUP(i) (((u_int32_t)(i) & 0xffffff00) == 0xe0000000) #define IN_ANY_LOCAL(i) (IN_LINKLOCAL(i) || IN_LOCAL_GROUP(i)) #define INADDR_LOOPBACK (u_int32_t)0x7f000001 #ifndef _KERNEL #define INADDR_NONE 0xffffffff /* -1 return */ #endif #define INADDR_UNSPEC_GROUP (u_int32_t)0xe0000000 /* 224.0.0.0 */ #define INADDR_ALLHOSTS_GROUP (u_int32_t)0xe0000001 /* 224.0.0.1 */ #define INADDR_ALLRTRS_GROUP (u_int32_t)0xe0000002 /* 224.0.0.2 */ #define INADDR_ALLRPTS_GROUP (u_int32_t)0xe0000016 /* 224.0.0.22, IGMPv3 */ #define INADDR_CARP_GROUP (u_int32_t)0xe0000012 /* 224.0.0.18 */ #define INADDR_PFSYNC_GROUP (u_int32_t)0xe00000f0 /* 224.0.0.240 */ #define INADDR_ALLMDNS_GROUP (u_int32_t)0xe00000fb /* 224.0.0.251 */ #define INADDR_MAX_LOCAL_GROUP (u_int32_t)0xe00000ff /* 224.0.0.255 */ #define IN_LOOPBACKNET 127 /* official! */ #define IN_RFC3021_MASK (u_int32_t)0xfffffffe /* * Options for use with [gs]etsockopt at the IP level. * First word of comment is data type; bool is stored in int. */ #define IP_OPTIONS 1 /* buf/ip_opts; set/get IP options */ #define IP_HDRINCL 2 /* int; header is included with data */ #define IP_TOS 3 /* int; IP type of service and preced. */ #define IP_TTL 4 /* int; IP time to live */ #define IP_RECVOPTS 5 /* bool; receive all IP opts w/dgram */ #define IP_RECVRETOPTS 6 /* bool; receive IP opts for response */ #define IP_RECVDSTADDR 7 /* bool; receive IP dst addr w/dgram */ #define IP_SENDSRCADDR IP_RECVDSTADDR /* cmsg_type to set src addr */ #define IP_RETOPTS 8 /* ip_opts; set/get IP options */ #define IP_MULTICAST_IF 9 /* struct in_addr *or* struct ip_mreqn; * set/get IP multicast i/f */ #define IP_MULTICAST_TTL 10 /* u_char; set/get IP multicast ttl */ #define IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */ #define IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */ #define IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */ #define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */ #define IP_RSVP_ON 15 /* enable RSVP in kernel */ #define IP_RSVP_OFF 16 /* disable RSVP in kernel */ #define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */ #define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */ #define IP_PORTRANGE 19 /* int; range to choose for unspec port */ #define IP_RECVIF 20 /* bool; receive reception if w/dgram */ /* for IPSEC */ #define IP_IPSEC_POLICY 21 /* int; set/get security policy */ #define IP_FAITH 22 /* bool; accept FAITH'ed connections */ #define IP_ONESBCAST 23 /* bool: send all-ones broadcast */ #define IP_BINDANY 24 /* bool: allow bind to any address */ /* * Options for controlling the firewall and dummynet. * Historical options (from 40 to 64) will eventually be * replaced by only two options, IP_FW3 and IP_DUMMYNET3. 
*/ #define IP_FW_TABLE_ADD 40 /* add entry */ #define IP_FW_TABLE_DEL 41 /* delete entry */ #define IP_FW_TABLE_FLUSH 42 /* flush table */ #define IP_FW_TABLE_GETSIZE 43 /* get table size */ #define IP_FW_TABLE_LIST 44 /* list table contents */ #define IP_FW3 48 /* generic ipfw v.3 sockopts */ #define IP_DUMMYNET3 49 /* generic dummynet v.3 sockopts */ #define IP_FW_ADD 50 /* add a firewall rule to chain */ #define IP_FW_DEL 51 /* delete a firewall rule from chain */ #define IP_FW_FLUSH 52 /* flush firewall rule chain */ #define IP_FW_ZERO 53 /* clear single/all firewall counter(s) */ #define IP_FW_GET 54 /* get entire firewall rule chain */ #define IP_FW_RESETLOG 55 /* reset logging counters */ #define IP_FW_NAT_CFG 56 /* add/config a nat rule */ #define IP_FW_NAT_DEL 57 /* delete a nat rule */ #define IP_FW_NAT_GET_CONFIG 58 /* get configuration of a nat rule */ #define IP_FW_NAT_GET_LOG 59 /* get log of a nat rule */ #define IP_DUMMYNET_CONFIGURE 60 /* add/configure a dummynet pipe */ #define IP_DUMMYNET_DEL 61 /* delete a dummynet pipe from chain */ #define IP_DUMMYNET_FLUSH 62 /* flush dummynet */ #define IP_DUMMYNET_GET 64 /* get entire dummynet pipes */ #define IP_RECVTTL 65 /* bool; receive IP TTL w/dgram */ #define IP_MINTTL 66 /* minimum TTL for packet or drop */ #define IP_DONTFRAG 67 /* don't fragment packet */ #define IP_RECVTOS 68 /* bool; receive IP TOS w/dgram */ /* IPv4 Source Filter Multicast API [RFC3678] */ #define IP_ADD_SOURCE_MEMBERSHIP 70 /* join a source-specific group */ #define IP_DROP_SOURCE_MEMBERSHIP 71 /* drop a single source */ #define IP_BLOCK_SOURCE 72 /* block a source */ #define IP_UNBLOCK_SOURCE 73 /* unblock a source */ /* The following option is private; do not use it from user applications. */ #define IP_MSFILTER 74 /* set/get filter list */ /* Protocol Independent Multicast API [RFC3678] */ #define MCAST_JOIN_GROUP 80 /* join an any-source group */ #define MCAST_LEAVE_GROUP 81 /* leave all sources for group */ #define MCAST_JOIN_SOURCE_GROUP 82 /* join a source-specific group */ #define MCAST_LEAVE_SOURCE_GROUP 83 /* leave a single source */ #define MCAST_BLOCK_SOURCE 84 /* block a source */ #define MCAST_UNBLOCK_SOURCE 85 /* unblock a source */ /* * Defaults and limits for options */ #define IP_DEFAULT_MULTICAST_TTL 1 /* normally limit m'casts to 1 hop */ #define IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ /* * The imo_membership vector for each socket is now dynamically allocated at * run-time, bounded by USHRT_MAX, and is reallocated when needed, sized * according to a power-of-two increment. */ #define IP_MIN_MEMBERSHIPS 31 #define IP_MAX_MEMBERSHIPS 4095 #define IP_MAX_SOURCE_FILTER 1024 /* XXX to be unused */ /* * Default resource limits for IPv4 multicast source filtering. * These may be modified by sysctl. */ #define IP_MAX_GROUP_SRC_FILTER 512 /* sources per group */ #define IP_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ #define IP_MAX_SOCK_MUTE_FILTER 128 /* XXX no longer used */ /* * Argument structure for IP_ADD_MEMBERSHIP and IP_DROP_MEMBERSHIP. */ struct ip_mreq { struct in_addr imr_multiaddr; /* IP multicast address of group */ struct in_addr imr_interface; /* local IP address of interface */ }; /* * Modified argument structure for IP_MULTICAST_IF, obtained from Linux. * This is used to specify an interface index for multicast sends, as * the IPv4 legacy APIs do not support this (unless IP_SENDIF is available). 
*/ struct ip_mreqn { struct in_addr imr_multiaddr; /* IP multicast address of group */ struct in_addr imr_address; /* local IP address of interface */ int imr_ifindex; /* Interface index; cast to uint32_t */ }; /* * Argument structure for IPv4 Multicast Source Filter APIs. [RFC3678] */ struct ip_mreq_source { struct in_addr imr_multiaddr; /* IP multicast address of group */ struct in_addr imr_sourceaddr; /* IP address of source */ struct in_addr imr_interface; /* local IP address of interface */ }; /* * Argument structures for Protocol-Independent Multicast Source * Filter APIs. [RFC3678] */ struct group_req { uint32_t gr_interface; /* interface index */ struct sockaddr_storage gr_group; /* group address */ }; struct group_source_req { uint32_t gsr_interface; /* interface index */ struct sockaddr_storage gsr_group; /* group address */ struct sockaddr_storage gsr_source; /* source address */ }; #ifndef __MSFILTERREQ_DEFINED #define __MSFILTERREQ_DEFINED /* * The following structure is private; do not use it from user applications. * It is used to communicate IP_MSFILTER/IPV6_MSFILTER information between * the RFC 3678 libc functions and the kernel. */ struct __msfilterreq { uint32_t msfr_ifindex; /* interface index */ uint32_t msfr_fmode; /* filter mode for group */ uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ struct sockaddr_storage msfr_group; /* group address */ struct sockaddr_storage *msfr_srcs; /* pointer to the first member * of a contiguous array of * sources to filter in full. */ }; #endif struct sockaddr; /* * Advanced (Full-state) APIs [RFC3678] * The RFC specifies uint_t for the 6th argument to [sg]etsourcefilter(). * We use uint32_t here to be consistent. */ int setipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t, uint32_t, struct in_addr *); int getipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t *, uint32_t *, struct in_addr *); int setsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, uint32_t, uint32_t, struct sockaddr_storage *); int getsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, uint32_t *, uint32_t *, struct sockaddr_storage *); /* * Filter modes; also used to represent per-socket filter mode internally. */ #define MCAST_UNDEFINED 0 /* fmode: not yet defined */ #define MCAST_INCLUDE 1 /* fmode: include these source(s) */ #define MCAST_EXCLUDE 2 /* fmode: exclude these source(s) */ /* * Argument for IP_PORTRANGE: * - which range to search when port is unspecified at bind() or connect() */ #define IP_PORTRANGE_DEFAULT 0 /* default range */ #define IP_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ #define IP_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ /* * Definitions for inet sysctl operations. * * Third level is protocol number. * Fourth level is desired variable within that protocol. 
*/ #define IPPROTO_MAXID (IPPROTO_AH + 1) /* don't list to IPPROTO_MAX */ #define CTL_IPPROTO_NAMES { \ { "ip", CTLTYPE_NODE }, \ { "icmp", CTLTYPE_NODE }, \ { "igmp", CTLTYPE_NODE }, \ { "ggp", CTLTYPE_NODE }, \ { 0, 0 }, \ { 0, 0 }, \ { "tcp", CTLTYPE_NODE }, \ { 0, 0 }, \ { "egp", CTLTYPE_NODE }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { "pup", CTLTYPE_NODE }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { "udp", CTLTYPE_NODE }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { "idp", CTLTYPE_NODE }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { "ipsec", CTLTYPE_NODE }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { 0, 0 }, \ { "pim", CTLTYPE_NODE }, \ } /* * Names for IP sysctl objects */ #define IPCTL_FORWARDING 1 /* act as router */ #define IPCTL_SENDREDIRECTS 2 /* may send redirects when forwarding */ #define IPCTL_DEFTTL 3 /* default TTL */ #ifdef notyet #define IPCTL_DEFMTU 4 /* default MTU */ #endif #define IPCTL_RTEXPIRE 5 /* cloned route expiration time */ #define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */ #define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */ #define IPCTL_SOURCEROUTE 8 /* may perform source routes */ #define IPCTL_DIRECTEDBROADCAST 9 /* may re-broadcast received packets */ #define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */ #define IPCTL_INTRQDROPS 11 /* number of netisr q drops */ #define IPCTL_STATS 12 /* ipstat structure */ #define IPCTL_ACCEPTSOURCEROUTE 13 /* may accept source routed packets */ #define IPCTL_FASTFORWARDING 14 /* use fast IP forwarding code */ #define IPCTL_KEEPFAITH 15 /* FAITH IPv4->IPv6 translater ctl */ #define IPCTL_GIF_TTL 16 /* default TTL for gif encap packet */ #define IPCTL_MAXID 17 #endif /* __BSD_VISIBLE */ #ifdef _KERNEL struct ifnet; struct mbuf; /* forward declarations for Standard C */ int in_broadcast(struct in_addr, struct ifnet *); int in_canforward(struct in_addr); int in_localaddr(struct in_addr); int in_localip(struct in_addr); int inet_aton(const char *, struct in_addr *); /* in libkern */ char *inet_ntoa(struct in_addr); /* in libkern */ char *inet_ntoa_r(struct in_addr ina, char *buf); /* in libkern */ char *inet_ntop(int, const void *, char *, socklen_t); /* in libkern */ int inet_pton(int af, const char *, void *); /* in libkern */ void in_ifdetach(struct ifnet *); #define in_hosteq(s, t) ((s).s_addr == (t).s_addr) #define in_nullhost(x) ((x).s_addr == INADDR_ANY) #define in_allhosts(x) ((x).s_addr == htonl(INADDR_ALLHOSTS_GROUP)) #define satosin(sa) ((struct sockaddr_in *)(sa)) #define sintosa(sin) ((struct sockaddr *)(sin)) #define ifatoia(ifa) ((struct in_ifaddr *)(ifa)) #endif /* _KERNEL */ /* INET6 stuff */ #if __POSIX_VISIBLE >= 200112 #define __KAME_NETINET_IN_H_INCLUDED_ #include <netinet6/in6.h> 
#undef __KAME_NETINET_IN_H_INCLUDED_ #endif #endif /* !_NETINET_IN_H_*/
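The comment blocks above document the IP_PORTRANGE socket option and its three values. Below is a minimal sketch of how a client could ask for the "high" ephemeral range before binding or connecting; the option names and level come from this header, while the helper function itself is only an illustration.

#include <sys/socket.h>
#include <netinet/in.h>

/* Sketch: request ports from IPPORT_HIFIRSTAUTO..IPPORT_HILASTAUTO for s. */
int
request_high_port_range(int s)
{
	int range = IP_PORTRANGE_HIGH;

	return (setsockopt(s, IPPROTO_IP, IP_PORTRANGE, &range, sizeof(range)));
}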
jhbsz/OSI-OS
sys/netinet/in.h
C
bsd-3-clause
27,046
""" Tests for values coercion in setitem-like operations on DataFrame. For the most part, these should be multi-column DataFrames, otherwise we would share the tests with Series. """ import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, MultiIndex, NaT, Series, Timestamp, date_range, ) import pandas._testing as tm class TestDataFrameSetitemCoercion: @pytest.mark.xfail(reason="Unnecessary cast.") @pytest.mark.parametrize("consolidate", [True, False]) def test_loc_setitem_multiindex_columns(self, consolidate): # GH#18415 Setting values in a single column preserves dtype, # while setting them in multiple columns did unwanted cast. # Note that A here has 2 blocks, below we do the same thing # with a consolidated frame. A = DataFrame(np.zeros((6, 5), dtype=np.float32)) A = pd.concat([A, A], axis=1, keys=[1, 2]) if consolidate: A = A._consolidate() A.loc[2:3, (1, slice(2, 3))] = np.ones((2, 2), dtype=np.float32) assert (A.dtypes == np.float32).all() A.loc[0:5, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32) assert (A.dtypes == np.float32).all() A.loc[:, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32) assert (A.dtypes == np.float32).all() # TODO: i think this isn't about MultiIndex and could be done with iloc? def test_37477(): # fixed by GH#45121 orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]}) expected = DataFrame({"A": [1, 2, 3], "B": [3, 1.2, 5]}) df = orig.copy() df.at[1, "B"] = 1.2 tm.assert_frame_equal(df, expected) df = orig.copy() df.loc[1, "B"] = 1.2 tm.assert_frame_equal(df, expected) df = orig.copy() df.iat[1, 1] = 1.2 tm.assert_frame_equal(df, expected) df = orig.copy() df.iloc[1, 1] = 1.2 tm.assert_frame_equal(df, expected) def test_6942(indexer_al): # check that the .at __setitem__ after setting "Live" actually sets the data start = Timestamp("2014-04-01") t1 = Timestamp("2014-04-23 12:42:38.883082") t2 = Timestamp("2014-04-24 01:33:30.040039") dti = date_range(start, periods=1) orig = DataFrame(index=dti, columns=["timenow", "Live"]) df = orig.copy() indexer_al(df)[start, "timenow"] = t1 df["Live"] = True df.at[start, "timenow"] = t2 assert df.iloc[0, 0] == t2 def test_26395(indexer_al): # .at case fixed by GH#45121 (best guess) df = DataFrame(index=["A", "B", "C"]) df["D"] = 0 indexer_al(df)["C", "D"] = 2 expected = DataFrame({"D": [0, 0, 2]}, index=["A", "B", "C"], dtype=np.int64) tm.assert_frame_equal(df, expected) indexer_al(df)["C", "D"] = 44.5 expected = DataFrame({"D": [0, 0, 44.5]}, index=["A", "B", "C"], dtype=np.float64) tm.assert_frame_equal(df, expected) indexer_al(df)["C", "D"] = "hello" expected = DataFrame({"D": [0, 0, "hello"]}, index=["A", "B", "C"], dtype=object) tm.assert_frame_equal(df, expected) @pytest.mark.xfail(reason="unwanted upcast") def test_15231(): df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) df.loc[2] = Series({"a": 5, "b": 6}) assert (df.dtypes == np.int64).all() df.loc[3] = Series({"a": 7}) # df["a"] doesn't have any NaNs, should not have been cast exp_dtypes = Series([np.int64, np.float64], dtype=object, index=["a", "b"]) tm.assert_series_equal(df.dtypes, exp_dtypes) @pytest.mark.xfail(reason="Unnecessarily upcasts to float64") def test_iloc_setitem_unnecesssary_float_upcasting(): # GH#12255 df = DataFrame( { 0: np.array([1, 3], dtype=np.float32), 1: np.array([2, 4], dtype=np.float32), 2: ["a", "b"], } ) orig = df.copy() values = df[0].values.reshape(2, 1) df.iloc[:, 0:1] = values tm.assert_frame_equal(df, orig) @pytest.mark.xfail(reason="unwanted casting to dt64") def test_12499(): # TODO: OP 
in GH#12499 used np.datetim64("NaT") instead of pd.NaT, # which has consequences for the expected df["two"] (though i think at # the time it might not have because of a separate bug). See if it makes # a difference which one we use here. ts = Timestamp("2016-03-01 03:13:22.98986", tz="UTC") data = [{"one": 0, "two": ts}] orig = DataFrame(data) df = orig.copy() df.loc[1] = [np.nan, NaT] expected = DataFrame( {"one": [0, np.nan], "two": Series([ts, NaT], dtype="datetime64[ns, UTC]")} ) tm.assert_frame_equal(df, expected) data = [{"one": 0, "two": ts}] df = orig.copy() df.loc[1, :] = [np.nan, NaT] tm.assert_frame_equal(df, expected) @pytest.mark.xfail(reason="Too many columns cast to float64") def test_20476(): mi = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]]) df = DataFrame(-1, index=range(3), columns=mi) filler = DataFrame([[1, 2, 3.0]] * 3, index=range(3), columns=["a", "b", "c"]) df["A"] = filler expected = DataFrame( { 0: [1, 1, 1], 1: [2, 2, 2], 2: [3.0, 3.0, 3.0], 3: [-1, -1, -1], 4: [-1, -1, -1], 5: [-1, -1, -1], } ) expected.columns = mi exp_dtypes = Series( [np.dtype(np.int64)] * 2 + [np.dtype(np.float64)] + [np.dtype(np.int64)] * 3, index=mi, ) tm.assert_series_equal(df.dtypes, exp_dtypes)
pandas-dev/pandas
pandas/tests/frame/indexing/test_coercion.py
Python
bsd-3-clause
5,463
/*
 * Copyright (c) 2005, Peter Sommerlad and IFS Institute for Software at HSR Rapperswil, Switzerland
 * All rights reserved.
 *
 * This library/application is free software; you can redistribute and/or modify it under the terms of
 * the license that is included with this library/application in the file license.txt.
 */

#include "MultiWriterParser.h"

MultiWriterParser::MultiWriterParser(AAT_HTMLReader &reader, AAT_HTMLWriter &writer1, AAT_HTMLWriter &writer2)
	: AAT_StdHTMLParser(reader, writer1), fSecondWriter(writer2)
{
}

void MultiWriterParser::IntFlush()
{
	fSecondWriter.Flush();
	AAT_StdHTMLParser::IntFlush();
}

void MultiWriterParser::IntPushNode(Anything &node)
{
	fSecondWriter.PushNode(node);
	AAT_StdHTMLParser::IntPushNode(node);
}

void MultiWriterParser::IntPut(Unicode c)
{
	fSecondWriter.Put(c);
	AAT_StdHTMLParser::IntPut(c);
}

void MultiWriterParser::IntComment(const String &comment)
{
	fSecondWriter.Comment(comment);
	AAT_StdHTMLParser::IntComment(comment);
}

void MultiWriterParser::IntTag(int type, const char *tag)
{
	fSecondWriter.Tag(type, tag);
	AAT_StdHTMLParser::IntTag(type, tag);
}

void MultiWriterParser::IntArgument(const String &key, const String &value)
{
	fSecondWriter.Argument(key, value);
	AAT_StdHTMLParser::IntArgument(key, value);
}

void MultiWriterParser::IntError(long, const String &msg)
{
	fSecondWriter.Error(fLine, msg);
	AAT_StdHTMLParser::IntError(fLine, msg);
}
zer0infinity/CuteTestForCoastTest
coast/perfTest/src/MultiWriterParser.cpp
C++
bsd-3-clause
1,427
import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm from selection.algorithms.lasso import instance from selection.algorithms.forward_step import forward_stepwise, info_crit_stop, sequential, data_carving_IC def test_FS(k=10): n, p = 100, 200 X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None] X /= (X.std(0)[None,:] * np.sqrt(n)) Y = np.random.standard_normal(100) * 0.5 FS = forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n)) for i in range(k): FS.next() print 'first %s variables selected' % k, FS.variables print 'pivots for 3rd selected model knowing that we performed %d steps of forward stepwise' % k print FS.model_pivots(3) print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000) print FS.model_quadratic(3) def test_FS_unknown(k=10): n, p = 100, 200 X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None] X /= (X.std(0)[None,:] * np.sqrt(n)) Y = np.random.standard_normal(100) * 0.5 FS = forward_stepwise(X, Y) for i in range(k): FS.next() print 'first %s variables selected' % k, FS.variables print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000) def test_subset(k=10): n, p = 100, 200 X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None] X /= (X.std(0)[None,:] * np.sqrt(n)) Y = np.random.standard_normal(100) * 0.5 subset = np.ones(n, np.bool) subset[-10:] = 0 FS = forward_stepwise(X, Y, subset=subset, covariance=0.5**2 * np.identity(n)) for i in range(k): FS.next() print 'first %s variables selected' % k, FS.variables print 'pivots for last variable of 3rd selected model knowing that we performed %d steps of forward stepwise' % k print FS.model_pivots(3, saturated=True) print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000) FS = forward_stepwise(X, Y, subset=subset) for i in range(k): FS.next() print FS.model_pivots(3, saturated=False, which_var=[FS.variables[2]], burnin=5000, ndraw=5000) def test_BIC(k=10, do_sample=True): n, p = 100, 200 X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None] X /= (X.std(0)[None,:] * np.sqrt(n)) Y = np.random.standard_normal(100) * 0.5 FS = info_crit_stop(Y, X, 0.5, cost=np.log(n)) final_model = len(FS.variables) - 1 if do_sample: return [p[-1] for p in FS.model_pivots(final_model, saturated=False, burnin=5000, ndraw=5000)] else: saturated_pivots = FS.model_pivots(final_model) return [p[-1] for p in saturated_pivots] def test_sequential(k=10): n, p = 100, 200 X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None] X /= (X.std(0)[None,:] * np.sqrt(n)) Y = np.random.standard_normal(100) * 0.5 print sequential(X, Y, sigma=0.5, saturated=True)[1] print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000)[1] print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000)[1] # now use a subset of cases subset = np.ones(n, np.bool) subset[-10:] = 0 print sequential(X, Y, sigma=0.5, saturated=False, ndraw=5000, burnin=5000, subset=subset)[1] print sequential(X, Y, saturated=False, ndraw=5000, burnin=5000, subset=subset)[1] def simulate_null(saturated=True): n, p = 100, 40 X = np.random.standard_normal((n,p)) + 0.4 * np.random.standard_normal(n)[:,None] X /= (X.std(0)[None,:] * np.sqrt(n)) Y = np.random.standard_normal(100) * 0.5 FS = 
forward_stepwise(X, Y, covariance=0.5**2 * np.identity(n)) for i in range(5): FS.next() return [p[-1] for p in FS.model_pivots(3, saturated=saturated, use_new=False)] def test_ecdf(nsim=1000, BIC=False, saturated=True): P = [] for _ in range(nsim): if not BIC: P.extend(simulate_null(saturated=saturated)) else: P.extend(test_BIC(do_sample=True)) P = np.array(P) ecdf = sm.distributions.ECDF(P) plt.clf() plt.plot(ecdf.x, ecdf.y, linewidth=4, color='black') plt.show() def test_data_carving_IC(n=100, p=200, s=7, sigma=5, rho=0.3, snr=7., split_frac=0.9, ndraw=5000, burnin=1000, df=np.inf, coverage=0.90, compute_intervals=False): counter = 0 while True: counter += 1 X, y, beta, active, sigma = instance(n=n, p=p, s=s, sigma=sigma, rho=rho, snr=snr, df=df) mu = np.dot(X, beta) splitn = int(n*split_frac) indices = np.arange(n) np.random.shuffle(indices) stage_one = indices[:splitn] FS = info_crit_stop(y, X, sigma, cost=np.log(n), subset=stage_one) if set(range(s)).issubset(FS.active): results, FS = data_carving_IC(y, X, sigma, stage_one=stage_one, splitting=True, ndraw=ndraw, burnin=burnin, coverage=coverage, compute_intervals=compute_intervals, cost=np.log(n)) carve = [r[1] for r in results] split = [r[3] for r in results] Xa = X[:,FS.variables[:-1]] truth = np.dot(np.linalg.pinv(Xa), mu) split_coverage = [] carve_coverage = [] for result, t in zip(results, truth): _, _, ci, _, si = result carve_coverage.append((ci[0] < t) * (t < ci[1])) split_coverage.append((si[0] < t) * (t < si[1])) return ([carve[j] for j, i in enumerate(FS.active) if i >= s], [split[j] for j, i in enumerate(FS.active) if i >= s], [carve[j] for j, i in enumerate(FS.active) if i < s], [split[j] for j, i in enumerate(FS.active) if i < s], counter, carve_coverage, split_coverage) def test_full_pvals(n=100, p=40, rho=0.3, snr=4): X, y, beta, active, sigma = instance(n=n, p=p, snr=snr, rho=rho) FS = forward_stepwise(X, y, covariance=sigma**2 * np.identity(n)) from scipy.stats import norm as ndist pval = [] completed_yet = False for i in range(min(n, p)): FS.next() var_select, pval_select = FS.model_pivots(i+1, alternative='twosided', which_var=[FS.variables[-1]], saturated=False, burnin=2000, ndraw=8000)[0] pval_saturated = FS.model_pivots(i+1, alternative='twosided', which_var=[FS.variables[-1]], saturated=True)[0][1] # now, nominal ones LSfunc = np.linalg.pinv(FS.X[:,FS.variables]) Z = np.dot(LSfunc[-1], FS.Y) / (np.linalg.norm(LSfunc[-1]) * sigma) pval_nominal = 2 * ndist.sf(np.fabs(Z)) pval.append((var_select, pval_select, pval_saturated, pval_nominal)) if set(active).issubset(np.array(pval)[:,0]) and not completed_yet: completed_yet = True completion_index = i + 1 return X, y, beta, active, sigma, np.array(pval), completion_index
stefanv/selective-inference
selection/algorithms/tests/test_forward_step.py
Python
bsd-3-clause
8,482
package gui;

import java.io.IOException;
import java.io.OutputStream;

import javax.swing.JTextArea;

public class MyOut extends OutputStream {

	private JTextArea texto;

	public MyOut(JTextArea salida) {
		this.texto = salida;
	}

	public void write(int b) throws IOException {
		String ch = String.valueOf((char) b);
		texto.append(ch);
		texto.setCaretPosition(texto.getDocument().getLength());
	}
}
MvegaR/proyecto
JavaDSI/src/gui/MyOut.java
Java
bsd-3-clause
418
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.7.0_21) on Thu Dec 12 11:16:51 BRST 2013 --> <title>Uses of Class jason.asSyntax.UnnamedVar (Jason - AgentSpeak Java Interpreter)</title> <meta name="date" content="2013-12-12"> <link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class jason.asSyntax.UnnamedVar (Jason - AgentSpeak Java Interpreter)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../jason/asSyntax/UnnamedVar.html" title="class in jason.asSyntax">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../index-all.html">Index</a></li> <li><a href="../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../index.html?jason/asSyntax/class-use/UnnamedVar.html" target="_top">Frames</a></li> <li><a href="UnnamedVar.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class jason.asSyntax.UnnamedVar" class="title">Uses of Class<br>jason.asSyntax.UnnamedVar</h2> </div> <div class="classUseContainer">No usage of jason.asSyntax.UnnamedVar</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../jason/asSyntax/UnnamedVar.html" title="class in jason.asSyntax">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../package-tree.html">Tree</a></li> <li><a href="../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../index-all.html">Index</a></li> <li><a href="../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../index.html?jason/asSyntax/class-use/UnnamedVar.html" target="_top">Frames</a></li> <li><a href="UnnamedVar.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../allclasses-noframe.html">All 
Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> <!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> </body> </html>
lsa-pucrs/jason-ros-releases
Jason-1.4.0a/doc/api/jason/asSyntax/class-use/UnnamedVar.html
HTML
bsd-3-clause
3,987
#ifndef CONSTRAINT_CHECKER_H
#define CONSTRAINT_CHECKER_H

#include <Klampt/Modeling/Robot.h>
#include <Klampt/Modeling/Terrain.h>
#include <Klampt/Contact/Stance.h>

namespace Klampt {

/** @ingroup Planning
 * @brief Checks for static constraints for a robot at a given stance.
 */
struct ConstraintChecker
{
  static Real ContactDistance(const RobotModel& robot,const Stance& stance);
  static bool HasContact(const RobotModel& robot,const Stance& stance,Real maxDist);
  static bool HasContactVelocity(const RobotModel& robot,const Stance& stance,Real maxErr);
  static bool HasJointLimits(const RobotModel& robot);
  static bool HasVelocityLimits(const RobotModel& robot);
  static bool HasSupportPolygon(const RobotModel& robot,const Stance& stance,const Vector3& gravity,int numFCEdges=4);
  static bool HasSupportPolygon_Robust(const RobotModel& robot,const Stance& stance,const Vector3& gravity,Real robustnessFactor,int numFCEdges=4);
  static bool HasEnvCollision(RobotModel& robot,TerrainModel& env);
  //same as above, but ignores fixed links
  static bool HasEnvCollision(RobotModel& robot,TerrainModel& env,const Stance& stance, const vector<int>& ignoreList);
  static bool HasEnvCollision(RobotModel& robot,TerrainModel& env,const vector<IKGoal>& fixedLinks, const vector<int>& ignoreList);
  static bool HasEnvCollision(RobotModel& robot,TerrainModel& env,const vector<IKGoal>& fixedLinks);
  static bool HasSelfCollision(RobotModel& robot);
  static bool HasTorqueLimits(RobotModel& robot,const Stance& stance,const Vector3& gravity,int numFCEdges=4);
};

} //namespace Klampt

#endif
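The checks declared above are independent static tests; a caller typically combines several of them. The sketch below shows one such combination. The class and its signatures are taken from this header, while the include path, the helper name, and the chosen contact tolerance are assumptions.

#include <Klampt/Planning/ConstraintChecker.h>

namespace Klampt {

// Sketch: a stance is accepted only if contact, joint limits, support
// polygon, and collision checks all pass.
bool StanceIsFeasible(RobotModel& robot, TerrainModel& env,
                      const Stance& stance, const Vector3& gravity)
{
  return ConstraintChecker::HasContact(robot, stance, 1e-3)
      && ConstraintChecker::HasJointLimits(robot)
      && ConstraintChecker::HasSupportPolygon(robot, stance, gravity)
      && !ConstraintChecker::HasEnvCollision(robot, env)
      && !ConstraintChecker::HasSelfCollision(robot);
}

} // namespace Klampt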
krishauser/Klampt
Cpp/Planning/ConstraintChecker.h
C
bsd-3-clause
1,604
/* Provide support for both ANSI and non-ANSI environments. */ /* Some ANSI environments are "broken" in the sense that __STDC__ cannot be relied upon to have it's intended meaning. Therefore we must use our own concoction: _HAVE_STDC. Always use _HAVE_STDC instead of __STDC__ in newlib sources! To get a strict ANSI C environment, define macro __STRICT_ANSI__. This will "comment out" the non-ANSI parts of the ANSI header files (non-ANSI header files aren't affected). */ #ifndef _ANSIDECL_H_ #define _ANSIDECL_H_ #include <sdk/newlib.h> #include <sdk/sys/config.h> /* First try to figure out whether we really are in an ANSI C environment. */ /* FIXME: This probably needs some work. Perhaps sys/config.h can be prevailed upon to give us a clue. */ #ifdef __STDC__ #define _HAVE_STDC #endif /* ISO C++. */ #ifdef __cplusplus #if !(defined(_BEGIN_STD_C) && defined(_END_STD_C)) #ifdef _HAVE_STD_CXX #define _BEGIN_STD_C namespace std { extern "C" { #define _END_STD_C } } #else #define _BEGIN_STD_C extern "C" { #define _END_STD_C } #endif #if defined(__GNUC__) && \ ( (__GNUC__ >= 4) || \ ( (__GNUC__ >= 3) && defined(__GNUC_MINOR__) && (__GNUC_MINOR__ >= 3) ) ) #define _NOTHROW __attribute__ ((nothrow)) #else #define _NOTHROW throw() #endif #endif #else #define _BEGIN_STD_C #define _END_STD_C #define _NOTHROW #endif #ifdef _HAVE_STDC #define _PTR void * #define _AND , #define _NOARGS void #define _CONST const #define _VOLATILE volatile #define _SIGNED signed #define _DOTS , ... #define _VOID void #ifdef __CYGWIN__ #define _EXFUN_NOTHROW(name, proto) __cdecl name proto _NOTHROW #define _EXFUN(name, proto) __cdecl name proto #define _EXPARM(name, proto) (* __cdecl name) proto #define _EXFNPTR(name, proto) (__cdecl * name) proto #else #define _EXFUN_NOTHROW(name, proto) name proto _NOTHROW #define _EXFUN(name, proto) name proto #define _EXPARM(name, proto) (* name) proto #define _EXFNPTR(name, proto) (* name) proto #endif #define _DEFUN(name, arglist, args) name(args) #define _DEFUN_VOID(name) name(_NOARGS) #define _CAST_VOID (void) #ifndef _LONG_DOUBLE #define _LONG_DOUBLE long double #endif #ifndef _LONG_LONG_TYPE #define _LONG_LONG_TYPE long long #endif #ifndef _PARAMS #define _PARAMS(paramlist) paramlist #endif #else #define _PTR char * #define _AND ; #define _NOARGS #define _CONST #define _VOLATILE #define _SIGNED #define _DOTS #define _VOID void #define _EXFUN(name, proto) name() #define _EXFUN_NOTHROW(name, proto) name() #define _DEFUN(name, arglist, args) name arglist args; #define _DEFUN_VOID(name) name() #define _CAST_VOID #define _LONG_DOUBLE double #define _LONG_LONG_TYPE long #ifndef _PARAMS #define _PARAMS(paramlist) () #endif #endif /* Support gcc's __attribute__ facility. */ #ifdef __GNUC__ #define _ATTRIBUTE(attrs) __attribute__ (attrs) #else #define _ATTRIBUTE(attrs) #endif /* The traditional meaning of 'extern inline' for GCC is not to emit the function body unless the address is explicitly taken. However this behaviour is changing to match the C99 standard, which uses 'extern inline' to indicate that the function body *must* be emitted. If we are using GCC, but do not have the new behaviour, we need to use extern inline; if we are using a new GCC with the C99-compatible behaviour, or a non-GCC compiler (which we will have to hope is C99, since there is no other way to achieve the effect of omitting the function if it isn't referenced) we just use plain 'inline', which c99 defines to mean more-or-less the same as the Gnu C 'extern inline'. 
*/ #if defined(__GNUC__) && !defined(__GNUC_STDC_INLINE__) /* We're using GCC, but without the new C99-compatible behaviour. */ #define _ELIDABLE_INLINE extern __inline__ _ATTRIBUTE ((__always_inline__)) #else /* We're using GCC in C99 mode, or an unknown compiler which we just have to hope obeys the C99 semantics of inline. */ #define _ELIDABLE_INLINE __inline__ #endif #endif /* _ANSIDECL_H_ */
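The comment above explains why _ELIDABLE_INLINE is defined the way it is: under traditional GCC semantics it expands to extern __inline__, under C99 semantics to plain __inline__. A sketch of the intended use follows; the macro comes from this header, while the include path and the helper function are assumptions for illustration.

#include <sdk/_ansi.h>

/* Sketch: the body is only emitted when the compiler actually needs it,
 * regardless of whether GNU or C99 inline semantics are in effect. */
_ELIDABLE_INLINE int
_my_isblank(int c)
{
	return c == ' ' || c == '\t';
}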
nacl-webkit/native_client
src/include/sdk/_ansi.h
C
bsd-3-clause
4,010
{-# LANGUAGE OverloadedStrings, TemplateHaskell #-}
module Config where

import Control.Applicative
import Control.Exception
import Data.ByteString (ByteString)
import Data.Configurator as C
import HFlags
import System.Directory
import System.FilePath
import System.IO

import Paths_sproxy_web

defineFlag "c:config" ("sproxy-web.config" :: String) "config file"

data Config = Config {
    dbConnectionString :: ByteString,
    port :: Int,
    staticDir :: FilePath
  } deriving (Show, Eq)

-- | Get the connection string and the port
-- from the config file
getConfig :: FilePath -> IO Config
getConfig configFile = do
  conf <- C.load [C.Required configFile]
  Config <$> C.require conf "db_connection_string"
         <*> C.require conf "port"
         <*> getStaticDir

getStaticDir :: IO FilePath
getStaticDir = do
  currentDir <- getCurrentDirectory
  staticExists <- doesDirectoryExist (currentDir </> "static")
  if staticExists
    then do
      hPutStrLn stderr ("Serving static files from " ++ currentDir ++
                        " -- This is bad since it probably allows to publicly access source code files.")
      return currentDir
    else do
      cabalDataDir <- getDataDir
      cabalDataDirExists <- doesDirectoryExist cabalDataDir
      if cabalDataDirExists
        then return cabalDataDir
        else throwIO (ErrorCall "directory for static files not found.")
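A small usage sketch for getConfig: the accessor names and the default file name come from this module, while the main wrapper is illustrative only.

-- Sketch: load the configuration and report the configured port.
main :: IO ()
main = do
  cfg <- getConfig "sproxy-web.config"
  putStrLn ("sproxy-web listening on port " ++ show (port cfg))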
alpmestan/spw
src/Config.hs
Haskell
bsd-3-clause
1,432
/***************************************************************************
 * Copyright (c) Johan Mabille, Sylvain Corlay, Wolf Vollprecht and         *
 * Martin Renou                                                             *
 * Copyright (c) QuantStack                                                 *
 * Copyright (c) Serge Guelton                                              *
 *                                                                          *
 * Distributed under the terms of the BSD 3-Clause License.                 *
 *                                                                          *
 * The full license is in the file LICENSE, distributed with this software. *
 ****************************************************************************/

#include "gtest/gtest.h"

int main(int argc, char* argv[])
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
JohanMabille/xsimd
test/main.cpp
C++
bsd-3-clause
916
# proxy module
from __future__ import absolute_import

from codetools.blocks.analysis import *
enthought/etsproxy
enthought/blocks/analysis.py
Python
bsd-3-clause
94
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.Framework.Logging;

namespace WorkMarketingNet.Logging.Core
{
    public class Logger : ILogger
    {
        private readonly Microsoft.Framework.Logging.ILogger _logger;

        public Logger(ILoggerFactory loggerfactory)
        {
            // Add the console logger.
            loggerfactory.AddConsole(minLevel: LogLevel.Debug);
            _logger = loggerfactory.CreateLogger(typeof(Logger).FullName);
        }

        /// <summary>
        /// Log a message object with the Debug level.
        /// </summary>
        /// <param name="message">The message object to log.</param>
        public virtual void Debug(object message)
        {
            _logger.LogDebug(message.ToString());
            //_log.Debug(message);
        }

        /// <summary>
        /// Log a message object with the Debug level including the stack trace of the System.Exception passed as a parameter.
        /// </summary>
        /// <param name="message">The message object to log</param>
        /// <param name="exception">The exception to log, including its stack trace</param>
        public virtual void Debug(object message, Exception exception)
        {
            //_log.Debug(message, exception);
        }

        /// <summary>
        /// Logs a formatted message string with the Debug level.
        /// </summary>
        /// <param name="format">A String containing zero or more format items</param>
        /// <param name="args">An Object array containing zero or more objects to format</param>
        public virtual void DebugFormat(string format, params object[] args)
        {
            //_log.DebugFormat(format, args);
        }

        public virtual void Info(object message)
        {
            //_log.Info(message);
        }

        public virtual void Info(object message, Exception exception)
        {
            //_log.Info(message, exception);
        }

        public virtual void InfoFormat(string format, params object[] args)
        {
            //_log.InfoFormat(format, args);
        }

        public virtual void Warn(object message)
        {
            //_log.Warn(message);
        }

        public virtual void Warn(object message, Exception exception)
        {
            //_log.Warn(message, exception);
        }

        public virtual void WarnFormat(string format, params object[] args)
        {
            //_log.WarnFormat(format, args);
        }

        public virtual void Error(object message)
        {
            //_log.Error(message);
        }

        public virtual void Error(object message, Exception exception)
        {
            //_log.Error(message, exception);
        }

        public virtual void ErrorFormat(string format, params object[] args)
        {
            //_log.ErrorFormat(format, args);
        }

        public virtual void Fatal(object message)
        {
            //_log.Fatal(message);
        }

        public virtual void Fatal(object message, Exception exception)
        {
            //_log.Fatal(message, exception);
        }

        public virtual void FatalFormat(string format, params object[] args)
        {
            //_log.FatalFormat(format, args);
        }
    }
}
JacekKosciesza/WorkMarketingNet
old/Logging/WorkMarketingNet.Logging.Core/Logger.cs
C#
bsd-3-clause
2,763
# Math

This is a C++ template-based math library that includes Vector and Matrix classes. It is header-only.

# Motivation

After reading http://www.reedbeta.com/blog/2013/12/28/on-vector-math-libraries/ I was inspired to write my own math library, trying to follow the ideas found in the article.

# Operators

The following list contains all operators that are currently implemented:

- =
- +=
- -=
- *=
- /=
- ==
- !=
- +
- -
- *
- /

Note that the * and / operators are special versions: they cannot accept a second vector as argument. The same applies, of course, to the matching compound operators.

<pre>
Vector3f a(2.0f);
Vector3f b = a * 2.0f;
</pre>

# Functions

The Vector header file also contains all necessary functions on vectors. They are not part of the vector struct itself. The following functions generally work for all sizes; only the cross function is limited to vectors of size 3, and the lerp function is limited to the type float (see the usage sketch at the end of this README).

| Name        | Description                                     |
| ----------- | ----------------------------------------------- |
| vec_min     | min of two vectors                              |
| vec_max     | max of two vectors                              |
| lerp        | linear interpolation between two vectors        |
| clamp       | Clamps a given vector in the range of two others|
| saturate    | Clamps a given vector in the range of 0 to 1    |
| dot         | dot product of two vectors                      |
| cross       | cross product of two vectors                    |
| length      | length of a vector                              |
| sqr_length  | the square length of a vector                   |
| distance    | the distance from one vector to another         |
| normalize   | normalized vector                               |
| catmullRom  | cubic interpolation                             |

# How to use it

Just include "Vector.h" in your code and you are ready to go.

## Predefined types

| Name    | Size | type  |
| ------- |:----:| ----- |
|Vector2i | 2    | int   |
|Vector2f | 2    | float |
|Vector3i | 3    | int   |
|Vector3f | 3    | float |
|Vector4f | 4    | float |

In case you need something else, you can of course define your own, like

<pre>
typedef Vector<4,float> Vector4f;
</pre>

# IPath

This is a template class to support a path. It requires that you define the actual number of elements and the type. Here is a short example using a Vector2f:

<pre>
IPath<3,Vector2f> p;
p.add(0.0f,Vector2f(1.0f,1.0f));
p.add(0.5f,Vector2f(2.0f,2.0f));
p.add(1.0f,Vector2f(3.0f,3.0f));
Vector2f f;
bool ret = p.get(0.25f,&f);
</pre>

Since Path is a reserved class name, it is called IPath. But it is not an interface. The actual name InterpolationPath was simply too long.

# Versions

## 0.1

This is the current version.

# Notes

The code is released under the BSD license. This means you can do whatever you like with it. In case you want to contact me, send a mail to amecky@gmail.com. I am open to any discussion. If you want to contribute code, please feel free to contact me or just fork the code.
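# Usage sketch

A minimal sketch of the free functions listed in the Functions section above. It assumes a component-wise constructor Vector3f(x, y, z) exists in addition to the single-value constructor shown earlier, and that dot and length return a scalar float for float vectors, as their table descriptions suggest; apart from that, only functions named in the table are used.

<pre>
#include "Vector.h"

int main() {
    Vector3f a(1.0f, 2.0f, 3.0f);   // assumed component-wise constructor
    Vector3f b(2.0f);               // single-value constructor as shown above

    float d = dot(a, b);            // dot product
    Vector3f c = cross(a, b);       // cross product (size 3 only)
    Vector3f n = normalize(a);      // unit-length copy of a
    Vector3f m = lerp(a, b, 0.5f);  // halfway between a and b
    float len = length(c + n + m);  // length of a combined vector

    return 0;
}
</pre>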
amecky/math
README.md
Markdown
bsd-3-clause
3,117
from django.conf.urls.defaults import patterns, url

from snippets.base import views


urlpatterns = patterns('',
    url(r'^$', views.index, name='base.index'),

    url(r'^(?P<startpage_version>[^/]+)/(?P<name>[^/]+)/(?P<version>[^/]+)/'
        '(?P<appbuildid>[^/]+)/(?P<build_target>[^/]+)/(?P<locale>[^/]+)/'
        '(?P<channel>[^/]+)/(?P<os_version>[^/]+)/(?P<distribution>[^/]+)/'
        '(?P<distribution_version>[^/]+)/$',
        views.fetch_snippets, name='view_snippets'),

    url(r'^admin/base/snippet/preview/', views.preview_empty,
        name='base.admin.preview_empty'),
    url(r'^admin/base/snippet/(\d+)/preview/', views.preview_snippet,
        name='base.admin.preview_snippet'),
    url(r'^admin/base/snippettemplate/(\d+)/variables/', views.admin_template_json,
        name='base.admin.template_json'),
)
Osmose/snippets-service-prototype
snippets/base/urls.py
Python
bsd-3-clause
836
var __extends = this.__extends || function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } __.prototype = b.prototype; d.prototype = new __(); }; define(["require", "exports", '../Binding'], function (require, exports, Binding) { var ArrayBinding = (function (_super) { __extends(ArrayBinding, _super); function ArrayBinding(kwArgs) { _super.call(this, kwArgs); var array = this._object = kwArgs.object; var self = this; var pop = array.pop; array.pop = function () { if (this.length) { var oldValue = pop.apply(this, arguments); self.notify && self.notify({ index: this.length, removed: [oldValue] }); return oldValue; } }; var push = array.push; array.push = function () { var newValues = []; for (var _i = 0; _i < arguments.length; _i++) { newValues[_i - 0] = arguments[_i]; } var newLength = push.apply(this, arguments); self.notify && self.notify({ index: newLength - newValues.length, added: newValues }); return newLength; }; var reverse = array.reverse; array.reverse = function () { var oldValues = this.slice(0); var returnValue = reverse.apply(this, arguments); self.notify && self.notify({ index: 0, removed: oldValues, added: this }); return returnValue; }; var shift = array.shift; array.shift = function () { if (this.length) { var oldValue = shift.apply(this, arguments); self.notify && self.notify({ index: 0, removed: [oldValue] }); return oldValue; } }; var sort = array.sort; array.sort = function () { var oldValues = this.slice(0); var returnValue = sort.apply(this, arguments); self.notify && self.notify({ index: 0, removed: oldValues, added: this }); return returnValue; }; var splice = array.splice; array.splice = function (index, numToRemove) { if (numToRemove === void 0) { numToRemove = 0; } var newValues = []; for (var _i = 2; _i < arguments.length; _i++) { newValues[_i - 2] = arguments[_i]; } var oldValues = splice.apply(this, arguments); self.notify && self.notify({ index: index, removed: oldValues, added: newValues }); return oldValues; }; var unshift = array.unshift; array.unshift = function () { var newValues = []; for (var _i = 0; _i < arguments.length; _i++) { newValues[_i - 0] = arguments[_i]; } var newLength = unshift.apply(this, arguments); self.notify && self.notify({ index: 0, added: newValues }); return newLength; }; } ArrayBinding.test = function (kwArgs) { return kwArgs.object instanceof Array && kwArgs.path === '*'; }; ArrayBinding.prototype.getObject = function () { return this._object; }; ArrayBinding.prototype.destroy = function () { _super.prototype.destroy.call(this); this._object = this.notify = null; }; return ArrayBinding; })(Binding); return ArrayBinding; }); //# sourceMappingURL=../../_debug/binding/bindings/ArrayBinding.js.map
SitePen/mayhem-bower
binding/bindings/ArrayBinding.js
JavaScript
bsd-3-clause
3,879
import os import numpy as np import tables import galry.pyplot as plt from galry import Visual, process_coordinates, get_next_color, get_color from qtools import inthread MAXSIZE = 5000 CHANNEL_HEIGHT = .25 class MultiChannelVisual(Visual): def initialize(self, x=None, y=None, color=None, point_size=1.0, position=None, nprimitives=None, index=None, color_array_index=None, channel_height=CHANNEL_HEIGHT, options=None, autocolor=None): position, shape = process_coordinates(x=x, y=y) # register the size of the data self.size = np.prod(shape) # there is one plot per row if not nprimitives: nprimitives = shape[0] nsamples = shape[1] else: nsamples = self.size // nprimitives # register the bounds if nsamples <= 1: self.bounds = [0, self.size] else: self.bounds = np.arange(0, self.size + 1, nsamples) # automatic color with color map if autocolor is not None: if nprimitives <= 1: color = get_next_color(autocolor) else: color = np.array([get_next_color(i + autocolor) for i in xrange(nprimitives)]) # set position attribute self.add_attribute("position0", ndim=2, data=position, autonormalizable=True) index = np.array(index) self.add_index("index", data=index) if color_array_index is None: color_array_index = np.repeat(np.arange(nprimitives), nsamples) color_array_index = np.array(color_array_index) ncolors = color.shape[0] ncomponents = color.shape[1] color = color.reshape((1, ncolors, ncomponents)) dx = 1. / ncolors offset = dx / 2. self.add_texture('colormap', ncomponents=ncomponents, ndim=1, data=color) self.add_attribute('index', ndim=1, vartype='int', data=color_array_index) self.add_varying('vindex', vartype='int', ndim=1) self.add_uniform('nchannels', vartype='float', ndim=1, data=float(nprimitives)) self.add_uniform('channel_height', vartype='float', ndim=1, data=channel_height) self.add_vertex_main(""" vec2 position = position0; position.y = channel_height * position.y + .9 * (2 * index - (nchannels - 1)) / (nchannels - 1); vindex = index; """) self.add_fragment_main(""" float coord = %.5f + vindex * %.5f; vec4 color = texture1D(colormap, coord); out_color = color; """ % (offset, dx)) # add point size uniform (when it's not specified, there might be some # bugs where its value is obtained from other datasets...) self.add_uniform("point_size", data=point_size) self.add_vertex_main("""gl_PointSize = point_size;""") def get_view(total_size, xlim, freq): """Return the slice of the data. Arguments: * xlim: (x0, x1) of the window currently displayed. """ # Viewport. x0, x1 = xlim d = x1 - x0 dmax = duration zoom = max(dmax / d, 1) view_size = total_size / zoom step = int(np.ceil(view_size / MAXSIZE)) # Extended viewport for data. x0ex = np.clip(x0 - 3 * d, 0, dmax) x1ex = np.clip(x1 + 3 * d, 0, dmax) i0 = np.clip(int(np.round(x0ex * freq)), 0, total_size) i1 = np.clip(int(np.round(x1ex * freq)), 0, total_size) return (x0ex, x1ex), slice(i0, i1, step) def get_undersampled_data(data, xlim, slice): """ Arguments: * data: a HDF5 dataset of size Nsamples x Nchannels. * xlim: (x0, x1) of the current data view. """ # total_size = data.shape[0] # Get the view slice. # x0ex, x1ex = xlim # x0d, x1d = x0ex / (duration_initial) * 2 - 1, x1ex / (duration_initial) * 2 - 1 # Extract the samples from the data (HDD access). samples = data[slice, :] # Convert the data into floating points. samples = np.array(samples, dtype=np.float32) # Normalize the data. samples *= (1. / 65535) # samples *= .25 # Size of the slice. nsamples, nchannels = samples.shape # Create the data array for the plot visual. 
M = np.empty((nsamples * nchannels, 2)) samples = samples.T# + np.linspace(-1., 1., nchannels).reshape((-1, 1)) M[:, 1] = samples.ravel() # Generate the x coordinates. x = np.arange(slice.start, slice.stop, slice.step) / float(total_size - 1) # [0, 1] -> [-1, 2*duration.duration_initial - 1] x = x * 2 * duration / duration_initial - 1 M[:, 0] = np.tile(x, nchannels) # Update the bounds. bounds = np.arange(nchannels + 1) * nsamples size = bounds[-1] return M, bounds, size @inthread class DataUpdater(object): info = {} def update(self, data, xlimex, slice): samples, bounds, size = get_undersampled_data(data, xlimex, slice) nsamples = samples.shape[0] color_array_index = np.repeat(np.arange(nchannels), nsamples / nchannels) self.info = dict(position0=samples, bounds=bounds, size=size, index=color_array_index) dir = os.path.dirname(os.path.abspath(__file__)) try: filename = r"test_data/n6mab031109.h5" f = tables.openFile(os.path.join(dir, filename)) except: filename = r"test_data/n6mab031109.trim.h5" f = tables.openFile(os.path.join(dir, filename)) try: data = f.root.RawData except: data = f.root.raw_data nsamples, nchannels = data.shape total_size = nsamples freq = 20000. dt = 1. / freq duration = (data.shape[0] - 1) * dt duration_initial = 5. x = np.tile(np.linspace(0., duration, nsamples // MAXSIZE), (nchannels, 1)) y = np.zeros_like(x)+ np.linspace(-.9, .9, nchannels).reshape((-1, 1)) plt.figure(toolbar=False, show_grid=True) plt.visual(MultiChannelVisual, x=x, y=y) updater = DataUpdater(impatient=True) SLICE = None def change_channel_height(figure, parameter): global CHANNEL_HEIGHT CHANNEL_HEIGHT *= (1 + parameter) figure.set_data(channel_height=CHANNEL_HEIGHT) def pan(figure, parameter): figure.process_interaction('Pan', parameter) def anim(figure, parameter): # Constrain the zoom. nav = figure.get_processor('navigation') nav.constrain_navigation = True nav.xmin = -1 nav.xmax = 2 * duration / duration_initial nav.sxmin = 1. zoom = nav.sx box = nav.get_viewbox() xlim = ((box[0] + 1) / 2. * (duration_initial), (box[2] + 1) / 2. * (duration_initial)) xlimex, slice = get_view(data.shape[0], xlim, freq) # Paging system. dur = xlim[1] - xlim[0] index = int(np.floor(xlim[0] / dur)) zoom_index = int(np.round(duration_initial / dur)) i = (index, zoom_index) global SLICE if i != SLICE: SLICE = i updater.update(data, xlimex, slice) if updater.info: figure.set_data(**updater.info) updater.info.clear() plt.animate(anim, dt=.01) plt.action('Wheel', change_channel_height, key_modifier='Control', param_getter=lambda p: p['wheel'] * .001) plt.action('Wheel', pan, key_modifier='Shift', param_getter=lambda p: (p['wheel'] * .002, 0)) plt.action('DoubleClick', 'ResetZoom') plt.xlim(0., duration_initial) plt.show() f.close()
rossant/spiky
experimental/ephyview.py
Python
bsd-3-clause
7,411
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Fare' db.create_table('gtfs_fare', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True)), ('fare_id', self.gf('django.db.models.fields.CharField')(max_length=20, db_index=True)), ('price', self.gf('django.db.models.fields.FloatField')()), ('currency_type', self.gf('django.db.models.fields.CharField')(max_length=3)), ('payment_method', self.gf('django.db.models.fields.IntegerField')()), ('transfers', self.gf('django.db.models.fields.IntegerField')(null=True)), ('transfer_duration', self.gf('django.db.models.fields.IntegerField')()), )) db.send_create_signal('gtfs', ['Fare']) # Adding unique constraint on 'Fare', fields ['source', 'fare_id'] db.create_unique('gtfs_fare', ['source_id', 'fare_id']) # Adding unique constraint on 'Shape', fields ['source', 'shape_id'] db.create_unique('gtfs_shape', ['source_id', 'shape_id']) # Adding field 'Zone.source' db.add_column('gtfs_zone', 'source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gtfs.Source'], null=True), keep_default=False) # Adding unique constraint on 'Zone', fields ['source', 'zone_id'] db.create_unique('gtfs_zone', ['source_id', 'zone_id']) # Deleting field 'FareRule.payment_method' db.delete_column('gtfs_farerule', 'payment_method') # Deleting field 'FareRule.price' db.delete_column('gtfs_farerule', 'price') # Deleting field 'FareRule.currency_type' db.delete_column('gtfs_farerule', 'currency_type') # Deleting field 'FareRule.transfer_duration' db.delete_column('gtfs_farerule', 'transfer_duration') # Deleting field 'FareRule.transfers' db.delete_column('gtfs_farerule', 'transfers') # Deleting field 'FareRule.farerule_id' db.delete_column('gtfs_farerule', 'farerule_id') # Deleting field 'FareRule.agency' db.delete_column('gtfs_farerule', 'agency_id') # Adding field 'FareRule.fare' db.add_column('gtfs_farerule', 'fare', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['gtfs.Fare']), keep_default=False) def backwards(self, orm): # Removing unique constraint on 'Zone', fields ['source', 'zone_id'] db.delete_unique('gtfs_zone', ['source_id', 'zone_id']) # Removing unique constraint on 'Shape', fields ['source', 'shape_id'] db.delete_unique('gtfs_shape', ['source_id', 'shape_id']) # Removing unique constraint on 'Fare', fields ['source', 'fare_id'] db.delete_unique('gtfs_fare', ['source_id', 'fare_id']) # Deleting model 'Fare' db.delete_table('gtfs_fare') # Deleting field 'Zone.source' db.delete_column('gtfs_zone', 'source_id') # User chose to not deal with backwards NULL issues for 'FareRule.payment_method' raise RuntimeError("Cannot reverse this migration. 'FareRule.payment_method' and its values cannot be restored.") # User chose to not deal with backwards NULL issues for 'FareRule.price' raise RuntimeError("Cannot reverse this migration. 'FareRule.price' and its values cannot be restored.") # User chose to not deal with backwards NULL issues for 'FareRule.currency_type' raise RuntimeError("Cannot reverse this migration. 'FareRule.currency_type' and its values cannot be restored.") # User chose to not deal with backwards NULL issues for 'FareRule.transfer_duration' raise RuntimeError("Cannot reverse this migration. 
'FareRule.transfer_duration' and its values cannot be restored.") # Adding field 'FareRule.transfers' db.add_column('gtfs_farerule', 'transfers', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False) # User chose to not deal with backwards NULL issues for 'FareRule.farerule_id' raise RuntimeError("Cannot reverse this migration. 'FareRule.farerule_id' and its values cannot be restored.") # User chose to not deal with backwards NULL issues for 'FareRule.agency' raise RuntimeError("Cannot reverse this migration. 'FareRule.agency' and its values cannot be restored.") # Deleting field 'FareRule.fare' db.delete_column('gtfs_farerule', 'fare_id') models = { 'gtfs.agency': { 'Meta': {'unique_together': "(('source', 'agency_id'),)", 'object_name': 'Agency'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'name': ('django.db.models.fields.TextField', [], {}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) }, 'gtfs.block': { 'Meta': {'unique_together': "(('source', 'block_id'),)", 'object_name': 'Block'}, 'block_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}) }, 'gtfs.calendar': { 'Meta': {'object_name': 'Calendar'}, 'end_date': ('django.db.models.fields.DateField', [], {}), 'friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'service': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gtfs.Service']", 'unique': 'True'}), 'start_date': ('django.db.models.fields.DateField', [], {}), 'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'gtfs.calendardate': { 'Meta': {'object_name': 'CalendarDate'}, 'date': ('django.db.models.fields.DateField', [], {}), 'exception_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"}) }, 'gtfs.fare': { 'Meta': {'unique_together': "(('source', 'fare_id'),)", 'object_name': 'Fare'}, 'currency_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'fare_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'payment_method': ('django.db.models.fields.IntegerField', [], {}), 'price': ('django.db.models.fields.FloatField', 
[], {}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}), 'transfer_duration': ('django.db.models.fields.IntegerField', [], {}), 'transfers': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'gtfs.farerule': { 'Meta': {'object_name': 'FareRule'}, 'contains': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_contains'", 'null': 'True', 'to': "orm['gtfs.Zone']"}), 'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_destinations'", 'null': 'True', 'to': "orm['gtfs.Zone']"}), 'fare': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Fare']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'origin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fare_rule_origins'", 'null': 'True', 'to': "orm['gtfs.Zone']"}), 'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']", 'null': 'True'}) }, 'gtfs.frequency': { 'Meta': {'object_name': 'Frequency'}, 'end_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'end_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'headway_secs': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'start_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'start_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Trip']"}) }, 'gtfs.route': { 'Meta': {'unique_together': "(('agency', 'route_id'),)", 'object_name': 'Route'}, 'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Agency']", 'null': 'True'}), 'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}), 'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'long_name': ('django.db.models.fields.TextField', [], {}), 'route_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}), 'route_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}), 'text_color': ('django.db.models.fields.TextField', [], {'max_length': '6', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'}) }, 'gtfs.service': { 'Meta': {'unique_together': "(('source', 'service_id'),)", 'object_name': 'Service'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'service_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}) }, 'gtfs.shape': { 'Meta': {'unique_together': "(('source', 'shape_id'),)", 'object_name': 'Shape'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'path': ('django.contrib.gis.db.models.fields.LineStringField', [], {'null': 'True'}), 'shape_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}) }, 'gtfs.source': { 'Meta': {'object_name': 'Source'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'gtfs.stop': { 'Meta': {'unique_together': "(('source', 'stop_id'),)", 'object_name': 'Stop'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}), 'desc': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {}), 'location_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}), 'name': ('django.db.models.fields.TextField', [], {}), 'parent_station': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Stop']", 'null': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}), 'stop_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Zone']", 'null': 'True'}) }, 'gtfs.stoptime': { 'Meta': {'object_name': 'StopTime'}, 'arrival_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'arrival_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'departure_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'departure_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'drop_off_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pickup_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'shape_dist_travelled': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Stop']"}), 'stop_headsign': ('django.db.models.fields.TextField', [], {}), 'stop_sequence': ('django.db.models.fields.IntegerField', [], {}), 'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Trip']"}) }, 'gtfs.transfer': { 'Meta': {'object_name': 'Transfer'}, 'from_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_from_stop'", 'to': "orm['gtfs.Stop']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'min_transfer_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'to_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_to_stop'", 'to': "orm['gtfs.Stop']"}), 'transfer_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'gtfs.trip': { 'Meta': {'unique_together': "(('service', 'trip_id'), ('route', 'trip_id'))", 'object_name': 'Trip'}, 'block': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Block']", 'null': 'True'}), 'direction_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'headsign': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']"}), 'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"}), 'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Shape']", 'null': 'True'}), 'short_name': ('django.db.models.fields.TextField', 
[], {}), 'trip_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}) }, 'gtfs.zone': { 'Meta': {'unique_together': "(('source', 'zone_id'),)", 'object_name': 'Zone'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}), 'zone_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}) } } complete_apps = ['gtfs']
rcoup/traveldash
traveldash/gtfs/migrations/0011_auto__add_fare__add_unique_fare_source_fare_id__add_unique_shape_sourc.py
Python
bsd-3-clause
16,927
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROMEOS_SERVICES_SECURE_CHANNEL_DEVICE_TO_DEVICE_INITIATOR_HELPER_H_ #define CHROMEOS_SERVICES_SECURE_CHANNEL_DEVICE_TO_DEVICE_INITIATOR_HELPER_H_ #include <memory> #include <string> #include "base/callback.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "chromeos/services/secure_channel/session_keys.h" #include "third_party/ukey2/proto/device_to_device_messages.pb.h" namespace chromeos { namespace multidevice { class SecureMessageDelegate; } // namespace multidevice namespace secure_channel { // Class containing operations in the DeviceToDevice protocol that the initiator // needs to perform. This class is instantiable rather than being a utility // class because it relies on a WeakPtrFactory to prevent referencing deleted // memory. // // All operations are asynchronous because we use the SecureMessageDelegate for // crypto operations, whose implementation may be asynchronous. // // In the DeviceToDevice protocol, the initiator needs to send two messages to // the responder and parse one message from the responder: // 1. Send [Hello] Message // This message contains a public key that the initiator generates for the // current session. This message is signed by the long term symmetric key. // 2. Parse [Responder Auth] Message // The responder parses [Hello] and sends this message, which contains the // responder's session public key. This message also contains sufficient // information for the initiator to authenticate the responder. // 3. Send [Initiator Auth] Message // After receiving the responder's session public key, the initiator crafts // and sends this message so the responder can authenticate the initiator. class DeviceToDeviceInitiatorHelper { public: // Callback for operations that create a message. Invoked with the serialized // SecureMessage upon success or the empty string upon failure. typedef base::OnceCallback<void(const std::string&)> MessageCallback; // Callback for ValidateResponderAuthMessage. The first argument will be // called with the validation outcome. If validation succeeded, then the // second argument will contain the session symmetric key derived from the // [Responder Auth] message. typedef base::OnceCallback<void(bool, const SessionKeys&)> ValidateResponderAuthCallback; DeviceToDeviceInitiatorHelper(); DeviceToDeviceInitiatorHelper(const DeviceToDeviceInitiatorHelper&) = delete; DeviceToDeviceInitiatorHelper& operator=( const DeviceToDeviceInitiatorHelper&) = delete; virtual ~DeviceToDeviceInitiatorHelper(); // Creates the [Hello] message, which is the first message that is sent: // |session_public_key|: This session public key will be stored in plaintext // (but signed) so the responder can parse it. // |persistent_symmetric_key|: The long-term symmetric key that is shared by // the initiator and responder. // |secure_message_delegate|: Delegate for SecureMessage operations. This // instance is not owned, and must live until after |callback| is invoked. // |callback|: Invoked upon operation completion with the serialized message // or an empty string. void CreateHelloMessage( const std::string& session_public_key, const std::string& persistent_symmetric_key, multidevice::SecureMessageDelegate* secure_message_delegate, MessageCallback callback); // Validates that the [Responder Auth] message, received from the responder, // is properly signed and encrypted. 
// |responder_auth_message|: The bytes of the [Responder Auth] message to // validate. // |persistent_responder_public_key|: The long-term public key possessed by // the responder device. // |persistent_symmetric_key|: The long-term symmetric key that is shared by // the initiator and responder. // |session_private_key|: The session private key is used in an Diffie-Helmann // key exchange once the responder public key is extracted. The derived // session symmetric key is used in the validation process. // |hello_message|: The initial [Hello] message that was sent, which is used // in the signature calculation. // |secure_message_delegate|: Delegate for SecureMessage operations. This // instance is not owned, and must live until after |callback| is invoked. // |callback|: Invoked upon operation completion with whether // |responder_auth_message| is validated successfully. void ValidateResponderAuthMessage( const std::string& responder_auth_message, const std::string& persistent_responder_public_key, const std::string& persistent_symmetric_key, const std::string& session_private_key, const std::string& hello_message, multidevice::SecureMessageDelegate* secure_message_delegate, ValidateResponderAuthCallback callback); // Creates the [Initiator Auth] message, which allows the responder to // authenticate the initiator: // |session_keys|: The session symmetric keys. // |persistent_symmetric_key|: The long-term symmetric key that is shared by // the initiator and responder. // |responder_auth_message|: The [Responder Auth] message sent previously to // the responder. These bytes are used in the signature calculation. // |secure_message_delegate|: Delegate for SecureMessage operations. This // instance is not owned, and must live until after |callback| is invoked. // |callback|: Invoked upon operation completion with the serialized message // or an empty string. void CreateInitiatorAuthMessage( const SessionKeys& session_keys, const std::string& persistent_symmetric_key, const std::string& responder_auth_message, multidevice::SecureMessageDelegate* secure_message_delegate, MessageCallback callback); private: // Helper struct containing all the context needed to validate the // [Responder Auth] message. struct ValidateResponderAuthMessageContext { ValidateResponderAuthMessageContext( const std::string& responder_auth_message, const std::string& persistent_responder_public_key, const std::string& persistent_symmetric_key, const std::string& session_private_key, const std::string& hello_message, multidevice::SecureMessageDelegate* secure_message_delegate); ValidateResponderAuthMessageContext( const ValidateResponderAuthMessageContext& other); ~ValidateResponderAuthMessageContext(); std::string responder_auth_message; std::string persistent_responder_public_key; std::string persistent_symmetric_key; std::string session_private_key; std::string hello_message; multidevice::SecureMessageDelegate* secure_message_delegate; std::string responder_session_public_key; std::string session_symmetric_key; }; // Begins the [Responder Auth] validation flow by validating the header. void BeginResponderAuthValidation(ValidateResponderAuthMessageContext context, ValidateResponderAuthCallback callback); // Called after the session symmetric key is derived, so now we can unwrap the // outer message of [Responder Auth]. 
void OnSessionSymmetricKeyDerived(ValidateResponderAuthMessageContext context, ValidateResponderAuthCallback callback, const std::string& session_symmetric_key); // Called after the outer-most layer of [Responder Auth] is unwrapped. void OnOuterMessageUnwrappedForResponderAuth( const ValidateResponderAuthMessageContext& context, ValidateResponderAuthCallback callback, bool verified, const std::string& payload, const securemessage::Header& header); // Called after the middle layer of [Responder Auth] is unwrapped. void OnMiddleMessageUnwrappedForResponderAuth( const ValidateResponderAuthMessageContext& context, ValidateResponderAuthCallback callback, bool verified, const std::string& payload, const securemessage::Header& header); // Called after inner message is created. void OnInnerMessageCreatedForInitiatorAuth( const SessionKeys& session_keys, multidevice::SecureMessageDelegate* secure_message_delegate, DeviceToDeviceInitiatorHelper::MessageCallback callback, const std::string& inner_message); // Callback for CreateInitiatorAuthMessage(), after the inner message is // created. void OnInnerMessageUnwrappedForResponderAuth( const ValidateResponderAuthMessageContext& context, ValidateResponderAuthCallback callback, bool verified, const std::string& payload, const securemessage::Header& header); base::WeakPtrFactory<DeviceToDeviceInitiatorHelper> weak_ptr_factory_{this}; }; } // namespace secure_channel } // namespace chromeos #endif // CHROMEOS_SERVICES_SECURE_CHANNEL_DEVICE_TO_DEVICE_INITIATOR_HELPER_H_
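// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// header). It shows how a caller might drive step 1 of the protocol described
// above -- building the [Hello] message -- via the CreateHelloMessage()
// method declared in this file. The SecureMessageDelegate instance and the
// two key strings are assumed to be provided elsewhere, and base::BindOnce
// from //base is assumed for wrapping the callback, as is conventional in
// Chromium code.
//
//   void BeginHandshake(
//       chromeos::secure_channel::DeviceToDeviceInitiatorHelper* helper,
//       chromeos::multidevice::SecureMessageDelegate* delegate,
//       const std::string& session_public_key,
//       const std::string& persistent_symmetric_key) {
//     helper->CreateHelloMessage(
//         session_public_key, persistent_symmetric_key, delegate,
//         base::BindOnce([](const std::string& serialized_hello) {
//           // An empty string signals failure, per the MessageCallback
//           // documentation above.
//           if (serialized_hello.empty())
//             return;
//           // Send |serialized_hello| to the responder and wait for
//           // [Responder Auth] before calling
//           // ValidateResponderAuthMessage().
//         }));
//   }
// ---------------------------------------------------------------------------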
nwjs/chromium.src
chromeos/services/secure_channel/device_to_device_initiator_helper.h
C
bsd-3-clause
9,135
<?php use common\modules\lead\models\forms\LeadSourceForm; use yii\helpers\Html; /* @var $this yii\web\View */ /* @var $model LeadSourceForm */ $this->title = Yii::t('lead', 'Create Lead Source'); $this->params['breadcrumbs'][] = ['label' => Yii::t('lead', 'Leads'), 'url' => ['/lead/lead/index']]; $this->params['breadcrumbs'][] = ['label' => Yii::t('lead', 'Sources'), 'url' => ['index']]; $this->params['breadcrumbs'][] = $this->title; ?> <div class="lead-source-create"> <h1><?= Html::encode($this->title) ?></h1> <?= $this->render('_form', [ 'model' => $model, ]) ?> </div>
edzima/VestraTele
common/modules/lead/views/source/create.php
PHP
bsd-3-clause
590
/*========================================================================= Program: Visualization Toolkit Module: vtkLookupTable.h Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ // .NAME vtkLookupTable - map scalar values into colors via a lookup table // .SECTION Description // vtkLookupTable is an object that is used by mapper objects to map scalar // values into RGBA (red-green-blue-alpha transparency) color specification, // or RGBA into scalar values. The color table can be created by direct // insertion of color values, or by specifying a hue, saturation, value, and // alpha range and generating a table. // // A special color for NaN values in the data can be specified via // SetNanColor(). In addition, a color for data values below the // lookup table range minimum can be specified with // SetBelowRangeColor(), and that color will be used for values below // the range minimum when UseBelowRangeColor is on. Likewise, a color // for data values above the lookup table range maximum can be // specified with SetAboveRangeColor(), and it is used when // UseAboveRangeColor is on. // // This class behaves differently depending on how \a IndexedLookup is set. // When true, vtkLookupTable enters a mode for representing categorical color maps. // By setting \a IndexedLookup to true, you indicate that the annotated // values are the only valid values for which entries in the color table // should be returned. The colors in the lookup \a Table are assigned // to annotated values by taking the modulus of their index in the list // of annotations. \a IndexedLookup changes the behavior of \a GetIndex, // which in turn changes the way \a MapScalarsThroughTable2 behaves; // when \a IndexedLookup is true, \a MapScalarsThroughTable2 will search for // scalar values in \a AnnotatedValues and use the resulting index to // determine the color. If a scalar value is not present in \a AnnotatedValues, // then \a NanColor will be used. // // .SECTION Caveats // You need to explicitly call Build() when constructing the LUT by hand. // // .SECTION See Also // vtkLogLookupTable vtkWindowLevelLookupTable #ifndef vtkLookupTable_h #define vtkLookupTable_h #include "vtkCommonCoreModule.h" // For export macro #include "vtkScalarsToColors.h" #include "vtkUnsignedCharArray.h" // Needed for inline method #define VTK_RAMP_LINEAR 0 #define VTK_RAMP_SCURVE 1 #define VTK_RAMP_SQRT 2 #define VTK_SCALE_LINEAR 0 #define VTK_SCALE_LOG10 1 class VTKCOMMONCORE_EXPORT vtkLookupTable : public vtkScalarsToColors { public: // Description: // Constants for offsets of special colors (e.g., NanColor, BelowRangeColor, // AboveRangeColor) from the maximum index in the lookup table. const static vtkIdType BELOW_RANGE_COLOR_INDEX; const static vtkIdType ABOVE_RANGE_COLOR_INDEX; const static vtkIdType NAN_COLOR_INDEX; const static vtkIdType NUMBER_OF_SPECIAL_COLORS; // Description: // Construct with range=[0,1]; and hsv ranges set up for rainbow color table // (from red to blue). 
static vtkLookupTable *New(); vtkTypeMacro(vtkLookupTable,vtkScalarsToColors); void PrintSelf(ostream& os, vtkIndent indent); // Description: // Return true if all of the values defining the mapping have an opacity // equal to 1. virtual int IsOpaque(); // Description: // Allocate a color table of specified size. int Allocate(int sz=256, int ext=256); // Description: // Generate lookup table from hue, saturation, value, alpha min/max values. // Table is built from linear ramp of each value. virtual void Build(); // Description: // Force the lookup table to regenerate from hue, saturation, value, // and alpha min/max values. Table is built from a linear ramp of // each value. ForceBuild() is useful if a lookup table has been // defined manually (using SetTableValue) and then an application // decides to rebuild the lookup table using the implicit process. virtual void ForceBuild(); // Description: // Copies the "special" colors into the given table. void BuildSpecialColors(); // Description: // Set the shape of the table ramp to either linear or S-curve. // The default is S-curve, which tails off gradually at either end. // The equation used for the S-curve is y = (sin((x - 1/2)*pi) + 1)/2, // while the equation for the linear ramp is simply y = x. For an // S-curve greyscale ramp, you should set NumberOfTableValues to 402 // (which is 256*pi/2) to provide room for the tails of the ramp. // The equation for the SQRT is y = sqrt(x). vtkSetMacro(Ramp,int); void SetRampToLinear() { this->SetRamp(VTK_RAMP_LINEAR); }; void SetRampToSCurve() { this->SetRamp(VTK_RAMP_SCURVE); }; void SetRampToSQRT() { this->SetRamp(VTK_RAMP_SQRT); }; vtkGetMacro(Ramp,int); // Description: // Set the type of scale to use, linear or logarithmic. The default // is linear. If the scale is logarithmic, then the TableRange must not // cross the value zero. void SetScale(int scale); void SetScaleToLinear() { this->SetScale(VTK_SCALE_LINEAR); }; void SetScaleToLog10() { this->SetScale(VTK_SCALE_LOG10); }; vtkGetMacro(Scale,int); // Description: // Set/Get the minimum/maximum scalar values for scalar mapping. Scalar // values less than minimum range value are clamped to minimum range value. // Scalar values greater than maximum range value are clamped to maximum // range value. // // The \a TableRange values are only used when \a IndexedLookup is false. void SetTableRange(double r[2]); virtual void SetTableRange(double min, double max); vtkGetVectorMacro(TableRange,double,2); // Description: // Set the range in hue (using automatic generation). Hue ranges // between [0,1]. vtkSetVector2Macro(HueRange,double); vtkGetVector2Macro(HueRange,double); // Description: // Set the range in saturation (using automatic generation). Saturation // ranges between [0,1]. vtkSetVector2Macro(SaturationRange,double); vtkGetVector2Macro(SaturationRange,double); // Description: // Set the range in value (using automatic generation). Value ranges // between [0,1]. vtkSetVector2Macro(ValueRange,double); vtkGetVector2Macro(ValueRange,double); // Description: // Set the range in alpha (using automatic generation). Alpha ranges from // [0,1]. vtkSetVector2Macro(AlphaRange,double); vtkGetVector2Macro(AlphaRange,double); // Description: // Set the color to use when a NaN (not a number) is encountered. This is an // RGBA 4-tuple of doubles in the range [0,1]. vtkSetVector4Macro(NanColor, double); vtkGetVector4Macro(NanColor, double); // Description: // Return the \a NanColor as a pointer to 4 unsigned chars. 
This // will overwrite any data returned by previous calls to MapValue. unsigned char* GetNanColorAsUnsignedChars(); // Description: // Cast a double color in a type T color. colorIn and colorOut are // expected to be RGBA[4] and colorIn to be in [0.0, 1.0] static void GetColorAsUnsignedChars(const double colorIn[4], unsigned char colorOut[4]); // Description: // Set the color to use when a value below the range is // encountered. This is an RGBA 4-tuple of doubles in the range [0, 1]. vtkSetVector4Macro(BelowRangeColor, double); vtkGetVector4Macro(BelowRangeColor, double); // Description: // Set whether the below range color should be used. vtkSetMacro(UseBelowRangeColor, int); vtkGetMacro(UseBelowRangeColor, int); vtkBooleanMacro(UseBelowRangeColor, int); // Description: // Set the color to use when a value above the range is // encountered. This is an RGBA 4-tuple of doubles in the range [0, 1]. vtkSetVector4Macro(AboveRangeColor, double); vtkGetVector4Macro(AboveRangeColor, double); // Description: // Set whether the below range color should be used. vtkSetMacro(UseAboveRangeColor, int); vtkGetMacro(UseAboveRangeColor, int); vtkBooleanMacro(UseAboveRangeColor, int); // Description: // Map one value through the lookup table. unsigned char* MapValue(double v); // Description: // Map one value through the lookup table and return the color as // an RGB array of doubles between 0 and 1. void GetColor(double x, double rgb[3]); // Description: // Map one value through the lookup table and return the alpha value // (the opacity) as a double between 0 and 1. double GetOpacity(double v); // Description: // Return the table index associated with a particular value. // // Do not use this function when \a IndexedLookup is true: // in that case, the set of values \a v may take on is exactly the integers // from 0 to \a GetNumberOfTableValues() - 1; // and \a v serves directly as an index into \a TableValues. virtual vtkIdType GetIndex(double v); // Description: // Specify the number of values (i.e., colors) in the lookup // table. void SetNumberOfTableValues(vtkIdType number); vtkIdType GetNumberOfTableValues() { return this->NumberOfColors; }; // Description: // Directly load color into lookup table. Use [0,1] double values for color // component specification. Make sure that you've either used the // Build() method or used SetNumberOfTableValues() prior to using this // method. virtual void SetTableValue(vtkIdType indx, double rgba[4]); // Description: // Directly load color into lookup table. Use [0,1] double values for color // component specification. virtual void SetTableValue(vtkIdType indx, double r, double g, double b, double a=1.0); // Description: // Return a rgba color value for the given index into the lookup table. Color // components are expressed as [0,1] double values. double *GetTableValue(vtkIdType id); // Description: // Return a rgba color value for the given index into the lookup table. Color // components are expressed as [0,1] double values. void GetTableValue(vtkIdType id, double rgba[4]); // Description: // Get pointer to color table data. Format is array of unsigned char // r-g-b-a-r-g-b-a... unsigned char *GetPointer(const vtkIdType id) { return this->Table->GetPointer(4*id); }; // Description: // Get pointer to data. Useful for direct writes into object. MaxId is bumped // by number (and memory allocated if necessary). Id is the location you // wish to write into; number is the number of rgba values to write. 
// // \warning If you modify the table data via the pointer returned by this // member function, you must call vtkLookupTable::BuildSpecialColors() // afterwards to ensure that the special colors (below/above range and NaN // value) are up-to-date. unsigned char *WritePointer(const vtkIdType id, const int number); // Description: // Sets/Gets the range of scalars which will be mapped. This is a duplicate // of Get/SetTableRange. double *GetRange() { return this->GetTableRange(); }; void SetRange(double min, double max) { this->SetTableRange(min, max); }; void SetRange(double rng[2]) { this->SetRange(rng[0], rng[1]); }; // Description: // Returns the log of \c range in \c log_range. // There is a little more to this than simply taking the log10 of the // two range values: we do conversion of negative ranges to positive // ranges, and conversion of zero to a 'very small number'. static void GetLogRange(const double range[2], double log_range[2]); // Description: // Apply log to value, with appropriate constraints. static double ApplyLogScale(double v, const double range[2], const double log_range[2]); // Description: // Set the number of colors in the lookup table. Use // SetNumberOfTableValues() instead, it can be used both before and // after the table has been built whereas SetNumberOfColors() has no // effect after the table has been built. vtkSetClampMacro(NumberOfColors,vtkIdType,2,VTK_ID_MAX); vtkGetMacro(NumberOfColors,vtkIdType); // Description: // Set/Get the internal table array that is used to map the scalars // to colors. The table array is an unsigned char array with 4 // components representing RGBA. void SetTable(vtkUnsignedCharArray *); vtkGetObjectMacro(Table,vtkUnsignedCharArray); // Description: // map a set of scalars through the lookup table // // This member function is thread safe. void MapScalarsThroughTable2(void *input, unsigned char *output, int inputDataType, int numberOfValues, int inputIncrement, int outputIncrement); // Description: // Copy the contents from another LookupTable void DeepCopy(vtkScalarsToColors *lut); // Description: // This should return 1 is the subclass is using log scale for mapping scalars // to colors. Returns 1 is scale == VTK_SCALE_LOG10. virtual int UsingLogScale() { return (this->GetScale() == VTK_SCALE_LOG10)? 1 : 0; } // Description: // Get the number of available colors for mapping to. virtual vtkIdType GetNumberOfAvailableColors(); // Description: // Return a color given an integer index. // // This is used to assign colors to annotations (given an offset into the // list of annotations). // If the table is empty or \a idx < 0, then NanColor is returned. 
virtual void GetIndexedColor(vtkIdType idx, double rgba[4]); protected: vtkLookupTable(int sze=256, int ext=256); ~vtkLookupTable(); vtkIdType NumberOfColors; vtkUnsignedCharArray *Table; double TableRange[2]; double HueRange[2]; double SaturationRange[2]; double ValueRange[2]; double AlphaRange[2]; double NanColor[4]; double BelowRangeColor[4]; int UseBelowRangeColor; double AboveRangeColor[4]; int UseAboveRangeColor; int Scale; int Ramp; vtkTimeStamp InsertTime; vtkTimeStamp BuildTime; double RGBA[4]; //used during conversion process unsigned char NanColorChar[4]; int OpaqueFlag; vtkTimeStamp OpaqueFlagBuildTime; // Description: // Resize the LookupTable to have enough room for the out-of-range colors void ResizeTableForSpecialColors(); private: vtkLookupTable(const vtkLookupTable&) VTK_DELETE_FUNCTION; void operator=(const vtkLookupTable&) VTK_DELETE_FUNCTION; }; //---------------------------------------------------------------------------- inline unsigned char *vtkLookupTable::WritePointer(const vtkIdType id, const int number) { this->InsertTime.Modified(); return this->Table->WritePointer(4*id,4*number); } #endif
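// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original
// header). Every call below is declared in this header except Delete(),
// which is inherited from vtkObjectBase. A typical scalar-to-color mapping:
//
//   vtkLookupTable* lut = vtkLookupTable::New();
//   lut->SetTableRange(0.0, 100.0);      // scalar range to be mapped
//   lut->SetHueRange(0.667, 0.0);        // blue-to-red rainbow
//   lut->SetNumberOfTableValues(256);
//   lut->Build();                        // required when configuring by hand
//
//   double rgb[3];
//   lut->GetColor(42.0, rgb);            // RGB in [0,1] for scalar value 42
//   const unsigned char* rgba = lut->MapValue(42.0);  // RGBA as uchar[4]
//
//   lut->Delete();
// ---------------------------------------------------------------------------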
SimVascular/VTK
Common/Core/vtkLookupTable.h
C
bsd-3-clause
14,969
package org.jzy3d.chart.controllers; public enum ControllerType { ZOOM, SHIFT, ROTATE, PAN }
freddy33/jzy3d
src/api/org/jzy3d/chart/controllers/ControllerType.java
Java
bsd-3-clause
106
'use strict'; //var bhl_url = "http://words.bighugelabs.com/api/2/d1cbb0c53ddabe240d726e3fc76b1491/"; var bhl_url = "http://words.bighugelabs.com/api/2/0791f67e8212761406e388ef91856024/"; var sel_x = 0, sel_y = 0; var word = ''; var help_paragraph_en = $('<p></p>').addClass('help-paragraph').text("Click the mouse or any key to close"); var error_paragraph_en = $('<p></p>').text("Sorry, no synonyms found"); chrome.runtime.onMessage.addListener( function(req, sen, sendResponse) { if (req.action == "enable synonyms") { if (req.status == true) { $(document).bind('dblclick', onDoubleClickEn); console.log("Activating synonyms"); } else { $(document).unbind('dblclick', onDoubleClickEn); console.log("Deactivating synonyms"); } } } ); function onDoubleClickEn(e) { get_selection_en(); console.log("Word: " + word); } function get_selection_en() { var txt = ''; var zoomCoef = 1; chrome.storage.local.get({ 'token' : "", 'preferences' : {} }, function(results) { console.log("Antes de control de error: " + JSON.stringify(results)); if (!(chrome.runtime.lastError)) { console.log("No error: " + JSON.stringify(results['preferences'])); if (results['preferences'].hasOwnProperty('magnification')) { zoomCoef = results['preferences'].magnification; } console.log("ZoomCoef: " + results['preferences'].magnification); if (window.getSelection) { txt = window.getSelection(); var selection = txt.getRangeAt(0); var bodyRect = document.body.getBoundingClientRect(); var sel_xx = selection.getBoundingClientRect().left; var sel_yy = selection.getBoundingClientRect().top; sel_x = (sel_xx - bodyRect.left) / zoomCoef; sel_y = (sel_yy - bodyRect.top) / zoomCoef; } else if (document.getSelection) { txt = document.getSelection(); var selection = txt.getRangeAt(0); var bodyRect = document.body.getBoundingClientRect(); var sel_xx = selection.getBoundingClientRect().left; var sel_yy = selection.getBoundingClientRect().top; sel_x = (sel_xx - bodyRect.left) / zoomCoef; sel_y = (sel_yy - bodyRect.top) / zoomCoef; } else if (document.selection) { txt = document.selection.createRange().text; } console.log("Position X: " + sel_x + " - Y: " + sel_y); word = $.trim(txt.toString()); displaySynonims(); } }); } function displaySynonims() { if (word.length > 0) { chrome.runtime.sendMessage({ method: 'GET', action: 'xhttp', url: bhl_url + word + "/json", format: 'application/json' }, function(responseText) { var synonyms = JSON.parse(responseText); if(synonyms.hasOwnProperty('error')) { showErrorTooltipEn(); } else { console.log("Xhttp response: " + responseText); showTooltipEn(synonyms); /*Callback function to deal with the response*/ } }); } } function showTooltipEn(synonyms) { //var synonyms = JSON.parse(synonymsJSON); var tooltipDiv = $("<div class='tooltip'></div>"); $(tooltipDiv).css("top", sel_y); $(tooltipDiv).css("left", sel_x); var superTitle = $("<h2></h2>").text('Synonyms of "' + word + '"'); $(tooltipDiv).append(superTitle); if (synonyms.hasOwnProperty("noun")) { if ((synonyms.noun.hasOwnProperty("syn")) && (synonyms.noun.syn.length > 0) ) { var synonymsList = synonyms.noun.syn.join(", "); } var title = $("<h3></h3>").text("Noun"); var par = $("<p></p>").text(synonymsList); $(tooltipDiv).append(title); $(tooltipDiv).append(par); } if (synonyms.hasOwnProperty("verb")) { if ((synonyms.verb.hasOwnProperty("syn")) && (synonyms.verb.syn.length > 0) ) { var synonymsList = synonyms.verb.syn.join(", "); } var title = $("<h3></h3>").text("Verb"); var par = $("<p></p>").text(synonymsList); $(tooltipDiv).append(title); 
$(tooltipDiv).append(par); } if (synonyms.hasOwnProperty("adjective")) { if ((synonyms.adjective.hasOwnProperty("syn")) && (synonyms.adjective.syn.length > 0) ) { var synonymsList = synonyms.adjective.syn.join(", "); } var title = $("<h3></h3>").text("Adjective"); var par = $("<p></p>").text(synonymsList); $(tooltipDiv).append(title); $(tooltipDiv).append(par); } if (synonyms.hasOwnProperty("adverb")) { if ((synonyms.adverb.hasOwnProperty("syn")) && (synonyms.adverb.syn.length > 0) ) { var synonymsList = synonyms.adverb.syn.join(", "); } var title = $("<h3></h3>").text("Adverb"); var par = $("<p></p>").text(synonymsList); $(tooltipDiv).append(title); $(tooltipDiv).append(par); } $(tooltipDiv).append(help_paragraph_en); $('body').append(tooltipDiv); } function showErrorTooltipEn() { var tooltipDiv = $("<div class='tooltip'></div>"); $(tooltipDiv).css("top", sel_y); $(tooltipDiv).css("left", sel_x); var superTitle = $("<h2></h2>").text('Synonyms of "' + word + '"'); $(tooltipDiv).append(superTitle); $(tooltipDiv).append(error_paragraph_en); $(tooltipDiv).append(help_paragraph_en); $('body').append(tooltipDiv); } $(document).keyup(function(e) { $(".tooltip").remove(); }); $(document).mousedown(function(e) { $(".tooltip").remove(); });
GutiX/chrome4cloud
js/synonyms_en.js
JavaScript
bsd-3-clause
5,480
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# This file is the main file used when running tests with pytest directly,
# in particular if running e.g. ``pytest docs/``.

from importlib.util import find_spec
import os
import pkg_resources
import tempfile

try:
    from pytest_astropy_header.display import PYTEST_HEADER_MODULES
except ImportError:
    PYTEST_HEADER_MODULES = {}

import astropy

# ``pytest_plugins`` must exist at module level before it can be extended
# below; otherwise the ``+=`` would raise a NameError.
pytest_plugins = []

if find_spec('asdf') is not None:
    from asdf import __version__ as asdf_version
    if asdf_version >= astropy.__minimum_asdf_version__:
        entry_points = []
        for entry_point in pkg_resources.iter_entry_points('pytest11'):
            entry_points.append(entry_point.name)
        if "asdf_schema_tester" not in entry_points:
            pytest_plugins += ['asdf.tests.schema_tester']
        PYTEST_HEADER_MODULES['Asdf'] = 'asdf'

# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.

os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')

os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))

# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
MSeifert04/astropy
conftest.py
Python
bsd-3-clause
1,526
define([ "./SwigTemplate", "elenajs/node!path" ], function(SwigTemplate, path) { var cache = {}; return { module: 'swig', load: function(id, require, load) { var parts = id.split("!"), url = path.resolve(require.toUrl(parts[0])), result; if (url in cache) { result = cache[url]; } else { try { result = new SwigTemplate({templateSrc: url}); cache[url] = result; } catch (err) { console.error("rendering: " + url, err); } } load(result); } }; });
elenajs/elenajs-swig
lib/main.js
JavaScript
bsd-3-clause
720
<?php

use yii\db\Schema;
use yii\db\Migration;

class m150526_100930_configs extends Migration
{
    public function up()
    {
        $this->insert('{{%system_config}}', [
            'key' => 'contact_phone',
            'value' => '+38 000 0000 000',
        ]);
        $this->insert('{{%system_config}}', [
            'key' => 'contact_email',
            'value' => 'contact@avsystems.com.ua',
        ]);
    }

    public function down()
    {
        // Revert the rows inserted in up() so the migration can be rolled back.
        $this->delete('{{%system_config}}', ['key' => 'contact_phone']);
        $this->delete('{{%system_config}}', ['key' => 'contact_email']);
    }
}
griga/m22-cms
yii-app/modules/admin/migrations/m150526_100930_configs.php
PHP
bsd-3-clause
482
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Audio;
using Microsoft.Xna.Framework.Content;
using Microsoft.Xna.Framework.GamerServices;
using Microsoft.Xna.Framework.Graphics;
using Microsoft.Xna.Framework.Input;
using Microsoft.Xna.Framework.Media;

namespace Breakout
{
    /// <summary>
    /// A single destructible brick: a sprite drawn at a fixed position,
    /// tinted with a color and carrying a remaining hit-point count.
    /// </summary>
    class Brique
    {
        private Texture2D sprite;
        private Color color;
        private Vector2 position;
        private int hp;

        public Brique(Texture2D sprite, Color color, Vector2 position, int hp)
        {
            this.sprite = sprite;
            this.color = color;
            this.position = position;
            this.hp = hp;
        }

        public Rectangle getLocation()
        {
            return new Rectangle((int)position.X, (int)position.Y, sprite.Width, sprite.Height);
        }

        public int getHp()
        {
            return hp;
        }

        public Color getColor()
        {
            return color;
        }

        public void setHp(int hp)
        {
            this.hp = hp;
        }

        public void setColor(Color color)
        {
            this.color = color;
        }

        public void Update()
        {
            // TODO: do something here (placeholder for per-frame brick logic)
        }

        public void Draw(SpriteBatch spriteBatch)
        {
            spriteBatch.Draw(sprite, position, color);
        }
    }
}
Alex0216/TFOL
Breakout/Breakout/GameObjects/Brique.cs
C#
bsd-3-clause
1,537
#!/usr/bin/python # Code is executed top-to-bottom on load. # Variables are defined at the first assignment a = 2 # defines `a` b = 2 # 'print' operator, simple form: just prints out human-readable representation # of the argument. NOTE: no \n! print a + b # Types in Python are dynamic! v = 42 # `v` is an integer print v v = 0.42 # now it's a float print v v = 2**76 # NEW: Loooong integers are supported! print v v = 4 + 0.2j # NEW: complex numbers! print v v = "almost but not quite entirely unlike tea" # now it's a string print v # 'print' operator, full form. print "%d %.1f %s" % (42, 4.2, "forty two") # non-optimal equivalent: print str(42) + " " + str(4.2) + " forty two"
denfromufa/mipt-course
demos/python/2_variables_and_types.py
Python
bsd-3-clause
694
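The demo above is written for Python 2 (the `print` statement, a separate long-integer type, %-only formatting). Purely as an illustrative companion and not part of the original course material, here is a minimal sketch of the same walkthrough in Python 3, where `print` is a function, all integers are a single arbitrary-precision `int`, and f-strings are available alongside %-formatting.

```python
#!/usr/bin/python3
# Python 3 version of the walkthrough above (illustrative sketch only).

a = 2  # defines `a`
b = 2
print(a + b)

v = 42            # int
print(v)
v = 0.42          # float
print(v)
v = 2**76         # still just `int`: Python 3 integers are arbitrary precision
print(v)
v = 4 + 0.2j      # complex numbers work the same way
print(v)
v = "almost but not quite entirely unlike tea"   # str
print(v)

# %-formatting still works, but f-strings are the idiomatic alternative.
print("%d %.1f %s" % (42, 4.2, "forty two"))
print(f"{42} {4.2:.1f} {'forty two'}")
```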
<?php /** * @see https://github.com/zendframwork/zend-json for the canonical source repository * @copyright Copyright (c) 2005-2018 Zend Technologies USA Inc. (http://www.zend.com) * @license https://github.com/zendframwork/zend-json/blob/master/LICENSE.md New BSD License */ namespace ZendTest\Json\TestAsset; /** * Test class for encoding classes. */ class TestObject { const FOO = 'bar'; public $foo = 'bar'; public $bar = 'baz'; // @codingStandardsIgnoreStart protected $_foo = 'fooled you'; // @codingStandardsIgnoreEnd public function foo($bar, $baz) { } public function bar($baz) { } protected function baz() { } }
zendframework/zend-json
test/TestAsset/TestObject.php
PHP
bsd-3-clause
703
""" fabcloudkit Functions for managing Nginx. This module provides functions that check for installation, install, and manage an installation of, Nginx. /etc/init.d/nginx: The "init-script" that allows Nginx to be run automatically at system startup. The existence of this file is verified, but it's assumed that the script is installed by the package manager that installed Nginx. /etc/nginx/nginx.conf: The main or root Nginx configuration file. This file is loaded by Nginx when it launches. The file contains an include directive that tells Nginx to load additional configurations from a different directory. Currently, this code writes a very basic nginx.conf file. /etc/nginx/conf.d/: The directory marked by the include directive in the nginx root configuration file. Individual server configurations are stored in files in this folder. /etc/nginx/conf.g/*.conf: Individual server configuration files. <deploy_root>/<name>/logs/ngaccess.log, ngerror.log: Default location of the access (ngaccess.log) and error (ngerror.log) log files for a specific server configuration. This location can be overridden in the call to write_server_config(). For more information on Nginx check out: http://nginx.org, http://wiki.nginx.org :copyright: (c) 2013 by Rick Bohrer. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import # standard import posixpath as path # pypi from fabric.operations import run, sudo # package from fabcloudkit import cfg, put_string from ..internal import * from ..toolbase import Tool, SimpleTool class NginxTool(Tool): def __init__(self): super(NginxTool,self).__init__() self._simple = SimpleTool.create('nginx') def check(self, **kwargs): return self._simple.check() def install(self, **kwargs): # install Nginx using the package manager. self._simple.install() start_msg('----- Configuring "Nginx":') # verify that there's an init-script. result = run('test -f /etc/init.d/nginx') if result.failed: raise HaltError('Uh oh. Package manager did not install an Nginx init-script.') # write nginx.conf file. dest = path.join(cfg().nginx_conf, 'nginx.conf') message('Writing "nginx.conf"') put_string(_NGINX_CONF, dest, use_sudo=True) # the Amazon Linux AMI uses chkconfig; the init.d script won't do the job by itself. # set Nginx so it can be managed by chkconfig; and turn on boot startup. result = run('which chkconfig') if result.succeeded: message('System has chkconfig; configuring.') result = sudo('chkconfig --add nginx') if result.failed: raise HaltError('"chkconfig --add nginx" failed.') result = sudo('chkconfig nginx on') if result.failed: raise HaltError('"chkconfig nginx on" failed.') succeed_msg('Successfully installed and configured "Nginx".') return self def write_config(self, name, server_names, proxy_pass, static_locations='', log_root=None, listen=80): """ Writes an Nginx server configuration file. This function writes a specific style of configuration, that seems to be somewhat common, where Nginx is used as a reverse-proxy for a locally-running (e.g., WSGI) server. :param name: identifies the server name; used to name the configuration file. :param server_names: :param proxy_pass: identifies the local proxy to which Nginx will pass requests. """ start_msg('----- Writing Nginx server configuration for "{0}":'.format(name)) # be sure the log directory exists. 
if log_root is None: log_root = path.join(cfg().deploy_root, name, 'logs') result = sudo('mkdir -p {0}'.format(log_root)) if result.failed: raise HaltError('Unable to create log directory: "{0}"'.format(log_root)) # generate and write the configuration file. server_config = _NGINX_SERVER_CONF.format(**locals()) dest = path.join(cfg().nginx_include_conf, '{name}.conf'.format(**locals())) message('Writing to file: "{0}"'.format(dest)) put_string(server_config, dest, use_sudo=True) succeed_msg('Wrote conf file for "{0}".'.format(name)) return self def delete_config(self, name): start_msg('----- Deleting server configuration for "{0}":'.format(name)) # delete the file, but ignore any errors. config_name = '{name}.conf'.format(**locals()) result = sudo('rm -f {0}'.format(path.join(cfg().nginx_include_conf, config_name))) if result.failed: failed_msg('Ignoring failed attempt to delete configuration "{0}"'.format(config_name)) else: succeed_msg('Successfully deleted configuration "{0}".'.format(config_name)) return self def reload(self): start_msg('----- Telling "Nginx" to reload configuration:') result = sudo('/etc/init.d/nginx reload') if result.failed: raise HaltError('"Nginx" configuration reload failed ({0})'.format(result)) succeed_msg('Successfully reloaded.') return self # register. Tool.__tools__['nginx'] = NginxTool _NGINX_SERVER_CONF = """ server {{ listen {listen}; server_name {server_names}; access_log {log_root}/ngaccess.log; error_log {log_root}/ngerror.log; location / {{ proxy_pass {proxy_pass}; proxy_redirect off; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; client_max_body_size 10m; client_body_buffer_size 128k; proxy_connect_timeout 90; proxy_send_timeout 90; proxy_read_timeout 90; proxy_buffer_size 4k; proxy_buffers 4 32k; proxy_busy_buffers_size 64k; proxy_temp_file_write_size 64k; }} {static_locations} }} """.lstrip() _NGINX_CONF = """ user nginx; worker_processes 1; error_log /var/log/nginx/error.log; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; keepalive_timeout 65; include /etc/nginx/conf.d/*.conf; } """.lstrip()
waxkinetic/fabcloudkit
fabcloudkit/tool/nginx.py
Python
bsd-3-clause
6,961
#include "Ht.h" #include "PersAddLd.h" #define BUSY_RETRY(b) { if (b) { HtRetry(); break; } } void CPersAddLd::PersAddLd() { if (PR_htValid) { switch (PR_htInst) { case ADD_LD1: { BUSY_RETRY(ReadMemBusy()); // Memory read request MemAddr_t memRdAddr = SR_op1Addr + (P_vecIdx << 3); ReadMem_op1(memRdAddr); HtContinue(ADD_LD2); } break; case ADD_LD2: { BUSY_RETRY(ReadMemBusy()); // Memory read request MemAddr_t memRdAddr = SR_op2Addr + (P_vecIdx << 3); ReadMem_op2(memRdAddr); ReadMemPause(ADD_RTN); } break; case ADD_RTN: { ht_uint1 dimenIdx = (ht_uint1)(PR_vecIdx & 0x1); BUSY_RETRY(SendReturnBusy_addLd()); BUSY_RETRY(SendUioBusy_ldOutA(dimenIdx)); BUSY_RETRY(SendUioBusy_ldOutB(dimenIdx)); //fprintf(stderr, "LD %d -> Sending %ld %ld\n", (int)SR_replId, (uint64_t)PR_op1, (uint64_t)PR_op2); SendUioData_ldOutA(dimenIdx, PR_op1); SendUioData_ldOutB(dimenIdx, PR_op2); SendReturn_addLd(); } break; default: assert(0); } } }
TonyBrewer/OpenHT
tests/userio_vadd_1/src_pers/PersAddLd_src.cpp
C++
bsd-3-clause
1,018