idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
50,100 | def all_columns ( self ) : columns = set ( ) for values in self . _parts : for value in values . _parts : columns . add ( value . column_name ) return sorted ( columns ) | Return list of all columns . |
50,101 | def get_annotation ( cls , fn ) : while fn is not None : if hasattr ( fn , '_schema_annotation' ) : return fn . _schema_annotation fn = getattr ( fn , 'im_func' , fn ) closure = getattr ( fn , '__closure__' , None ) fn = closure [ 0 ] . cell_contents if closure is not None else None return None | Find the _schema_annotation attribute for the given function . |
50,102 | def _create_request_schema ( self , params , required ) : schema = { 'additionalProperties' : True , 'definitions' : self . resolve ( '#/definitions' ) , 'properties' : { } , 'required' : required or ( ) , 'type' : 'object' } for param in params : schema [ 'properties' ] [ param ] = { '$ref' : '#/definitions/{}' . format ( param ) } return schema | Create a JSON schema for a request . |
50,103 | def update_note ( note_id : NoteId , body : Body = None , done : Done = None ) -> Note : if note_id != 1 : raise NotFoundError ( 'Note does not exist' ) new_note = note . copy ( ) if body is not None : new_note [ 'body' ] = body if done is not None : new_note [ 'done' ] = done return new_note | Update an existing note . |
50,104 | def to_task ( self ) : from google . appengine . api . taskqueue import Task task_args = self . get_task_args ( ) . copy ( ) payload = None if 'payload' in task_args : payload = task_args . pop ( 'payload' ) kwargs = { 'method' : METHOD_TYPE , 'payload' : json . dumps ( payload ) } kwargs . update ( task_args ) return Task ( ** kwargs ) | Return a task object representing this message . |
50,105 | def insert ( self ) : from google . appengine . api . taskqueue import Queue task = self . to_task ( ) Queue ( name = self . get_queue ( ) ) . add ( task ) | Insert the pull task into the requested queue default if non given . |
50,106 | def to_dict ( self ) : import copy options = copy . deepcopy ( self . _options ) eta = options . get ( 'task_args' , { } ) . get ( 'eta' ) if eta : options [ 'task_args' ] [ 'eta' ] = time . mktime ( eta . timetuple ( ) ) return options | Return this message as a dict suitable for json encoding . |
50,107 | def _get_id ( self ) : id = self . _options . get ( 'id' ) if id : return id id = uuid . uuid4 ( ) . hex self . update_options ( id = id ) return id | If this message has no id generate one . |
50,108 | def from_dict ( cls , message ) : message_options = message . copy ( ) eta = message_options . get ( 'task_args' , { } ) . get ( 'eta' ) if eta : from datetime import datetime message_options [ 'task_args' ] [ 'eta' ] = datetime . fromtimestamp ( eta ) return Message ( ** message_options ) | Return an message from a dict output by Async . to_dict . |
50,109 | def to_task ( self ) : task_args = self . get_task_args ( ) name = task_args . get ( 'name' , MESSAGE_PROCESSOR_NAME ) if not 'countdown' in task_args : task_args [ 'countdown' ] = self . frequency task_args [ 'name' ] = "%s-%s-%s-%s" % ( name , self . tag , self . current_batch , self . time_throttle ) self . update_options ( task_args = task_args ) return super ( MessageProcessor , self ) . to_task ( ) | Return a task object representing this MessageProcessor job . |
50,110 | def current_batch ( self ) : current_batch = memcache . get ( self . group_key ) if not current_batch : memcache . add ( self . group_key , 1 ) current_batch = 1 return current_batch | Return the batch id for the tag . |
50,111 | def fetch_messages ( self ) : if self . _fetched : return start = time . time ( ) loaded_messages = self . queue . lease_tasks_by_tag ( self . duration , self . size , tag = self . tag , deadline = self . deadline ) if ( not loaded_messages and round ( time . time ( ) - start , 1 ) >= self . deadline - 0.1 ) : raise DeadlineExceededError ( ) self . _messages . extend ( loaded_messages ) self . _fetched = True logging . debug ( "Calling fetch messages with %s:%s:%s:%s:%s:%s" % ( len ( self . _messages ) , len ( loaded_messages ) , len ( self . _processed_messages ) , self . duration , self . size , self . tag ) ) | Fetch messages from the specified pull - queue . |
50,112 | def next ( self ) : if not self . _messages : if self . auto_delete : self . delete_messages ( ) raise StopIteration message = self . _messages . pop ( 0 ) self . _processed_messages . append ( message ) return json . loads ( message . payload ) | Get the next batch of messages from the previously fetched messages . |
50,113 | def delete_messages ( self , only_processed = True ) : messages = self . _processed_messages if not only_processed : messages += self . _messages if messages : try : self . queue . delete_tasks ( messages ) except Exception : logging . exception ( "Error deleting messages" ) raise | Delete the messages previously leased . |
50,114 | def copy_func ( func : Callable ) -> Callable : copied = types . FunctionType ( func . __code__ , func . __globals__ , name = func . __name__ , argdefs = func . __defaults__ , closure = func . __closure__ ) copied = functools . update_wrapper ( copied , func ) copied . __kwdefaults__ = func . __kwdefaults__ return copied | Returns a copy of a function . |
50,115 | def get_params_from_func ( func : Callable , signature : Signature = None ) -> Params : if signature is None : signature = getattr ( func , '_doctor_signature' , None ) if signature is None : signature = inspect . signature ( func ) if getattr ( func , '_doctor_req_obj_type' , None ) : annotation = func . _doctor_req_obj_type all_params = list ( annotation . properties . keys ( ) ) required = annotation . required optional = list ( set ( all_params ) - set ( required ) ) else : required = [ key for key , p in signature . parameters . items ( ) if p . default == p . empty and issubclass ( p . annotation , SuperType ) ] optional = [ key for key , p in signature . parameters . items ( ) if p . default != p . empty ] all_params = [ key for key in signature . parameters . keys ( ) ] logic_params = copy ( all_params ) return Params ( all_params , required , optional , logic_params ) | Gets all parameters from a function signature . |
50,116 | def add_param_annotations ( logic : Callable , params : List [ RequestParamAnnotation ] ) -> Callable : if hasattr ( logic , '_doctor_signature' ) : sig = logic . _doctor_signature doctor_params = logic . _doctor_params else : sig = inspect . signature ( logic ) doctor_params = get_params_from_func ( logic , sig ) prev_parameters = { name : param for name , param in sig . parameters . items ( ) } new_params = [ ] for param in params : if param . name in prev_parameters : logging . warning ( 'Not adding %s to signature of %s, function ' 'already has that parameter in its signature.' , param . name , logic . __name__ ) continue doctor_params . all . append ( param . name ) default = None if param . required : default = Parameter . empty doctor_params . required . append ( param . name ) else : doctor_params . optional . append ( param . name ) new_params . append ( Parameter ( param . name , Parameter . KEYWORD_ONLY , default = default , annotation = param . annotation ) ) new_sig = sig . replace ( parameters = list ( prev_parameters . values ( ) ) + new_params ) logic . _doctor_signature = new_sig logic . _doctor_params = doctor_params return logic | Adds parameter annotations to a logic function . |
50,117 | def get_module_attr ( module_filename , module_attr , namespace = None ) : if namespace is None : namespace = { } module_filename = os . path . abspath ( module_filename ) namespace [ '__file__' ] = module_filename module_dir = os . path . dirname ( module_filename ) old_cwd = os . getcwd ( ) old_sys_path = sys . path [ : ] try : os . chdir ( module_dir ) sys . path . append ( module_dir ) with open ( module_filename , 'r' ) as mf : exec ( compile ( mf . read ( ) , module_filename , 'exec' ) , namespace ) return namespace [ module_attr ] finally : os . chdir ( old_cwd ) sys . path = old_sys_path | Get an attribute from a module . |
50,118 | def get_description_lines ( docstring ) : if prepare_docstring is None : raise ImportError ( 'sphinx must be installed to use this function.' ) if not isinstance ( docstring , str ) : return [ ] lines = [ ] for line in prepare_docstring ( docstring ) : if DESCRIPTION_END_RE . match ( line ) : break lines . append ( line ) if lines and lines [ - 1 ] != '' : lines . append ( '' ) return lines | Extract the description from the given docstring . |
50,119 | def get_valid_class_name ( s : str ) -> str : s = str ( s ) . strip ( ) s = '' . join ( [ w . title ( ) for w in re . split ( r'\W+|_' , s ) ] ) return re . sub ( r'[^\w|_]' , '' , s ) | Return the given string converted so that it can be used for a class name |
50,120 | def execution_context_from_async ( async ) : local_context = _local . get_local_context ( ) if local_context . _executing_async_context : raise errors . ContextExistsError execution_context = _ExecutionContext ( async ) local_context . _executing_async_context = execution_context return execution_context | Instantiate a new _ExecutionContext and store a reference to it in the global async context to make later retrieval easier . |
50,121 | def get_cli_service ( self , command_mode ) : return self . _cli . get_session ( self . _new_sessions ( ) , command_mode , self . _logger ) | Use cli . get_session to open CLI connection and switch into required mode |
50,122 | def check_crystal_equivalence ( crystal_a , crystal_b ) : cryst_a = spglib . get_symmetry_dataset ( ase_to_spgcell ( crystal_a ) , symprec = 1e-5 , angle_tolerance = - 1.0 , hall_number = 0 ) cryst_b = spglib . get_symmetry_dataset ( ase_to_spgcell ( crystal_b ) , symprec = 1e-5 , angle_tolerance = - 1.0 , hall_number = 0 ) samecell = np . allclose ( cryst_a [ 'std_lattice' ] , cryst_b [ 'std_lattice' ] , atol = 1e-5 ) samenatoms = len ( cryst_a [ 'std_positions' ] ) == len ( cryst_b [ 'std_positions' ] ) samespg = cryst_a [ 'number' ] == cryst_b [ 'number' ] def test_rotations_translations ( cryst_a , cryst_b , repeat ) : cell = cryst_a [ 'std_lattice' ] pristine = crystal ( 'Mg' , [ ( 0 , 0. , 0. ) ] , spacegroup = int ( cryst_a [ 'number' ] ) , cellpar = [ cell [ 0 ] / repeat [ 0 ] , cell [ 1 ] / repeat [ 1 ] , cell [ 2 ] / repeat [ 2 ] ] ) . repeat ( repeat ) sym_set_p = spglib . get_symmetry_dataset ( ase_to_spgcell ( pristine ) , symprec = 1e-5 , angle_tolerance = - 1.0 , hall_number = 0 ) for _ , trans in enumerate ( zip ( sym_set_p [ 'rotations' ] , sym_set_p [ 'translations' ] ) ) : pnew = ( np . matmul ( trans [ 0 ] , cryst_a [ 'std_positions' ] . T ) . T + trans [ 1 ] ) % 1.0 fulln = np . concatenate ( [ cryst_a [ 'std_types' ] [ : , None ] , pnew ] , axis = 1 ) fullb = np . concatenate ( [ cryst_b [ 'std_types' ] [ : , None ] , cryst_b [ 'std_positions' ] ] , axis = 1 ) sorted_n = np . array ( sorted ( [ list ( row ) for row in list ( fulln ) ] ) ) sorted_b = np . array ( sorted ( [ list ( row ) for row in list ( fullb ) ] ) ) if np . allclose ( sorted_n , sorted_b , atol = 1e-5 ) : return True return False if samecell and samenatoms and samespg : cell = cryst_a [ 'std_lattice' ] rng1 = range ( 1 , int ( norm ( cell [ 0 ] ) / 2. ) ) rng2 = range ( 1 , int ( norm ( cell [ 1 ] ) / 2. ) ) rng3 = range ( 1 , int ( norm ( cell [ 2 ] ) / 2. ) ) for repeat in itertools . 
product ( rng1 , rng2 , rng3 ) : if test_rotations_translations ( cryst_a , cryst_b , repeat ) : return True return False | Function that identifies whether two crystals are equivalent |
50,123 | def add_view_info ( self , view_info : ViewInfo ) : try : next ( info for info in self . _view_infos if info . view == view_info . view ) except StopIteration : indent = len ( self . _view_infos ) * '\t' self . _view_infos . append ( view_info ) info = 'Line {0} in "{1}"' . format ( view_info . line , view_info . view ) self . add_info ( indent + 'View info' , info ) | Adds view information to error message |
50,124 | def add_cause ( self , error : Exception ) : self . add_info ( 'Cause error' , '{0} - {1}' . format ( type ( error ) . __name__ , error ) ) | Adds cause error to error message |
50,125 | def _log_task_info ( headers , extra_task_info = None ) : ran_at = time . time ( ) task_eta = float ( headers . get ( 'X-Appengine-Tasketa' , 0.0 ) ) task_info = { 'retry_count' : headers . get ( 'X-Appengine-Taskretrycount' , '' ) , 'execution_count' : headers . get ( 'X-Appengine-Taskexecutioncount' , '' ) , 'task_eta' : task_eta , 'ran' : ran_at , 'gae_latency_seconds' : ran_at - task_eta } if extra_task_info : task_info [ 'extra' ] = extra_task_info logging . debug ( 'TASK-INFO: %s' , json . dumps ( task_info ) ) | Processes the header from task requests to log analytical data . |
50,126 | def _parse_ports ( self , ports ) : if not ports : return [ ] return [ tuple ( port_pair . split ( "::" ) ) for port_pair in ports . strip ( ";" ) . split ( ";" ) ] | Parse ports string into the list |
50,127 | def add_trunk_ports ( self ) : ports = self . attributes . get ( "{}Enable Full Trunk Ports" . format ( self . namespace_prefix ) , None ) return self . _parse_ports ( ports = ports ) | SDN Controller enable trunk ports |
50,128 | def remove_trunk_ports ( self ) : ports = self . attributes . get ( "{}Disable Full Trunk Ports" . format ( self . namespace_prefix ) , None ) return self . _parse_ports ( ports = ports ) | SDN Controller disable trunk ports |
50,129 | def _validate_build_resource_structure ( autoload_resource ) : result = { } for resource_prefix , resources in autoload_resource . iteritems ( ) : max_free_index = max ( map ( int , resources ) ) + 1 or 1 for index , sub_resources in resources . iteritems ( ) : if not index or index == - 1 : index = max_free_index max_free_index += 1 if len ( sub_resources ) > 1 : result [ "{0}{1}" . format ( resource_prefix , index ) ] = sub_resources [ 0 ] for resource in sub_resources [ 1 : ] : result [ "{0}{1}" . format ( resource_prefix , str ( max_free_index ) ) ] = resource max_free_index += 1 else : result [ "{0}{1}" . format ( resource_prefix , index ) ] = sub_resources [ 0 ] return result | Validate resource structure |
50,130 | def _build_autoload_details ( self , autoload_data , relative_path = "" ) : self . _autoload_details . attributes . extend ( [ AutoLoadAttribute ( relative_address = relative_path , attribute_name = attribute_name , attribute_value = attribute_value ) for attribute_name , attribute_value in autoload_data . attributes . iteritems ( ) ] ) for resource_relative_path , resource in self . _validate_build_resource_structure ( autoload_data . resources ) . iteritems ( ) : full_relative_path = posixpath . join ( relative_path , resource_relative_path ) self . _autoload_details . resources . append ( AutoLoadResource ( model = resource . cloudshell_model_name , name = resource . name , relative_address = full_relative_path , unique_identifier = resource . unique_identifier ) ) self . _build_autoload_details ( autoload_data = resource , relative_path = full_relative_path ) | Build autoload details |
50,131 | def set_attr ( self , key : str , value ) : self . attr_setter ( self , key , value ) | Sets node attribute . Can be customized by attr_setter property |
50,132 | def new ( self , node : Node ) : return Property ( self . name , self . _setter , node ) | Creates property for node |
50,133 | def from_context ( cls , context , shell_type = None , shell_name = None ) : return cls ( address = context . resource . address , family = context . resource . family , shell_type = shell_type , shell_name = shell_name , fullname = context . resource . fullname , attributes = dict ( context . resource . attributes ) , name = context . resource . name ) | Create an instance of TrafficGeneratorVBladeResource from the given context |
50,134 | def update_filter_dict ( d , user , status ) : if user is not None : if not user . id : return None d [ 'user' ] = user if status is not None : d [ 'status' ] = status | Helper . Updates filter dict for a queryset . |
50,135 | def get_flags_for_objects ( cls , objects_list , user = None , status = None ) : return get_model_class_from_string ( MODEL_FLAG ) . get_flags_for_objects ( objects_list , user = user , status = status ) | Returns a dictionary with flag objects associated with the given model objects . The dictionary is indexed by objects IDs . Each dict entry contains a list of associated flag objects . |
50,136 | def get_flags ( self , user = None , status = None ) : filter_kwargs = { } update_filter_dict ( filter_kwargs , user , status ) return self . flags . filter ( ** filter_kwargs ) . all ( ) | Returns flags for the object optionally filtered by status . |
50,137 | def set_flag ( self , user , note = None , status = None ) : if not user . id : return None init_kwargs = { 'user' : user , 'linked_object' : self , } if note is not None : init_kwargs [ 'note' ] = note if status is not None : init_kwargs [ 'status' ] = status flag = get_flag_model ( ) ( ** init_kwargs ) try : flag . save ( ) except IntegrityError : pass return flag | Flags the object . |
50,138 | def is_flagged ( self , user = None , status = None ) : filter_kwargs = { 'content_type' : ContentType . objects . get_for_model ( self ) , 'object_id' : self . id , } update_filter_dict ( filter_kwargs , user , status ) return self . flags . filter ( ** filter_kwargs ) . count ( ) | Returns boolean whether the objects is flagged by a user . |
50,139 | def set_backend ( database ) : new_backend = BACKENDS . get ( database . lower ( ) ) if not new_backend : raise Exception ( 'Backend {} is not supported.' . format ( database ) ) global BACKEND BACKEND = new_backend | Configure used database so sqlpuzzle can generate queries which are needed . For now there is only support of MySQL and PostgreSQL . |
50,140 | def health_check ( self ) : api_response = 'Online' result = 'Health check on resource {}' . format ( self . _resource_name ) try : health_check_flow = RunCommandFlow ( self . cli_handler , self . _logger ) health_check_flow . execute_flow ( ) result += ' passed.' except Exception as e : self . _logger . exception ( e ) api_response = 'Error' result += ' failed.' try : self . _api . SetResourceLiveStatus ( self . _resource_name , api_response , result ) except Exception : self . _logger . error ( 'Cannot update {} resource status on portal' . format ( self . _resource_name ) ) return result | Verify that device is accessible over CLI by sending ENTER for cli session |
50,141 | def reference ( cls , value ) : from sqlpuzzle . _common . utils import force_text value = force_text ( value ) parts = re . split ( r'{quote}([^{quote}]+){quote}|\.' . format ( quote = cls . reference_quote ) , value ) parts = ( '{quote}{i}{quote}' . format ( quote = cls . reference_quote , i = i ) if i != '*' else i for i in parts if i ) return '.' . join ( parts ) | Convert as reference on column . table = > table table . column = > table . column db . table . column = > db . table . column table . col . umn = > table . col . umn table . col . umn = > table . col . umn |
50,142 | def reference_to_path ( reference ) : if isinstance ( reference , basestring ) : import re if not re . match ( r'^[^\d\W]([a-zA-Z._]|((?<!\.)\d))+$' , reference ) : raise errors . BadObjectPathError ( 'Invalid reference path, must meet Python\'s identifier ' 'requirements, passed value was "%s".' , reference ) return reference if callable ( reference ) : parts = [ reference . __module__ ] if hasattr ( reference , 'im_class' ) : parts . append ( reference . im_class . __name__ ) if hasattr ( reference , 'func_name' ) : parts . append ( reference . func_name ) elif reference . __module__ == '__builtin__' : return reference . __name__ elif hasattr ( reference , '__name__' ) : parts . append ( reference . __name__ ) else : raise errors . BadObjectPathError ( "Invalid object type." ) return '.' . join ( parts ) raise errors . BadObjectPathError ( "Unable to determine path to callable." ) elif hasattr ( reference , '__package__' ) : return reference . __name__ raise errors . BadObjectPathError ( "Must provide a reference path or reference." ) | Convert a reference to a Python object to a string path . |
50,143 | def path_to_reference ( path ) : path = str ( path ) if '.' not in path : try : return globals ( ) [ "__builtins__" ] [ path ] except KeyError : try : return getattr ( globals ( ) [ "__builtins__" ] , path ) except AttributeError : pass try : return globals ( ) [ path ] except KeyError : pass raise errors . BadObjectPathError ( 'Unable to find function "%s".' % ( path , ) ) module_path , function_name = path . rsplit ( '.' , 1 ) try : module = __import__ ( name = module_path , fromlist = [ function_name ] ) except ImportError : module_path , class_name = module_path . rsplit ( '.' , 1 ) module = __import__ ( name = module_path , fromlist = [ class_name ] ) module = getattr ( module , class_name ) try : return getattr ( module , function_name ) except AttributeError : raise errors . BadObjectPathError ( 'Unable to find function "%s".' % ( path , ) ) | Convert an object path reference to a reference . |
50,144 | def encode_callbacks ( callbacks ) : from furious . async import Async if not callbacks : return encoded_callbacks = { } for event , callback in callbacks . iteritems ( ) : if callable ( callback ) : callback , _ = get_function_path_and_options ( callback ) elif isinstance ( callback , Async ) : callback = callback . to_dict ( ) encoded_callbacks [ event ] = callback return encoded_callbacks | Encode callbacks to as a dict suitable for JSON encoding . |
50,145 | def decode_callbacks ( encoded_callbacks ) : from furious . async import Async callbacks = { } for event , callback in encoded_callbacks . iteritems ( ) : if isinstance ( callback , dict ) : async_type = Async if '_type' in callback : async_type = path_to_reference ( callback [ '_type' ] ) callback = async_type . from_dict ( callback ) else : callback = path_to_reference ( callback ) callbacks [ event ] = callback return callbacks | Decode the callbacks to an executable form . |
50,146 | def _format_stack ( self , stack , current = None ) : if current is not None : stack = stack + [ current ] if len ( stack ) > 1 : prefix = os . path . commonprefix ( stack ) if prefix . endswith ( '/' ) : prefix = prefix [ : - 1 ] stack = [ scope [ len ( prefix ) : ] for scope in stack ] return ' => ' . join ( stack ) | Prettifies a scope stack for use in error messages . |
50,147 | def resolve ( self , ref , document = None ) : try : url = self . _urljoin_cache ( self . resolution_scope , ref ) if document is None : resolved = self . _remote_cache ( url ) else : _ , fragment = urldefrag ( url ) resolved = self . resolve_fragment ( document , fragment ) except jsonschema . RefResolutionError as e : message = e . args [ 0 ] if self . _scopes_stack : message = '{} (from {})' . format ( message , self . _format_stack ( self . _scopes_stack ) ) raise SchemaError ( message ) if isinstance ( resolved , dict ) and '$ref' in resolved : if url in self . _scopes_stack : raise SchemaError ( 'Circular reference in schema: {}' . format ( self . _format_stack ( self . _scopes_stack + [ url ] ) ) ) try : self . push_scope ( url ) return self . resolve ( resolved [ '$ref' ] ) finally : self . pop_scope ( ) else : return url , resolved | Resolve a fragment within the schema . |
50,148 | def resolve_remote ( self , uri ) : if uri . startswith ( 'file://' ) : try : path = uri [ 7 : ] with open ( path , 'r' ) as schema_file : result = yaml . load ( schema_file ) if self . cache_remote : self . store [ uri ] = result return result except yaml . parser . ParserError as e : logging . debug ( 'Error parsing {!r} as YAML: {}' . format ( uri , e ) ) return super ( SchemaRefResolver , self ) . resolve_remote ( uri ) | Add support to load YAML files . |
50,149 | def get_validator ( self , schema = None ) : schema = schema if schema is not None else self . schema return jsonschema . Draft4Validator ( schema , resolver = self . resolver , format_checker = jsonschema . draft4_format_checker ) | Get a jsonschema validator . |
50,150 | def resolve ( self , ref , document = None ) : _ , resolved = self . resolver . resolve ( ref , document = document ) return resolved | Resolve a ref within the schema . |
50,151 | def resolver ( self ) : if self . _resolver is not None : return self . _resolver if self . _schema_path is not None : self . _resolver = SchemaRefResolver ( 'file://' + self . _schema_path + '/' , self . schema ) else : self . _resolver = SchemaRefResolver . from_schema ( self . schema ) return self . _resolver | jsonschema RefResolver object for the base schema . |
50,152 | def validate ( self , value , validator ) : try : validator . validate ( value ) except Exception as e : logging . debug ( e , exc_info = e ) if isinstance ( e , DoctorError ) : raise else : validation_errors = sorted ( validator . iter_errors ( value ) , key = lambda e : e . path ) errors = { } for error in validation_errors : try : key = error . path [ 0 ] except IndexError : key = '_other' errors [ key ] = error . args [ 0 ] raise SchemaValidationError ( e . args [ 0 ] , errors = errors ) return value | Validates and returns the value . |
50,153 | def validate_json ( self , json_value , validator ) : value = parse_json ( json_value ) return self . validate ( value , validator ) | Validates and returns the parsed JSON string . |
50,154 | def from_file ( cls , schema_filepath , * args , ** kwargs ) : schema_filepath = os . path . abspath ( schema_filepath ) try : with open ( schema_filepath , 'r' ) as schema_file : schema = yaml . load ( schema_file . read ( ) ) except Exception : msg = 'Error loading schema file {}' . format ( schema_filepath ) logging . exception ( msg ) raise SchemaLoadingError ( msg ) return cls ( schema , * args , schema_path = os . path . dirname ( schema_filepath ) , ** kwargs ) | Create an instance from a YAML or JSON schema file . |
50,155 | def execute_flow ( self , custom_command = "" , is_config = False ) : responses = [ ] if isinstance ( custom_command , str ) : commands = [ custom_command ] elif isinstance ( custom_command , tuple ) : commands = list ( custom_command ) else : commands = custom_command if is_config : mode = self . _cli_handler . config_mode if not mode : raise Exception ( self . __class__ . __name__ , "CliHandler configuration is missing. Config Mode has to be defined" ) else : mode = self . _cli_handler . enable_mode if not mode : raise Exception ( self . __class__ . __name__ , "CliHandler configuration is missing. Enable Mode has to be defined" ) with self . _cli_handler . get_cli_service ( mode ) as session : for cmd in commands : responses . append ( session . send_command ( command = cmd ) ) return '\n' . join ( responses ) | Execute flow which run custom command on device |
50,156 | def run_custom_config_command ( self , custom_command ) : return self . run_command_flow . execute_flow ( custom_command = custom_command , is_config = True ) | Execute custom command in configuration mode on device |
50,157 | def async_from_options ( options ) : _type = options . pop ( '_type' , 'furious.async.Async' ) _type = path_to_reference ( _type ) return _type . from_dict ( options ) | Deserialize an Async or Async subclass from an options dict . |
50,158 | def encode_async_options ( async ) : options = copy . deepcopy ( async . _options ) options [ '_type' ] = reference_to_path ( async . __class__ ) eta = options . get ( 'task_args' , { } ) . get ( 'eta' ) if eta : options [ 'task_args' ] [ 'eta' ] = time . mktime ( eta . timetuple ( ) ) callbacks = async . _options . get ( 'callbacks' ) if callbacks : options [ 'callbacks' ] = encode_callbacks ( callbacks ) if '_context_checker' in options : _checker = options . pop ( '_context_checker' ) options [ '__context_checker' ] = reference_to_path ( _checker ) if '_process_results' in options : _processor = options . pop ( '_process_results' ) options [ '__process_results' ] = reference_to_path ( _processor ) return options | Encode Async options for JSON encoding . |
50,159 | def decode_async_options ( options ) : async_options = copy . deepcopy ( options ) eta = async_options . get ( 'task_args' , { } ) . get ( 'eta' ) if eta : from datetime import datetime async_options [ 'task_args' ] [ 'eta' ] = datetime . fromtimestamp ( eta ) callbacks = async_options . get ( 'callbacks' , { } ) if callbacks : async_options [ 'callbacks' ] = decode_callbacks ( callbacks ) if '__context_checker' in options : _checker = options [ '__context_checker' ] async_options [ '_context_checker' ] = path_to_reference ( _checker ) if '__process_results' in options : _processor = options [ '__process_results' ] async_options [ '_process_results' ] = path_to_reference ( _processor ) return async_options | Decode Async options from JSON decoding . |
50,160 | def defaults ( ** options ) : _check_options ( options ) def real_decorator ( function ) : function . _async_options = options @ wraps ( function ) def wrapper ( * args , ** kwargs ) : return function ( * args , ** kwargs ) return wrapper return real_decorator | Set default Async options on the function decorated . |
50,161 | def _persist_result ( self ) : self . _prepare_persistence_engine ( ) return self . _persistence_engine . store_async_result ( self . id , self . result ) | Store this Async s result in persistent storage . |
50,162 | def _initialize_recursion_depth ( self ) : from furious . context import get_current_async recursion_options = self . _options . get ( '_recursion' , { } ) current_depth = recursion_options . get ( 'current' , 0 ) max_depth = recursion_options . get ( 'max' , MAX_DEPTH ) try : executing_async = get_current_async ( ) current_depth = executing_async . recursion_depth if max_depth == MAX_DEPTH : executing_options = executing_async . get_options ( ) . get ( '_recursion' , { } ) max_depth = executing_options . get ( 'max' , max_depth ) except errors . NotInContextError : pass self . update_options ( _recursion = { 'current' : current_depth , 'max' : max_depth } ) | Ensure recursion info is initialized if not initialize it . |
50,163 | def check_recursion_depth ( self ) : from furious . async import MAX_DEPTH recursion_options = self . _options . get ( '_recursion' , { } ) max_depth = recursion_options . get ( 'max' , MAX_DEPTH ) if ( max_depth != DISABLE_RECURSION_CHECK and self . recursion_depth > max_depth ) : raise errors . AsyncRecursionError ( 'Max recursion depth reached.' ) | Check recursion depth raise AsyncRecursionError if too deep . |
50,164 | def _update_job ( self , target , args , kwargs ) : target_path , options = get_function_path_and_options ( target ) assert isinstance ( args , ( tuple , list ) ) or args is None assert isinstance ( kwargs , dict ) or kwargs is None if options : self . update_options ( ** options ) self . _options [ 'job' ] = ( target_path , args , kwargs ) | Specify the function this async job is to execute when run . |
50,165 | def set_execution_context ( self , execution_context ) : if self . _execution_context : raise errors . AlreadyInContextError self . _execution_context = execution_context | Set the ExecutionContext this async is executing under . |
50,166 | def update_options ( self , ** options ) : _check_options ( options ) if 'persistence_engine' in options : options [ 'persistence_engine' ] = reference_to_path ( options [ 'persistence_engine' ] ) if 'id' in options : self . _id = options [ 'id' ] self . _options . update ( options ) | Safely update this async job s configuration options . |
50,167 | def to_task ( self ) : from google . appengine . api . taskqueue import Task from google . appengine . api . taskqueue import TaskRetryOptions self . _increment_recursion_level ( ) self . check_recursion_depth ( ) url = "%s/%s" % ( ASYNC_ENDPOINT , self . function_path ) kwargs = { 'url' : url , 'headers' : self . get_headers ( ) . copy ( ) , 'payload' : json . dumps ( self . to_dict ( ) ) } kwargs . update ( copy . deepcopy ( self . get_task_args ( ) ) ) retry_options = copy . deepcopy ( DEFAULT_RETRY_OPTIONS ) retry_options . update ( kwargs . pop ( 'retry_options' , { } ) ) kwargs [ 'retry_options' ] = TaskRetryOptions ( ** retry_options ) return Task ( ** kwargs ) | Return a task object representing this async job . |
50,168 | def start ( self , transactional = False , async = False , rpc = None ) : from google . appengine . api import taskqueue task = self . to_task ( ) queue = taskqueue . Queue ( name = self . get_queue ( ) ) retry_transient = self . _options . get ( 'retry_transient_errors' , True ) retry_delay = self . _options . get ( 'retry_delay' , RETRY_SLEEP_SECS ) add = queue . add if async : add = partial ( queue . add_async , rpc = rpc ) try : ret = add ( task , transactional = transactional ) except taskqueue . TransientError : if transactional or not retry_transient : raise time . sleep ( retry_delay ) ret = add ( task , transactional = transactional ) except ( taskqueue . TaskAlreadyExistsError , taskqueue . TombstonedTaskError ) : return return ret | Insert the task into the requested queue default if non given . |
50,169 | def from_dict ( cls , async ) : async_options = decode_async_options ( async ) target , args , kwargs = async_options . pop ( 'job' ) return cls ( target , args , kwargs , ** async_options ) | Return an async job from a dict output by Async . to_dict . |
50,170 | def _prepare_persistence_engine ( self ) : if self . _persistence_engine : return persistence_engine = self . _options . get ( 'persistence_engine' ) if persistence_engine : self . _persistence_engine = path_to_reference ( persistence_engine ) return from furious . config import get_default_persistence_engine self . _persistence_engine = get_default_persistence_engine ( ) | Load the specified persistence engine or the default if none is set . |
50,171 | def _get_context_id ( self ) : from furious . context import get_current_context context_id = self . _options . get ( 'context_id' ) if context_id : return context_id try : context = get_current_context ( ) except errors . NotInContextError : context = None self . update_options ( context_id = None ) if context : context_id = context . id self . update_options ( context_id = context_id ) return context_id | If this async is in a context set the context id . |
50,172 | def full_id ( self ) : full_id = "" if self . parent_id : full_id = ":" . join ( [ self . parent_id , self . id ] ) else : full_id = self . id if self . context_id : full_id = "|" . join ( [ full_id , self . context_id ] ) return full_id | Return the full_id for this Async . Consists of the parent id id and context id . |
50,173 | def _increment_recursion_level ( self ) : self . _initialize_recursion_depth ( ) recursion_options = self . _options . get ( '_recursion' , { } ) current_depth = recursion_options . get ( 'current' , 0 ) + 1 max_depth = recursion_options . get ( 'max' , MAX_DEPTH ) self . update_options ( _recursion = { 'current' : current_depth , 'max' : max_depth } ) | Increment current_depth based on either defaults or the enclosing Async . |
50,174 | def context_id ( self ) : if not self . _context_id : self . _context_id = self . _get_context_id ( ) self . update_options ( context_id = self . _context_id ) return self . _context_id | Return this Async s Context Id if it exists . |
50,175 | def _payload_to_dict ( self ) : if self . status != self . ERROR or not self . payload : return self . payload import traceback return { "error" : self . payload . error , "args" : self . payload . args , "traceback" : traceback . format_exception ( * self . payload . traceback ) } | When an error status the payload is holding an AsyncException that is converted to a serializable dict . |
50,176 | def put ( func : Callable , allowed_exceptions : List = None , title : str = None , req_obj_type : Callable = None ) -> HTTPMethod : return HTTPMethod ( 'put' , func , allowed_exceptions = allowed_exceptions , title = title , req_obj_type = req_obj_type ) | Returns a HTTPMethod instance to create a PUT route . |
50,177 | def create_http_method ( logic : Callable , http_method : str , handle_http : Callable , before : Callable = None , after : Callable = None ) -> Callable : @ functools . wraps ( logic ) def fn ( handler , * args , ** kwargs ) : if before is not None and callable ( before ) : before ( ) result = handle_http ( handler , args , kwargs , logic ) if after is not None and callable ( after ) : after ( result ) return result return fn | Create a handler method to be used in a handler class . |
50,178 | def get_handler_name ( route : Route , logic : Callable ) -> str : if route . handler_name is not None : return route . handler_name if any ( m for m in route . methods if m . method . lower ( ) == 'post' ) : if route . heading != 'API' : return '{}ListHandler' . format ( get_valid_class_name ( route . heading ) ) return '{}ListHandler' . format ( get_valid_class_name ( logic . __name__ ) ) if route . heading != 'API' : return '{}Handler' . format ( get_valid_class_name ( route . heading ) ) return '{}Handler' . format ( get_valid_class_name ( logic . __name__ ) ) | Gets the handler name . |
50,179 | def create_routes ( routes : Tuple [ HTTPMethod ] , handle_http : Callable , default_base_handler_class : Any ) -> List [ Tuple [ str , Any ] ] : created_routes = [ ] all_handler_names = [ ] for r in routes : handler = None if r . base_handler_class is not None : base_handler_class = r . base_handler_class else : base_handler_class = default_base_handler_class handler_name = get_handler_name ( r , r . methods [ 0 ] . logic ) if handler_name in all_handler_names : handler_name = '{}{}' . format ( handler_name , len ( all_handler_names ) ) all_handler_names . append ( handler_name ) for method in r . methods : logic = method . logic http_method = method . method http_func = create_http_method ( logic , http_method , handle_http , before = r . before , after = r . after ) handler_methods_and_properties = { '__name__' : handler_name , '_doctor_heading' : r . heading , 'methods' : set ( [ http_method . upper ( ) ] ) , http_method : http_func , } if handler is None : handler = type ( handler_name , ( base_handler_class , ) , handler_methods_and_properties ) else : setattr ( handler , http_method , http_func ) if hasattr ( handler , 'methods' ) : handler . methods . add ( http_method . upper ( ) ) created_routes . append ( ( r . route , handler ) ) return created_routes | Creates handler routes from the provided routes . |
50,180 | def parse_args ( options = { } , * args , ** kwds ) : parser_options = ParserOptions ( options ) parser_input = ParserInput ( args , kwds ) parser = Parser ( parser_options , parser_input ) parser . parse ( ) return parser . output_data | Parser of arguments . |
50,181 | def simple_state_machine ( ) : from random import random from furious . async import Async number = random ( ) logging . info ( 'Generating a number... %s' , number ) if number > 0.25 : logging . info ( 'Continuing to do stuff.' ) return Async ( target = simple_state_machine ) return number | Pick a number if it is more than some cuttoff continue the chain . |
50,182 | def render_attrs ( attrs ) : if attrs is not None : def parts ( ) : for key , value in sorted ( attrs . items ( ) ) : if value is None : continue if value is True : yield '%s' % ( key , ) continue if key == 'class' and isinstance ( value , dict ) : if not value : continue value = render_class ( value ) if key == 'style' and isinstance ( value , dict ) : if not value : continue value = render_style ( value ) yield '%s="%s"' % ( key , ( '%s' % value ) . replace ( '"' , '"' ) ) return mark_safe ( ' %s' % ' ' . join ( parts ( ) ) ) return '' | Render HTML attributes or return if no attributes needs to be rendered . |
50,183 | def context_completion_checker ( async ) : store_async_marker ( async . id , async . result . status if async . result else - 1 ) logging . debug ( "Async check completion for: %s" , async . context_id ) current_queue = _get_current_queue ( ) from furious . async import Async logging . debug ( "Completion Check queue:%s" , current_queue ) Async ( _completion_checker , queue = current_queue , args = ( async . id , async . context_id ) ) . start ( ) return True | Persist async marker and async the completion check |
50,184 | def _completion_checker ( async_id , context_id ) : if not context_id : logging . debug ( "Context for async %s does not exist" , async_id ) return context = FuriousContext . from_id ( context_id ) marker = FuriousCompletionMarker . get_by_id ( context_id ) if marker and marker . complete : logging . info ( "Context %s already complete" % context_id ) return True task_ids = context . task_ids if async_id in task_ids : task_ids . remove ( async_id ) logging . debug ( "Loaded context." ) logging . debug ( task_ids ) done , has_errors = _check_markers ( task_ids ) if not done : return False _mark_context_complete ( marker , context , has_errors ) return True | Check if all Async jobs within a Context have been run . |
50,185 | def _check_markers ( task_ids , offset = 10 ) : shuffle ( task_ids ) has_errors = False for index in xrange ( 0 , len ( task_ids ) , offset ) : keys = [ ndb . Key ( FuriousAsyncMarker , id ) for id in task_ids [ index : index + offset ] ] markers = ndb . get_multi ( keys ) if not all ( markers ) : logging . debug ( "Not all Async's complete" ) return False , None has_errors = not all ( ( marker . success for marker in markers ) ) return True , has_errors | Returns a flag for markers being found for the task_ids . If all task ids have markers True will be returned . Otherwise it will return False as soon as a None result is hit . |
50,186 | def _mark_context_complete ( marker , context , has_errors ) : current = None if marker : current = marker . key . get ( ) if not current : return False if current and current . complete : return False current . complete = True current . has_errors = has_errors current . put ( ) _insert_post_complete_tasks ( context ) return True | Transactionally complete the context . |
50,187 | def _insert_post_complete_tasks ( context ) : logging . debug ( "Context %s is complete." , context . id ) context . exec_event_handler ( 'complete' , transactional = True ) try : from furious . async import Async Async ( _cleanup_markers , queue = CLEAN_QUEUE , args = [ context . id , context . task_ids ] , task_args = { 'countdown' : CLEAN_DELAY } ) . start ( ) except : pass | Insert the event s asyncs and cleanup tasks . |
50,188 | def _cleanup_markers ( context_id , task_ids ) : logging . debug ( "Cleanup %d markers for Context %s" , len ( task_ids ) , context_id ) delete_entities = [ ndb . Key ( FuriousAsyncMarker , id ) for id in task_ids ] delete_entities . append ( ndb . Key ( FuriousCompletionMarker , context_id ) ) ndb . delete_multi ( delete_entities ) logging . debug ( "Markers cleaned." ) | Delete the FuriousAsyncMarker entities corresponding to ids . |
50,189 | def store_context ( context ) : logging . debug ( "Attempting to store Context %s." , context . id ) entity = FuriousContext . from_context ( context ) marker = FuriousCompletionMarker ( id = context . id ) key , _ = ndb . put_multi ( ( entity , marker ) ) logging . debug ( "Stored Context with key: %s." , key ) return key | Persist a furious . context . Context object to the datastore by loading it into a FuriousContext ndb . Model . |
50,190 | def store_async_result ( async_id , async_result ) : logging . debug ( "Storing result for %s" , async_id ) key = FuriousAsyncMarker ( id = async_id , result = json . dumps ( async_result . to_dict ( ) ) , status = async_result . status ) . put ( ) logging . debug ( "Setting Async result %s using marker: %s." , async_result , key ) | Persist the Async s result to the datastore . |
50,191 | def store_async_marker ( async_id , status ) : logging . debug ( "Attempting to mark Async %s complete." , async_id ) marker = FuriousAsyncMarker . get_by_id ( async_id ) if marker : logging . debug ( "Marker already exists for %s." , async_id ) return key = FuriousAsyncMarker ( id = async_id , status = status ) . put ( ) logging . debug ( "Marked Async complete using marker: %s." , key ) | Persist a marker indicating the Async ran to the datastore . |
50,192 | def iter_context_results ( context , batch_size = 10 , task_cache = None ) : for futures in iget_batches ( context . task_ids , batch_size = batch_size ) : for key , future in futures : task = future . get_result ( ) if task_cache is not None : task_cache [ key . id ( ) ] = task yield key . id ( ) , task | Yield out the results found on the markers for the context task ids . |
50,193 | def iget_batches ( task_ids , batch_size = 10 ) : make_key = lambda _id : ndb . Key ( FuriousAsyncMarker , _id ) for keys in i_batch ( imap ( make_key , task_ids ) , batch_size ) : yield izip ( keys , ndb . get_multi_async ( keys ) ) | Yield out a map of the keys and futures in batches of the batch size passed in . |
50,194 | def i_batch ( items , size ) : for items_batch in iter ( lambda : tuple ( islice ( items , size ) ) , tuple ( ) ) : yield items_batch | Generator that iteratively batches items to a max size and consumes the items as each batch is yielded . |
50,195 | def from_context ( cls , context ) : return cls ( id = context . id , context = context . to_dict ( ) ) | Create a cls entity from a context . |
50,196 | def from_id ( cls , id ) : from furious . context import Context entity = cls . get_by_id ( id ) if not entity : raise FuriousContextNotFoundError ( "Context entity not found for: {}" . format ( id ) ) return Context . from_dict ( entity . context ) | Load a cls entity and instantiate the Context it stores . |
50,197 | def items ( self ) : for key , task in self . _tasks : if not ( task and task . result ) : yield key , None else : yield key , json . loads ( task . result ) [ "payload" ] | Yield the async reuslts for the context . |
50,198 | def values ( self ) : for _ , task in self . _tasks : if not ( task and task . result ) : yield None else : yield json . loads ( task . result ) [ "payload" ] | Yield the async reuslt values for the context . |
50,199 | def aiidalab_display ( obj , downloadable = True , ** kwargs ) : from aiidalab_widgets_base import aiida_visualizers try : visualizer = getattr ( aiida_visualizers , AIIDA_VISUALIZER_MAPPING [ obj . type ] ) display ( visualizer ( obj , downloadable = downloadable ) , ** kwargs ) except KeyError : display ( obj , ** kwargs ) | Display AiiDA data types in Jupyter notebooks . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.