13,900
def map_value ( self , value , gid ) : base_gid = self . base_gid_pattern . search ( gid ) . group ( 1 ) if self . anonymyze : try : if value in self . _maps [ base_gid ] : return self . _maps [ base_gid ] [ value ] else : k = ( len ( self . _maps [ base_gid ] ) + 1 ) % self . mapmax new_item = u'{0}_{1:0{2}d}' . format ( base_gid . upper ( ) , k , self . mapexp ) self . _maps [ base_gid ] [ value ] = new_item return new_item except KeyError : return value elif base_gid in [ 'client' , 'mail' , 'from' , 'rcpt' , 'user' ] and self . ip_lookup : ip_match = self . ip_pattern . search ( value ) if ip_match is None : return value host = self . gethost ( ip_match . group ( 1 ) ) if host == ip_match . group ( 1 ) or value . startswith ( host ) : return value return u'' . join ( [ value [ : ip_match . start ( 1 ) ] , self . gethost ( ip_match . group ( 1 ) ) , value [ ip_match . end ( 1 ) : ] ] ) elif ( base_gid == 'user' or base_gid == 'uid' ) and self . uid_lookup : return self . getuname ( value ) else : return value
Return the value for a group id, applying the requested mapping. Map only groups related to a filter, i.e. when the basename of the group is identical to the name of a filter.
13,901
def match_to_dict ( self , match , gids ) : values = { } for gid in gids : try : values [ gid ] = self . map_value ( match . group ( gid ) , gid ) except IndexError : pass return values
Map values from match into a dictionary .
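A minimal sketch of the same pattern outside the class, assuming an identity mapping in place of map_value (the real method applies anonymization or host/uid lookup); the regex and input here are illustration only:

import re

pattern = re.compile(r'(?P<client>\S+) uid=(?P<uid>\d+)')
match = pattern.search('mail.example.org uid=1000')
values = {}
for gid in ('client', 'uid'):
    try:
        values[gid] = match.group(gid)  # map_value(...) in the real code
    except IndexError:  # group id not present in this pattern
        pass
# values == {'client': 'mail.example.org', 'uid': '1000'}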
13,902
def match_to_string ( self , match , gids , values = None ) : s = match . string parts = [ ] k = 0 for gid in sorted ( gids , key = lambda x : gids [ x ] ) : if values is None : try : value = self . map_value ( match . group ( gid ) , gid ) parts . append ( s [ k : match . start ( gid ) ] ) parts . append ( value ) k = match . end ( gid ) except IndexError : continue elif gid in values : parts . append ( s [ k : match . start ( gid ) ] ) parts . append ( values [ gid ] ) k = match . end ( gid ) parts . append ( s [ k : ] ) return u"" . join ( parts )
Return the mapped string from the match object. If a dictionary of values is provided, use it to build the string.
13,903
def gethost ( self , ip_addr ) : try : if ip_addr [ : 7 ] == '::ffff:' : ip_addr = ip_addr [ 7 : ] except TypeError : pass if ip_addr [ 0 ] in string . letters : return ip_addr try : return self . hostsmap [ ip_addr ] except KeyError : pass try : name = socket . gethostbyaddr ( ip_addr ) [ 0 ] except socket . error : name = ip_addr self . hostsmap [ ip_addr ] = name return name
Do a reverse DNS lookup on an IP address.
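The memoized reverse-lookup at the heart of gethost, as a standalone sketch using only the standard library:

import socket

hostsmap = {}  # cache of ip -> hostname, as in the class above

def reverse_lookup(ip_addr):
    if ip_addr in hostsmap:
        return hostsmap[ip_addr]
    try:
        name = socket.gethostbyaddr(ip_addr)[0]
    except socket.error:
        name = ip_addr  # fall back to the raw address on lookup failure
    hostsmap[ip_addr] = name
    return name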
13,904
def getuname ( self , uid ) : uid = int ( uid ) try : return self . uidsmap [ uid ] except KeyError : pass try : name = pwd . getpwuid ( uid ) [ 0 ] except ( KeyError , AttributeError ) : name = "uid=%d" % uid self . uidsmap [ uid ] = name return name
Get the username of a given uid .
13,905
def redirect ( endpoint , ** kw ) : _endpoint = None if isinstance ( endpoint , six . string_types ) : _endpoint = endpoint if "/" in endpoint : return f_redirect ( endpoint ) else : for r in Mocha . _app . url_map . iter_rules ( ) : _endpoint = endpoint if 'GET' in r . methods and endpoint in r . endpoint : _endpoint = r . endpoint break else : if isinstance ( endpoint , Mocha ) : fn = sys . _getframe ( ) . f_back . f_code . co_name endpoint = getattr ( endpoint , fn ) if is_method ( endpoint ) : _endpoint = _get_action_endpoint ( endpoint ) if not _endpoint : _endpoint = _build_endpoint_route_name ( endpoint ) if _endpoint : return f_redirect ( url_for ( _endpoint , ** kw ) ) else : raise exceptions . MochaError ( "Invalid endpoint" )
Redirect allows redirecting dynamically using the class's methods without knowing the right endpoint. Expecting all endpoints to have GET as a method, it will try to pick the first match based on the endpoint provided or on the rules in the app's url_map.
13,906
def get_true_argspec ( method ) : argspec = inspect . getargspec ( method ) args = argspec [ 0 ] if args and args [ 0 ] == 'self' : return argspec if hasattr ( method , '__func__' ) : method = method . __func__ if not hasattr ( method , '__closure__' ) or method . __closure__ is None : raise DecoratorCompatibilityError closure = method . __closure__ for cell in closure : inner_method = cell . cell_contents if inner_method is method : continue if not inspect . isfunction ( inner_method ) and not inspect . ismethod ( inner_method ) : continue true_argspec = get_true_argspec ( inner_method ) if true_argspec : return true_argspec
Drills through layers of decorators attempting to locate the actual argspec for the method .
13,907
def setup_installed_apps ( cls ) : cls . _installed_apps = cls . _app . config . get ( "INSTALLED_APPS" , [ ] ) if cls . _installed_apps : def import_app ( module , props = { } ) : _ = werkzeug . import_string ( module ) setattr ( _ , "__options__" , utils . dict_dot ( props ) ) for k in cls . _installed_apps : if isinstance ( k , six . string_types ) : import_app ( k , { } ) elif isinstance ( k , tuple ) : import_app ( k [ 0 ] , k [ 1 ] ) elif isinstance ( k , list ) : for t in k : import_app ( t [ 0 ] , t [ 1 ] )
Import 3rd-party applications along with their associated properties.
13,908
def _add_asset_bundle ( cls , path ) : f = "%s/assets.yml" % path if os . path . isfile ( f ) : cls . _asset_bundles . add ( f )
Add a webassets bundle yml file
13,909
def _setup_db ( cls ) : uri = cls . _app . config . get ( "DB_URL" ) if uri : db . connect__ ( uri , cls . _app )
Setup the DB connection if DB_URL is set
13,910
def parse_options ( cls , options ) : options = options . copy ( ) subdomain = options . pop ( 'subdomain' , None ) endpoint = options . pop ( 'endpoint' , None ) return subdomain , endpoint , options ,
Extracts subdomain and endpoint values from the options dict and returns them along with a new dict without those values .
13,911
def get_base_route ( cls ) : base_route = cls . __name__ . lower ( ) if cls . base_route is not None : base_route = cls . base_route base_rule = parse_rule ( base_route ) cls . base_args = [ r [ 2 ] for r in base_rule ] return base_route . strip ( "/" )
Returns the route base to use for the current class .
13,912
def find_gene_by_name ( self , gene_name : str ) -> Gene : for gene in self . genes : if gene . name == gene_name : return gene raise AttributeError ( f'gene "{gene_name}" does not exist' )
Find and return a gene in the influence graph with the given name . Raise an AttributeError if there is no gene in the graph with the given name .
13,913
def find_multiplex_by_name ( self , multiplex_name : str ) -> Multiplex : for multiplex in self . multiplexes : if multiplex . name == multiplex_name : return multiplex raise AttributeError ( f'multiplex "{multiplex_name}" does not exist' )
Find and return a multiplex in the influence graph with the given name . Raise an AttributeError if there is no multiplex in the graph with the given name .
13,914
def all_states ( self ) -> Tuple [ State , ... ] : return tuple ( self . _transform_list_of_states_to_state ( states ) for states in self . _cartesian_product_of_every_states_of_each_genes ( ) )
Return all the possible states of this influence graph .
13,915
def _cartesian_product_of_every_states_of_each_genes ( self ) -> Tuple [ Tuple [ int , ... ] ] : if not self . genes : return ( ) return tuple ( product ( * [ gene . states for gene in self . genes ] ) )
Private method which returns the cartesian product of the states of the genes in the model. It represents all the possible states for a given model.
13,916
def _transform_list_of_states_to_state ( self , state : List [ int ] ) -> State : return State ( { gene : state [ i ] for i , gene in enumerate ( self . genes ) } )
Private method which transforms a list containing the state of each gene in the model into a State object.
13,917
def read_sha1 ( file_path , buf_size = None , start_byte = 0 , read_size = None , extra_hashers = [ ] , ) : read_size = read_size or os . stat ( file_path ) . st_size buf_size = buf_size or DEFAULT_BUFFER_SIZE data_read = 0 total_sha1 = hashlib . sha1 ( ) while data_read < read_size : with open ( file_path , 'rb' , buffering = 0 ) as f : f . seek ( start_byte ) data = f . read ( min ( buf_size , read_size - data_read ) ) assert ( len ( data ) > 0 ) total_sha1 . update ( data ) for hasher in extra_hashers : hasher . update ( data ) data_read += len ( data ) start_byte += len ( data ) assert ( data_read == read_size ) return total_sha1
Determines the sha1 hash of a file in chunks to prevent loading the entire file at once into memory
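The chunked-hashing technique in its simplest form (read_sha1 above adds seeking, extra hashers, and re-opening per chunk):

import hashlib

def sha1_of_file(path, buf_size=64 * 1024):
    # Hash in fixed-size chunks so memory use stays constant
    # regardless of file size.
    h = hashlib.sha1()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(buf_size)
            if not chunk:
                break
            h.update(chunk)
    return h.hexdigest()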
13,918
def verify_uploaded_file ( self , destination_folder_id , source_path , verbose = True , ) : source_file_size = os . stat ( source_path ) . st_size total_part_size = 0 file_position = 0 uploaded_box_file_ids = self . find_file ( destination_folder_id , os . path . basename ( source_path ) ) total_sha1 = hashlib . sha1 ( ) for i , file_id in enumerate ( uploaded_box_file_ids ) : file_info = self . client . file ( file_id = file_id ) . get ( ) uploaded_sha1 = file_info . response_object [ 'sha1' ] uploaded_size = file_info . response_object [ 'size' ] part_sha1 = read_sha1 ( source_path , start_byte = file_position , read_size = uploaded_size , extra_hashers = [ total_sha1 ] ) if part_sha1 . hexdigest ( ) != uploaded_sha1 : print ( '\n' ) print ( 'Part sha1: ' + part_sha1 . hexdigest ( ) ) print ( 'Uploaded sha1: ' + uploaded_sha1 ) print ( 'Sha1 hash of uploaded file {0} ({1}) does not match' . format ( file_info . response_object [ 'name' ] , file_id ) ) return False file_position += uploaded_size total_part_size += uploaded_size if len ( uploaded_box_file_ids ) > 1 : print ( 'Finished verifying part {0} of {1} of {2}' . format ( i + 1 , len ( uploaded_box_file_ids ) , file_id ) ) assert ( source_file_size == total_part_size ) if verbose : print ( 'Verified uploaded file {0} ({1}) with sha1: {2}' . format ( source_path , file_id , total_sha1 . hexdigest ( ) ) ) return True
Verifies the integrity of a file uploaded to Box
13,919
def handle_resourcelist ( ltext , ** kwargs ) : base = kwargs . get ( 'base' , VERSA_BASEIRI ) model = kwargs . get ( 'model' ) iris = ltext . strip ( ) . split ( ) newlist = model . generate_resource ( ) for i in iris : model . add ( newlist , VERSA_BASEIRI + 'item' , I ( iri . absolutize ( i , base ) ) ) return newlist
A helper that converts lists of resources from a textual format such as Markdown, including absolutizing relative IRIs.
13,920
def handle_resourceset ( ltext , ** kwargs ) : fullprop = kwargs . get ( 'fullprop' ) rid = kwargs . get ( 'rid' ) base = kwargs . get ( 'base' , VERSA_BASEIRI ) model = kwargs . get ( 'model' ) iris = ltext . strip ( ) . split ( ) for i in iris : model . add ( rid , fullprop , I ( iri . absolutize ( i , base ) ) ) return None
A helper that converts sets of resources from a textual format such as Markdown, including absolutizing relative IRIs.
13,921
def create_cache_database ( self ) : conn = sqlite3 . connect ( self . database ) conn . text_factory = str c = conn . cursor ( ) c . execute ( ) c . execute ( ) c . execute ( ) conn . commit ( ) conn . close ( )
Create a new SQLite3 database for use with Cache objects
13,922
def __exists_row_not_too_old ( self , row ) : if row is None : return False record_time = dateutil . parser . parse ( row [ 2 ] ) now = datetime . datetime . now ( dateutil . tz . gettz ( ) ) age = ( now - record_time ) . total_seconds ( ) if age > self . max_age : return False return True
Check if the given row exists and is not too old
13,923
def has_item ( self , item_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM items WHERE url=?" , ( str ( item_url ) , ) ) row = c . fetchone ( ) c . close ( ) return self . __exists_row_not_too_old ( row )
Check if the metadata for the given item is present in the cache
13,924
def has_document ( self , doc_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) row = c . fetchone ( ) c . close ( ) return self . __exists_row_not_too_old ( row )
Check if the content of the given document is present in the cache
13,925
def get_document ( self , doc_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) row = c . fetchone ( ) c . close ( ) if row is None : raise ValueError ( "Item not present in cache" ) file_path = row [ 1 ] try : with open ( file_path , 'rb' ) as f : return f . read ( ) except IOError as e : raise IOError ( "Error reading file " + file_path + " to retrieve document " + doc_url + ": " + e . message )
Retrieve the content for the given document from the cache .
13,926
def get_primary_text ( self , item_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM primary_texts WHERE item_url=?" , ( str ( item_url ) , ) ) row = c . fetchone ( ) c . close ( ) if row is None : raise ValueError ( "Item not present in cache" ) return row [ 1 ]
Retrieve the primary text for the given item from the cache .
13,927
def add_item ( self , item_url , item_metadata ) : c = self . conn . cursor ( ) c . execute ( "DELETE FROM items WHERE url=?" , ( str ( item_url ) , ) ) self . conn . commit ( ) c . execute ( "INSERT INTO items VALUES (?, ?, ?)" , ( str ( item_url ) , item_metadata , self . __now_iso_8601 ( ) ) ) self . conn . commit ( ) c . close ( )
Add the given item to the cache database updating the existing metadata if the item is already present
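The delete-then-insert upsert used by add_item, in isolation against an in-memory database (the table layout is assumed from the queries above):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE items (url TEXT, metadata TEXT, stamp TEXT)')
url, metadata, stamp = 'http://example.org/item/1', '{}', '2024-01-01T00:00:00'
conn.execute('DELETE FROM items WHERE url=?', (url,))  # drop any stale row
conn.execute('INSERT INTO items VALUES (?, ?, ?)', (url, metadata, stamp))
conn.commit()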
13,928
def add_document ( self , doc_url , data ) : file_path = self . __generate_filepath ( ) with open ( file_path , 'wb' ) as f : f . write ( data ) c = self . conn . cursor ( ) c . execute ( "SELECT * FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) for row in c . fetchall ( ) : old_file_path = row [ 1 ] if os . path . isfile ( old_file_path ) : os . unlink ( old_file_path ) c . execute ( "DELETE FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) self . conn . commit ( ) c . execute ( "INSERT INTO documents VALUES (?, ?, ?)" , ( str ( doc_url ) , file_path , self . __now_iso_8601 ( ) ) ) self . conn . commit ( ) c . close ( )
Add the given document to the cache updating the existing content data if the document is already present
13,929
def add_primary_text ( self , item_url , primary_text ) : c = self . conn . cursor ( ) c . execute ( "DELETE FROM primary_texts WHERE item_url=?" , ( str ( item_url ) , ) ) self . conn . commit ( ) c . execute ( "INSERT INTO primary_texts VALUES (?, ?, ?)" , ( str ( item_url ) , primary_text , self . __now_iso_8601 ( ) ) ) self . conn . commit ( ) c . close ( )
Add the given primary text to the cache database updating the existing record if the primary text is already present
13,930
async def profile ( self , ctx , tag ) : if not self . check_valid_tag ( tag ) : return await ctx . send ( 'Invalid tag!' ) profile = await self . cr . get_profile ( tag ) em = discord . Embed ( color = 0x00FFFFF ) em . set_author ( name = str ( profile ) , icon_url = profile . clan_badge_url ) em . set_thumbnail ( url = profile . arena . badge_url ) for attr in self . cdir ( profile ) : value = getattr ( profile , attr ) if not callable ( value ) : em . add_field ( name = attr . replace ( '_' , ' ' ) . title ( ) , value = str ( value ) ) await ctx . send ( embed = em )
Example command for use inside a discord bot cog .
13,931
def write_remaining ( self ) : if not self . results : return with db . execution_context ( ) : with db . atomic ( ) : Result . insert_many ( self . results ) . execute ( ) del self . results [ : ]
Write the remaining stack content.
13,932
def configure ( project_path , config_file = None ) : if config_file is None : config_file = os . path . join ( project_path , 'config.json' ) try : with open ( config_file , 'r' ) as f : config = json . load ( f ) except ValueError as e : raise OctConfigurationError ( "Configuration setting failed with error: %s" % e ) for key in REQUIRED_CONFIG_KEYS : if key not in config : raise OctConfigurationError ( "Error: the required configuration key %s is not defined" % key ) return config
Get the configuration of the test and return it as a config object
13,933
def configure_for_turret ( project_name , config_file ) : config = configure ( project_name , config_file ) for key in WARNING_CONFIG_KEYS : if key not in config : print ( "WARNING: %s configuration key not present, the value will be set to default value" % key ) common_config = { 'hq_address' : config . get ( 'hq_address' , '127.0.0.1' ) , 'hq_publisher' : config . get ( 'publish_port' , 5000 ) , 'hq_rc' : config . get ( 'rc_port' , 5001 ) , 'turrets_requirements' : config . get ( 'turrets_requirements' , [ ] ) } configs = [ ] for turret in config [ 'turrets' ] : if isinstance ( turret , six . string_types ) : turret = load_turret_config ( project_name , turret ) turret . update ( common_config ) turret . update ( config . get ( 'extra_turret_config' , { } ) ) configs . append ( turret ) return configs
Load the configuration file into a Python dict and check for keys that will be set to default values if not present.
13,934
def get_db_uri ( config , output_dir ) : db_config = config . get ( "results_database" , { "db_uri" : "default" } ) if db_config [ 'db_uri' ] == 'default' : return os . path . join ( output_dir , "results.sqlite" ) return db_config [ 'db_uri' ]
Process the results_database parameters in config to format them for the set-database function.
13,935
def update ( self ) : self . json = c . get_document ( self . uri . did ) . json ( ) self . e_list = c . element_list ( self . uri . as_dict ( ) ) . json ( )
Make all client calls needed to update this instance from Onshape.
13,936
def find_element ( self , name , type = ElementType . ANY ) : for e in self . e_list : if type . value and e [ 'elementType' ] != type . value : continue if e [ "name" ] == name : uri = self . uri uri . eid = e [ "id" ] return uri
Find an element in the document with the given name - could be a PartStudio, Assembly, or blob.
13,937
def beta_array ( C , HIGHSCALE , * args , ** kwargs ) : beta_odict = beta ( C , HIGHSCALE , * args , ** kwargs ) return np . hstack ( [ np . asarray ( b ) . ravel ( ) for b in beta_odict . values ( ) ] )
Return the beta functions of all SM parameters and SMEFT Wilson coefficients as a 1D numpy array .
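The flattening step on its own, with made-up values standing in for the dict that beta() returns:

import numpy as np
from collections import OrderedDict

beta_odict = OrderedDict([('g', 0.1), ('Gu', np.ones((3, 3)))])
flat = np.hstack([np.asarray(b).ravel() for b in beta_odict.values()])
# flat.shape == (10,): one scalar followed by nine matrix entries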
13,938
def _search ( self , query , search_term ) : criterias = mongoengine . Q ( ) rel_criterias = mongoengine . Q ( ) terms = shlex . split ( search_term ) if len ( terms ) == 1 and re . match ( RE_OBJECTID , terms [ 0 ] ) : q = query . filter ( id = bson . ObjectId ( terms [ 0 ] ) ) if q . count ( ) == 1 : return q for term in terms : op , term = parse_like_term ( term ) if op == 'contains' : op = 'icontains' criteria = mongoengine . Q ( ) for field in self . _search_fields : if isinstance ( field , mongoengine . fields . ReferenceField ) : rel_model = field . document_type rel_fields = ( getattr ( self , 'column_searchable_refs' , { } ) . get ( field . name , { } ) . get ( 'fields' , [ 'id' ] ) ) if rel_fields == [ 'id' ] and not re . match ( RE_OBJECTID , term ) : continue ids = [ o . id for o in search_relative_field ( rel_model , rel_fields , term ) ] rel_criterias |= mongoengine . Q ( ** { '%s__in' % field . name : ids } ) elif isinstance ( field , mongoengine . fields . ListField ) : if not isinstance ( field . field , mongoengine . fields . ReferenceField ) : continue rel_model = field . field . document_type_obj rel_fields = ( getattr ( self , 'column_searchable_refs' , { } ) . get ( field . name , { } ) . get ( 'fields' , 'id' ) ) ids = [ o . id for o in search_relative_field ( rel_model , rel_fields , term ) ] rel_criterias |= mongoengine . Q ( ** { '%s__in' % field . name : ids } ) else : flt = { '%s__%s' % ( field . name , op ) : term } q = mongoengine . Q ( ** flt ) criteria |= q criterias &= criteria return query . filter ( criterias | rel_criterias )
Improved search between words .
13,939
def set_data_from_iterable ( self , frames , values , labels = None ) : if not isinstance ( frames , collections . Iterable ) : raise TypeError , "frames must be an iterable" if not isinstance ( values , collections . Iterable ) : raise TypeError , "values must be an iterable" assert ( len ( frames ) == len ( values ) ) self . frames = frames self . values = values if labels is None : self . label2int [ 'New Point' ] = 0 self . int2label [ 0 ] = 'New Point' self . labels = [ 0 for i in xrange ( len ( frames ) ) ] else : if not isinstance ( labels , collections . Iterable ) : raise TypeError , "labels must be an iterable" for l in labels : if l not in self . label2int : self . label2int [ l ] = len ( self . label2int ) self . int2label [ len ( self . int2label ) ] = l self . labels . append ( self . label2int [ l ] )
Initialize a dataset structure from iterable parameters
13,940
def writexml ( self , writer , indent = "" , addindent = "" , newl = "" ) : writer . write ( '%s<dataset id="%s" dimensions="%s">%s' % ( indent , self . datasetid , self . dimensions , newl ) ) indent2 = indent + addindent for l , x , y in zip ( self . labels , self . frames , self . values ) : writer . write ( '%s<point label="%s" frame="%d" value="%f"/>%s' % ( indent2 , self . int2label [ l ] , x , y , newl ) ) writer . write ( '%s</dataset>%s' % ( indent , newl ) )
Write the continuous dataset using Sonic Visualiser XML conventions.
13,941
def gaus_pdf ( x , mean , std ) : return exp ( - ( ( x - mean ) / std ) ** 2 / 2 ) / sqrt ( 2 * pi ) / std
Gaussian distribution's probability density function.
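The same function made self-contained with its imports, plus a quick sanity check against the standard normal, whose density at 0 is 1/sqrt(2*pi):

from math import exp, pi, sqrt

def gaus_pdf(x, mean, std):
    return exp(-((x - mean) / std) ** 2 / 2) / sqrt(2 * pi) / std

assert abs(gaus_pdf(0.0, 0.0, 1.0) - 1.0 / sqrt(2.0 * pi)) < 1e-12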
13,942
def logistic ( x , x0 , k , L ) : return L / ( 1 + exp ( - k * ( x - x0 ) ) )
Logistic function .
13,943
def populate_menv ( menv , agent_cls_name , log_folder ) : gs = menv . gs n_agents = gs [ 0 ] * gs [ 1 ] n_slaves = len ( menv . addrs ) logger . info ( "Populating {} with {} agents" . format ( HOST , n_agents * n_slaves ) ) run ( menv . populate ( agent_cls_name , n_agents , log_folder = log_folder ) ) logger . info ( "Populating complete." )
Populate given multiprocessing grid environment with agents .
13,944
def get_slave_addrs ( mgr_addr , N ) : return [ ( HOST , p ) for p in range ( mgr_addr + 1 , mgr_addr + 1 + N ) ]
Get ports for the slave environments .
13,945
def weighted_average ( rule , artifact ) : e = 0 w = 0 for i in range ( len ( rule . R ) ) : r = rule . R [ i ] ( artifact ) if r is not None : e += r * rule . W [ i ] w += abs ( rule . W [ i ] ) if w == 0.0 : return 0.0 return e / w
Evaluate the artifact's value as the weighted average of the values returned by the rule's subrules.
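A worked example of the evaluation above: subrules returning 0.8, 0.4, and None with weights 2, 1, and 5; the None result contributes nothing to either sum:

rules = [lambda a: 0.8, lambda a: 0.4, lambda a: None]
weights = [2, 1, 5]

e = w = 0
for r, wt in zip(rules, weights):
    value = r(None)
    if value is not None:
        e += value * wt
        w += abs(wt)
print(e / w if w else 0.0)  # (0.8*2 + 0.4*1) / 3 = 0.666...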
13,946
def minimum ( rule , artifact ) : m = 1.0 for i in range ( len ( rule . R ) ) : e = rule . R [ i ] ( artifact ) if e is not None : if e < m : m = e return m
Evaluate the artifact's value as the minimum of the values returned by the rule's subrules.
13,947
def add_subrule ( self , subrule , weight ) : if not issubclass ( subrule . __class__ , ( Rule , RuleLeaf ) ) : raise TypeError ( "Rule's class must be (subclass of) {} or {}, got " "{}." . format ( Rule , RuleLeaf , subrule . __class__ ) ) self . __domains = set . union ( self . __domains , subrule . domains ) self . R . append ( subrule ) self . W . append ( weight )
Add subrule to the rule .
13,948
def parse_seqres ( self , pdb ) : seqresre = re . compile ( "SEQRES" ) seqreslines = [ line for line in pdb . lines if seqresre . match ( line ) ] for line in seqreslines : chain = line [ 11 ] resnames = line [ 19 : 70 ] . strip ( ) self . setdefault ( chain , [ ] ) self [ chain ] += resnames . split ( )
Parse the SEQRES entries into the object
13,949
def parse_atoms ( self , pdb ) : atomre = re . compile ( "ATOM" ) atomlines = [ line for line in pdb . lines if atomre . match ( line ) ] chainresnums = { } for line in atomlines : chain = line [ 21 ] resname = line [ 17 : 20 ] resnum = line [ 22 : 27 ] chainresnums . setdefault ( chain , [ ] ) if resnum in chainresnums [ chain ] : assert self [ chain ] [ chainresnums [ chain ] . index ( resnum ) ] == resname else : if resnum [ - 1 ] == ' ' : self . setdefault ( chain , [ ] ) self [ chain ] += [ resname ] chainresnums [ chain ] += [ resnum ] return chainresnums
Parse the ATOM entries into the object
13,950
def seqres_lines ( self ) : lines = [ ] for chain in self . keys ( ) : seq = self [ chain ] serNum = 1 startidx = 0 while startidx < len ( seq ) : endidx = min ( startidx + 13 , len ( seq ) ) lines += [ "SEQRES %2i %s %4i %s\n" % ( serNum , chain , len ( seq ) , " " . join ( seq [ startidx : endidx ] ) ) ] serNum += 1 startidx += 13 return lines
Generate SEQRES lines representing the contents
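The 13-residue-per-line chunking in isolation (field widths approximated from the format string above, with the chain hardcoded to A):

seq = ['MET', 'ALA', 'GLY'] * 10  # 30 residues on chain A
lines = []
for ser_num, start in enumerate(range(0, len(seq), 13), start=1):
    chunk = seq[start:start + 13]
    lines.append('SEQRES %2i A %4i  %s' % (ser_num, len(seq), ' '.join(chunk)))
# 30 residues -> 3 SEQRES lines (13 + 13 + 4)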
13,951
def replace_seqres ( self , pdb , update_atoms = True ) : newpdb = PDB ( ) inserted_seqres = False entries_before_seqres = set ( [ "HEADER" , "OBSLTE" , "TITLE" , "CAVEAT" , "COMPND" , "SOURCE" , "KEYWDS" , "EXPDTA" , "AUTHOR" , "REVDAT" , "SPRSDE" , "JRNL" , "REMARK" , "DBREF" , "SEQADV" ] ) mutated_resids = { } if update_atoms : old_seqs = ChainSequences ( ) chainresnums = old_seqs . parse_atoms ( pdb ) assert self . keys ( ) == old_seqs . keys ( ) for chain in self . keys ( ) : assert len ( self [ chain ] ) == len ( old_seqs [ chain ] ) for i in xrange ( len ( self [ chain ] ) ) : if self [ chain ] [ i ] != old_seqs [ chain ] [ i ] : resid = chain + chainresnums [ chain ] [ i ] mutated_resids [ resid ] = self [ chain ] [ i ] for line in pdb . lines : entry = line [ 0 : 6 ] if ( not inserted_seqres ) and entry not in entries_before_seqres : inserted_seqres = True newpdb . lines += self . seqres_lines ( ) if update_atoms and entry == "ATOM " : resid = line [ 21 : 27 ] atom = line [ 12 : 16 ] . strip ( ) if not mutated_resids . has_key ( resid ) : newpdb . lines += [ line ] else : newpdb . lines += [ line [ : 17 ] + mutated_resids [ resid ] + line [ 20 : ] ] elif entry != "SEQRES" : newpdb . lines += [ line ] if update_atoms : newpdb . remove_nonbackbone_atoms ( mutated_resids . keys ( ) ) return newpdb
Replace SEQRES lines with a new sequence optionally removing mutated sidechains
13,952
def has_host_match ( log_data , hosts ) : hostname = getattr ( log_data , 'host' , None ) if hostname and hostname not in host_cache : for host_pattern in hosts : if host_pattern . search ( hostname ) is not None : host_cache . add ( hostname ) return True else : return False return True
Match the data against a list of hostname patterns. If the log line data doesn't include host information, consider the line matched.
13,953
def run ( self , app ) : GlimLog . info ( 'Glim server started on %s environment' % self . args . env ) try : kwargs = Config . get ( 'app.server.options' ) run ( app . wsgi , host = Config . get ( 'app.server.host' ) , port = Config . get ( 'app.server.port' ) , debug = Config . get ( 'app.server.debugger' ) , reloader = Config . get ( 'app.server.reloader' ) , server = Config . get ( 'app.server.wsgi' ) , ** kwargs ) except Exception as e : print ( traceback . format_exc ( ) ) exit ( )
Starts the web server with the given configuration.
13,954
def get_symmetrical_std_devs ( values , ignore_zeros = True ) : pos_stdeviation = get_symmetrical_std_dev ( values , True , ignore_zeros = ignore_zeros ) neg_stdeviation = get_symmetrical_std_dev ( values , False , ignore_zeros = ignore_zeros ) return pos_stdeviation , neg_stdeviation
Takes a list of values and splits it into positive and negative values . For both of these subsets a symmetrical distribution is created by mirroring each value along the origin and the standard deviation for both subsets is returned .
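get_symmetrical_std_dev itself is not shown; one plausible sketch following the description above: keep one sign's values, and since the mirrored set has mean zero, its standard deviation reduces to sqrt(mean(x**2)):

import math

def symmetrical_std_dev(values, positive, ignore_zeros=True):
    subset = [v for v in values if (v > 0) == positive and v != 0]
    if not ignore_zeros:
        subset += [v for v in values if v == 0]
    if not subset:
        return 0.0
    # std dev of the subset union its mirror image about the origin
    return math.sqrt(sum(v * v for v in subset) / len(subset))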
13,955
def get_std_xy_dataset_statistics ( x_values , y_values , expect_negative_correlation = False , STDev_cutoff = 1.0 ) : assert ( len ( x_values ) == len ( y_values ) ) csv_lines = [ 'ID,X,Y' ] + [ ',' . join ( map ( str , [ c + 1 , x_values [ c ] , y_values [ c ] ] ) ) for c in xrange ( len ( x_values ) ) ] data = parse_csv ( csv_lines , expect_negative_correlation = expect_negative_correlation , STDev_cutoff = STDev_cutoff ) assert ( len ( data [ 'predictions' ] ) == 1 ) assert ( 1 in data [ 'predictions' ] ) assert ( data [ 'predictions' ] [ 1 ] [ 'name' ] == 'Y' ) summary_data = data [ 'predictions' ] [ 1 ] stats = { } for spair in field_name_mapper : stats [ spair [ 1 ] ] = summary_data [ spair [ 0 ] ] if stats [ 'std_warnings' ] : stats [ 'std_warnings' ] = '\n' . join ( stats [ 'std_warnings' ] ) else : stats [ 'std_warnings' ] = None return stats
Calls parse_csv and returns the analysis in a format similar to get_xy_dataset_statistics in klab . stats . misc .
13,956
def active_multiplex ( self , state : 'State' ) -> Tuple [ 'Multiplex' ] : return tuple ( multiplex for multiplex in self . multiplexes if multiplex . is_active ( state ) )
Return a tuple of all the active multiplexes in the given state.
13,957
def sanitized_name ( self ) : a = re . split ( "[:/]" , self . name ) return "_" . join ( [ i for i in a if len ( i ) > 0 ] )
Sanitized name of the agent, used for file and directory creation.
13,958
def get_connections ( self , data = False ) : if data : return self . _connections return list ( self . _connections . keys ( ) )
Get the agent's current connections.
13,959
def publish ( self , artifact ) : self . env . add_artifact ( artifact ) self . _log ( logging . DEBUG , "Published {} to domain." . format ( artifact ) )
Publish an artifact to the agent's environment.
13,960
async def ask_opinion ( self , addr , artifact ) : remote_agent = await self . env . connect ( addr ) return await remote_agent . evaluate ( artifact )
Ask an agent's opinion about an artifact.
13,961
def localization_feature ( app ) : app . config [ 'BABEL_DEFAULT_LOCALE' ] = app . config [ 'DEFAULT_LOCALE' ] app . config [ 'BABEL_DEFAULT_TIMEZONE' ] = app . config [ 'DEFAULT_TIMEZONE' ] babel = Babel ( ) babel . init_app ( app )
Localization feature. This will initialize support for translations and localization of values such as numbers, money, dates, and timezone formatting.
13,962
def enrich_json_objects_by_object_type ( request , value ) : time_start_globally = time ( ) if isinstance ( value , list ) : json = [ x . to_json ( ) if hasattr ( x , "to_json" ) else x for x in value ] else : if isinstance ( value , dict ) : json = value else : json = value . to_json ( ) objects , nested = _collect_json_objects ( json , by = 'object_type' ) for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER ( ) : if len ( enricher_info [ 'object_types' ] ) > 0 : enricher_objects = flatten ( [ objects . get ( object_type , [ ] ) for object_type in enricher_info [ 'object_types' ] ] ) enricher_nested = any ( [ nested . get ( object_type , False ) for object_type in enricher_info [ 'object_types' ] ] ) else : enricher_objects = flatten ( objects . values ( ) ) enricher_nested = any ( nested . values ( ) ) if len ( enricher_objects ) > 0 : time_start = time ( ) enricher_info [ 'enricher' ] ( request , enricher_objects , enricher_nested ) LOGGER . debug ( 'enrichment "{}" took {} seconds' . format ( enricher_info [ 'enricher_name' ] , time ( ) - time_start ) ) if not enricher_info [ 'pure' ] : objects , nested = _collect_json_objects ( json , by = 'object_type' ) LOGGER . debug ( 'The whole enrichment of json objects by their object_type took {} seconds.' . format ( time ( ) - time_start_globally ) ) return json
Take the given value and start enrichment by object_type.
13,963
def enrich_by_predicate ( request , json , fun , predicate , skip_nested = False , ** kwargs ) : time_start = time ( ) collected = [ ] memory = { 'nested' : False } def _collect ( json_inner , nested ) : if nested and skip_nested : return if isinstance ( json_inner , list ) : list ( map ( lambda x : _collect ( x , nested ) , json_inner ) ) elif isinstance ( json_inner , dict ) : if predicate ( json_inner ) : collected . append ( json_inner ) if nested : memory [ 'nested' ] = True list ( map ( lambda x : _collect ( x , True ) , list ( json_inner . values ( ) ) ) ) _collect ( json , False ) if len ( collected ) > 0 : fun ( request , collected , memory [ 'nested' ] , ** kwargs ) LOGGER . debug ( "enrichment of JSON by predicate by '%s' function took %s seconds" , fun . __name__ , ( time ( ) - time_start ) ) return json
Take the JSON, find all its subparts satisfying the given condition, and transform them by the given function. Other keyword arguments are passed to the function.
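A hypothetical use, assuming the module-level LOGGER and time are in scope: stamp every dict in the tree that carries an 'id' key:

def add_flag(request, objects, nested):
    for obj in objects:
        obj['seen'] = True

data = {'id': 1, 'child': {'id': 2}}
enrich_by_predicate(None, data, add_flag, lambda d: 'id' in d)
# data == {'id': 1, 'seen': True, 'child': {'id': 2, 'seen': True}}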
13,964
def enrich_by_object_type ( request , json , fun , object_type , skip_nested = False , ** kwargs ) : if not isinstance ( object_type , list ) : object_type = [ object_type ] predicate = lambda x : 'object_type' in x and x [ 'object_type' ] in object_type return enrich_by_predicate ( request , json , fun , predicate , skip_nested = skip_nested , ** kwargs )
Take the JSON, find its subparts having the given object type, and transform them by the given function. Other keyword arguments are passed to the function.
13,965
def change_parent ( sender , instance , ** kwargs ) : if instance . id is None : return if len ( { 'term' , 'term_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'term' ] [ 0 ] if 'term' in diff else diff [ 'term_id' ] [ 0 ] child_id = instance . item_id if parent is not None : parent_id = parent . item_id if isinstance ( parent , Term ) else Term . objects . get ( pk = parent ) . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) ItemRelation . objects . get_or_create ( parent_id = instance . term . item_id , child_id = child_id , visible = True ) if len ( { 'term_secondary' , 'term_secondary_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff child_id = instance . item_id parent = diff [ 'term_secondary' ] [ 0 ] if 'term_secondary' in diff else diff [ 'term_secondary_id' ] [ 0 ] if parent is not None : parent_id = parent . item_id if isinstance ( parent , Term ) else Term . objects . get ( pk = parent ) . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) if instance . term_secondary is not None or instance . term_secondary_id is not None : ItemRelation . objects . get_or_create ( parent_id = instance . term_secondary . item_id , child_id = child_id , visible = True ) if len ( { 'context' , 'context_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'context' ] [ 0 ] if 'context' in diff else diff [ 'context_id' ] [ 0 ] child_id = instance . item_id if parent is not None : parent_id = parent . item_id if isinstance ( parent , Context ) else Context . objects . get ( pk = parent ) . item_id ItemRelation . objects . filter ( parent_id = parent_id , child_id = child_id ) . delete ( ) ItemRelation . objects . get_or_create ( parent_id = instance . context . item_id , child_id = child_id , visible = True )
When the given flashcard has changed, look at the term and context and change the corresponding item relation.
13,966
def example ( ) : b = Bonsai . retrieve ( '1lfa' , cache_dir = '/tmp' ) search_radius = 10.0 atom_of_interest = b . get_atom ( 1095 ) nearby_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius ) for na in nearby_atoms : assert ( na - atom_of_interest <= search_radius ) for fa in b . get_atom_set_complement ( nearby_atoms ) : assert ( fa - atom_of_interest > search_radius ) nearby_heavy_atoms = b . find_heavy_atoms_near_atom ( atom_of_interest , search_radius ) nearby_ca_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius , atom_names_to_include = [ 'CA' ] ) nearby_c_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius , atom_names_to_include = b . get_atom_names_by_group ( [ 'C' ] ) )
This section gives examples of how to use the module .
13,967
def from_non_aligned_residue_IDs ( Chain , StartResidueID , EndResidueID , Sequence = None ) : return PDBSection ( Chain , PDB . ResidueID2String ( StartResidueID ) , PDB . ResidueID2String ( EndResidueID ) , Sequence = Sequence )
A more forgiving method that does not care about the padding of the residue IDs .
13,968
def bin_atoms ( self ) : low_point = numpy . array ( [ self . min_x , self . min_y , self . min_z ] ) high_point = numpy . array ( [ self . max_x , self . max_y , self . max_z ] ) atom_bin_dimensions = numpy . ceil ( ( high_point - low_point ) / self . bin_size ) self . atom_bin_dimensions = ( int ( atom_bin_dimensions [ 0 ] ) - 1 , int ( atom_bin_dimensions [ 1 ] ) - 1 , int ( atom_bin_dimensions [ 2 ] ) - 1 ) atom_bins = [ ] for x in range ( int ( atom_bin_dimensions [ 0 ] ) ) : atom_bins . append ( [ ] ) for y in range ( int ( atom_bin_dimensions [ 1 ] ) ) : atom_bins [ x ] . append ( [ ] ) for z in range ( int ( atom_bin_dimensions [ 2 ] ) ) : atom_bins [ x ] [ y ] . append ( Bin ( x , y , z ) ) for serial_number , atom in self . atoms . iteritems ( ) : bin_location = numpy . trunc ( ( atom . point - low_point ) / self . bin_size ) bin = atom_bins [ int ( bin_location [ 0 ] ) ] [ int ( bin_location [ 1 ] ) ] [ int ( bin_location [ 2 ] ) ] bin . append ( atom ) atom . set_bin ( bin ) if self . safe_mode : num_atoms = 0 for x in range ( int ( atom_bin_dimensions [ 0 ] ) ) : for y in range ( int ( atom_bin_dimensions [ 1 ] ) ) : for z in range ( int ( atom_bin_dimensions [ 2 ] ) ) : num_atoms += len ( atom_bins [ x ] [ y ] [ z ] ) assert ( num_atoms == len ( self . atoms ) ) blank_section = ( ) for x in range ( int ( atom_bin_dimensions [ 0 ] ) ) : for y in range ( int ( atom_bin_dimensions [ 1 ] ) ) : for z in range ( int ( atom_bin_dimensions [ 2 ] ) ) : if not atom_bins [ x ] [ y ] [ z ] : atom_bins [ x ] [ y ] [ z ] = blank_section self . atom_bins = atom_bins
This function bins the Atoms into fixed - size sections of the protein space in 3D .
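The core binning arithmetic from above, in isolation: map a 3D point to integer bin indices given the bounding-box corner and bin size:

import numpy as np

low_point = np.array([0.0, 0.0, 0.0])
bin_size = 5.0
point = np.array([12.3, 4.1, 9.9])

bin_location = np.trunc((point - low_point) / bin_size).astype(int)
# bin_location == array([2, 0, 1])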
13,969
def find_heavy_atoms_near_atom ( self , source_atom , search_radius , atom_hit_cache = set ( ) , restrict_to_CA = False ) : non_heavy_atoms = self . get_atom_names_by_group ( set ( [ 'H' , 'D' , 'T' ] ) ) return self . find_atoms_near_atom ( source_atom , search_radius , atom_names_to_exclude = non_heavy_atoms , atom_hit_cache = atom_hit_cache , restrict_to_CA = restrict_to_CA )
atom_hit_cache is a set of atom serial numbers which have already been tested . We keep track of these to avoid recalculating the distance .
13,970
def get ( self , attr_name , * args ) : if not isinstance ( attr_name , six . string_types ) : raise TypeError ( 'attr_name must be a str.' ) if '-' in attr_name : attr_name = attr_name . replace ( '-' , '_' ) parent_attr = self attr = getattr ( parent_attr , attr_name , None ) for arg in args : if not isinstance ( arg , six . string_types ) : raise TypeError ( 'each additional argument must be a string. {0} was not a string' . format ( arg ) ) if hasattr ( parent_attr , arg ) : parent_attr = getattr ( parent_attr , arg ) if hasattr ( parent_attr , attr_name ) : attr = getattr ( parent_attr , attr_name ) else : pass return attr
Get the requested attribute from the configuration file. This method will recursively look through the configuration file for the attribute specified and return the last found value or None. Values can be referenced by the key name provided in the configuration file or by that name normalized to snake_case.
13,971
def service_references ( self ) : services_blue_print = self . _scheme_references . get ( 'services' ) if services_blue_print is None : raise LookupError ( 'unable to find any services in the config.' ) return { key . replace ( '-' , '_' ) : key for key in services_blue_print [ 'keys' ] }
Returns a dict mapping normalized service names to their original names.
13,972
def validate ( self ) : if not isinstance ( self . _data , dict ) : raise TypeError ( 'freight forwarder configuration file must be a dict.' ) current_log_level = logger . get_level ( ) if self . _verbose : logger . set_level ( 'DEBUG' ) else : logger . set_level ( 'ERROR' ) logger . info ( 'Starting configuration validation' , extra = { "formatter" : 'config-start' } ) config_data = self . _data . copy ( ) try : self . _walk_tree ( config_data , ROOT_SCHEME ) except ConfigValidationException as e : e . log_error ( ) raise logger . info ( "Config validation passed." , extra = { 'formatter' : 'config-success' } ) logger . set_level ( current_log_level )
Validate the contents of the configuration file . Will return None if validation is successful or raise an error if not .
13,973
def _load ( self ) : if os . path . isdir ( self . _path ) : for file_ext in ( 'yml' , 'yaml' , 'json' ) : test_path = os . path . join ( self . _path , 'freight-forwarder.{0}' . format ( file_ext ) ) if os . path . isfile ( test_path ) : self . _path = test_path break if os . path . isfile ( self . _path ) : file_name , file_extension = os . path . splitext ( self . _path ) with open ( self . _path , 'r' ) as config_file : if file_extension in ( '.yaml' , '.yml' ) : self . _load_yml_config ( config_file . read ( ) ) elif file_extension == '.json' : try : config_data = json . loads ( config_file . read ( ) ) self . _data = normalize_keys ( config_data ) except Exception : raise SyntaxError ( "There is a syntax error in your freight-forwarder config." ) else : raise TypeError ( "Configuration file most be yaml or json." ) else : raise LookupError ( "Was unable to find a freight-forwarder configuration file." )
Load a configuration file . This method will be called when the Config class is instantiated . The configuration file can be json or yaml .
13,974
def _load_yml_config ( self , config_file ) : if not isinstance ( config_file , six . string_types ) : raise TypeError ( 'config_file must be a str.' ) try : def construct_yaml_int ( self , node ) : obj = SafeConstructor . construct_yaml_int ( self , node ) data = ConfigInt ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_float ( self , node ) : obj , = SafeConstructor . construct_yaml_float ( self , node ) data = ConfigFloat ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_str ( self , node ) : obj = SafeConstructor . construct_scalar ( self , node ) assert isinstance ( obj , six . string_types ) data = ConfigUnicode ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_mapping ( self , node ) : obj , = SafeConstructor . construct_yaml_map ( self , node ) data = ConfigDict ( obj , node . start_mark , node . end_mark ) return data def construct_yaml_seq ( self , node ) : obj , = SafeConstructor . construct_yaml_seq ( self , node ) data = ConfigSeq ( obj , node . start_mark , node . end_mark ) return data SafeConstructor . add_constructor ( u'tag:yaml.org,2002:float' , construct_yaml_float ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:int' , construct_yaml_int ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:map' , construct_yaml_mapping ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:seq' , construct_yaml_seq ) SafeConstructor . add_constructor ( u'tag:yaml.org,2002:str' , construct_yaml_str ) data = SafeLoader ( config_file ) . get_data ( ) if data is None : raise AttributeError ( 'The configuration file needs to have data in it.' ) self . _data = normalize_keys ( data , snake_case = False ) except YAMLError as e : if hasattr ( e , 'problem_mark' ) : mark = e . problem_mark raise SyntaxError ( "There is a syntax error in your freight-forwarder config file line: {0} column: {1}" . format ( mark . line + 1 , mark . column + 1 ) ) else : raise SyntaxError ( "There is a syntax error in your freight-forwarder config." )
Loads a YAML str, registers a few constructors for PyYAML, deserializes and normalizes the config data, then assigns the config data to self._data.
13,975
def _create_attr ( self , property_key , data , ancestors ) : if not isinstance ( property_key , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_key ) ) ) if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) previous_element = self normalized_key = normalize_value ( property_key ) . replace ( '-' , '_' ) normalized_ancestor_key = None if ancestors : for ancestor_key , ancestors_value in six . iteritems ( ancestors ) : normalized_ancestor_key = normalize_value ( ancestor_key ) . replace ( '-' , '_' ) if normalized_ancestor_key . lower ( ) == 'root' : continue if not hasattr ( previous_element , normalized_ancestor_key ) : config_attr = ConfigDict ( { } , ancestors_value . start_mark , ancestors_value . end_mark ) config_attr . name = normalized_ancestor_key config_attr . alias = ancestor_key setattr ( previous_element , normalized_ancestor_key , config_attr ) previous_element = getattr ( previous_element , normalized_ancestor_key ) if normalized_key == normalized_ancestor_key : pass else : if isinstance ( data , ConfigNode ) : data . name = normalized_key data . alias = property_key setattr ( previous_element , normalized_key , data )
Dynamically creates attributes on a Config. Also adds a name and alias to each Config object.
13,976
def _collect_unrecognized_values ( self , scheme , data , ancestors ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. type: {0} was passed' . format ( type ( scheme ) ) ) unrecognized_values = { } if isinstance ( data , dict ) : pruned_scheme = [ key for key in scheme . keys ( ) if key not in RESERVED_SCHEME_KEYS and key [ 0 ] not in RESERVED_SCHEME_KEYS ] for key , value in six . iteritems ( data ) : if key in pruned_scheme : continue unrecognized_values [ key ] = value validations = scheme . get ( 'is' ) if validations and 'one_of' in validations : for nested_scheme in validations [ 'one_of' ] : if isinstance ( nested_scheme , dict ) : updated_scheme = self . _update_scheme ( nested_scheme , ancestors ) pruned_scheme = [ key for key in updated_scheme . keys ( ) if key not in RESERVED_SCHEME_KEYS and key [ 0 ] not in RESERVED_SCHEME_KEYS ] for key in pruned_scheme : if key in unrecognized_values : del unrecognized_values [ key ] else : pass return unrecognized_values
Looks for values that aren't defined in the scheme and returns a dict with any unrecognized values found.
13,977
def _update_scheme ( self , scheme , ancestors ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. type: {0} was passed' . format ( type ( scheme ) ) ) definitions = ROOT_SCHEME . get ( '_' ) if 'inherit' in scheme : scheme = self . _scheme_propagation ( scheme , definitions ) updated_scheme = { } for scheme_key in six . iterkeys ( scheme ) : if not isinstance ( scheme_key , six . string_types ) : raise TypeError ( 'scheme keys are required to be strings. type: {0} was passed.' . format ( scheme_key ) ) if '@' in scheme_key : ref = scheme_key [ 1 : ] scheme_reference = self . _scheme_references . get ( ref ) if not scheme_reference : raise ConfigValidationException ( ancestors , ref , scheme_reference , 'required' , scheme ) for reference_key in scheme_reference [ 'keys' ] : scheme_reference [ 'scheme' ] . update ( scheme [ scheme_key ] ) updated_scheme [ reference_key ] = scheme_reference [ 'scheme' ] elif '~' in scheme_key : ref = scheme_key [ 1 : ] scheme_reference = self . _scheme_references . get ( ref ) if not scheme_reference : raise LookupError ( "was unable to find {0} in scheme reference." . format ( ref ) ) for reference_key in scheme_reference [ 'keys' ] : updated_scheme [ reference_key ] = scheme [ scheme_key ] scheme . update ( updated_scheme ) return scheme
Updates the current scheme based on special pre-defined keys and returns a new updated scheme.
13,978
def _walk_tree ( self , data , scheme , ancestors = None , property_name = None , prefix = None ) : if property_name is None : property_name = 'root' order = [ 'registries' ] + [ key for key in scheme . keys ( ) if key not in ( 'registries' , ) ] scheme = OrderedDict ( sorted ( scheme . items ( ) , key = lambda x : order . index ( x [ 0 ] ) ) ) if data is None : return elif not isinstance ( property_name , six . string_types ) : raise TypeError ( 'property_name must be a string.' ) ancestors = self . _update_ancestors ( data , property_name , ancestors ) if isinstance ( ancestors , OrderedDict ) : if list ( ancestors ) [ 0 ] != 'root' : raise LookupError ( 'root must be the first item in ancestors.' ) else : raise TypeError ( 'ancestors must be an OrderedDict. {0} was passed' . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. {0} was passed.' . format ( type ( scheme ) ) ) scheme = self . _update_scheme ( scheme , ancestors ) if property_name is not None and data : data = self . _get_cascading_attr ( property_name , * list ( ancestors ) [ 1 : ] ) if scheme . get ( 'cascading' , False ) else data for err in self . __execute_validations ( scheme . get ( 'is' , { } ) , data , property_name , ancestors , prefix = prefix ) : if err : raise err else : self . _create_attr ( property_name , data , ancestors ) self . __validate_unrecognized_values ( scheme , data , ancestors , prefix ) self . __populate_scheme_references ( scheme , property_name ) self . __validate_config_properties ( scheme , data , ancestors , prefix )
This function takes configuration data and a validation scheme, then walks the configuration tree, validating the configuration data against the scheme provided. Will raise an error on failure, otherwise return None.
13,979
def _update_ancestors ( self , config_data , property_name , ancestors = None ) : if not isinstance ( property_name , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_name ) ) ) if ancestors is None : ancestors = OrderedDict ( [ ( 'root' , config_data ) ] ) elif not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) elif 'root' not in ancestors : raise LookupError ( 'root must be in ancestors. currently in the ancestors chain {0}' . format ( ', ' . join ( ancestors . keys ( ) ) ) ) ancestors = ancestors . copy ( ) for previous_key in list ( ancestors ) [ : : - 1 ] : previous_item = ancestors [ previous_key ] if isinstance ( config_data , dict ) : if property_name in previous_item : ancestors [ property_name ] = config_data break return ancestors
Update ancestors for a specific property .
13,980
def _reference_keys ( self , reference ) : if not isinstance ( reference , six . string_types ) : raise TypeError ( 'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.' . format ( type ( reference ) . __name__ ) ) if '~' in reference : reference = reference [ 1 : ] scheme = self . _scheme_references . get ( reference ) if not scheme : raise LookupError ( "Was unable to find {0} in the scheme references. " "available references {1}" . format ( reference , ', ' . join ( self . _scheme_references . keys ( ) ) ) ) return scheme [ 'keys' ] else : raise AttributeError ( 'references must start with ~. Please update {0} and retry.' . format ( reference ) )
Returns a list of all keys for a given reference.
13,981
def __execute_validations ( self , validations , data , property_name , ancestors , negation = False , prefix = None ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( validations , dict ) : raise TypeError ( 'validations is required to be a dict. type: {1} was passed.' . format ( type ( validations ) ) ) if not isinstance ( property_name , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_name ) ) ) order = [ 'type' , 'required' ] + [ key for key in validations . keys ( ) if key not in ( 'required' , 'type' ) ] ordered_validations = OrderedDict ( sorted ( validations . items ( ) , key = lambda x : order . index ( x [ 0 ] ) ) ) for validation , value in six . iteritems ( ordered_validations ) : if validation in VALIDATORS : if validation == 'not' : for err in self . __execute_validations ( value , data , property_name , ancestors , negation , prefix ) : yield err continue for err in getattr ( self , '_{0}' . format ( validation ) ) ( value , data , property_name , ancestors , negation , prefix ) : yield err else : raise LookupError ( "{0} isn't a validator or reserved scheme key." . format ( validation ) )
Validate the data for a specific configuration value. This method will look up all of the validations provided and dynamically call any validation methods. If a validation fails, an error will be thrown. If no errors are found, attributes will be dynamically created on the Config object for the configuration value.
13,982
def create ( name , url , tournament_type = "single elimination" , ** params ) : params . update ( { "name" : name , "url" : url , "tournament_type" : tournament_type , } ) return api . fetch_and_parse ( "POST" , "tournaments" , "tournament" , ** params )
Create a new tournament .
13,983
def users_feature ( app ) : if not app . config . get ( 'USER_JWT_SECRET' , None ) : raise x . JwtSecretMissing ( 'Please set USER_JWT_SECRET in config' ) app . session_interface = BoilerSessionInterface ( ) user_service . init ( app ) login_manager . init_app ( app ) login_manager . login_view = 'user.login' login_manager . login_message = None @ login_manager . user_loader def load_user ( id ) : return user_service . get ( id ) oauth . init_app ( app ) registry = OauthProviders ( app ) providers = registry . get_providers ( ) with app . app_context ( ) : for provider in providers : if provider not in oauth . remote_apps : oauth . remote_app ( provider , ** providers [ provider ] ) registry . register_token_getter ( provider ) principal . init_app ( app ) @ principal . identity_loader def load_identity ( ) : if current_user . is_authenticated : return Identity ( current_user . id ) session . pop ( 'identity.name' , None ) session . pop ( 'identity.auth_type' , None ) return AnonymousIdentity ( ) @ identity_loaded . connect_via ( app ) def on_identity_loaded ( sender , identity ) : identity . user = current_user if not current_user . is_authenticated : return identity . provides . add ( UserNeed ( current_user . id ) ) for role in current_user . roles : identity . provides . add ( RoleNeed ( role . handle ) )
Add users feature. Allows registering users and assigning groups; instantiates flask-login, flask-principal, and oauth integration.
13,984
def rename_document ( self , did , name ) : payload = { 'name' : name } return self . _api . request ( 'post' , '/api/documents/' + did , body = payload )
Renames the specified document .
13,985
def copy_workspace ( self , uri , new_name ) : payload = { 'isPublic' : True , 'newName' : new_name } return self . _api . request ( 'post' , '/api/documents/' + uri [ 'did' ] + '/workspaces/' + uri [ 'wvm' ] + '/copy' , body = payload )
Copy the current workspace .
13,986
def create_workspace ( self , did , name , version_id = None ) : payload = { 'isPublic' : True , 'name' : name , } if version_id : payload [ 'versionId' ] = version_id return self . _api . request ( 'post' , '/api/documents/d/' + did + '/workspaces' , body = payload )
Create a workspace in the specified document .
13,987
def get_partstudio_tessellatededges ( self , did , wid , eid ) : return self . _api . request ( 'get' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges' )
Gets the tessellation of the edges of all parts in a part studio .
13,988
def upload_blob ( self , did , wid , filepath = './blob.json' ) : chars = string . ascii_letters + string . digits boundary_key = '' . join ( random . choice ( chars ) for i in range ( 8 ) ) mimetype = mimetypes . guess_type ( filepath ) [ 0 ] encoded_filename = os . path . basename ( filepath ) file_content_length = str ( os . path . getsize ( filepath ) ) blob = open ( filepath ) req_headers = { 'Content-Type' : 'multipart/form-data; boundary="%s"' % boundary_key } payload = '--' + boundary_key + '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' + encoded_filename + '\r\n' payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' + file_content_length + '\r\n' payload += '--' + boundary_key + '\r\nContent-Disposition: form-data; name="file"; filename="' + encoded_filename + '"\r\n' payload += 'Content-Type: ' + mimetype + '\r\n\r\n' payload += blob . read ( ) payload += '\r\n--' + boundary_key + '--' return self . _api . request ( 'post' , '/api/blobelements/d/' + did + '/w/' + wid , headers = req_headers , body = payload )
Uploads a file to a new blob element in the specified doc .
13,989
def part_studio_stl ( self , did , wid , eid ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+octet-stream' } return self . _api . request ( 'get' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl' , headers = req_headers )
Exports STL export from a part studio
13,990
def create_assembly_instance ( self , assembly_uri , part_uri , configuration ) : payload = { "documentId" : part_uri [ "did" ] , "elementId" : part_uri [ "eid" ] , "versionId" : part_uri [ "wvm" ] , "isAssembly" : False , "isWholePartStudio" : True , "configuration" : self . encode_configuration ( part_uri [ "did" ] , part_uri [ "eid" ] , configuration ) } return self . _api . request ( 'post' , '/api/assemblies/d/' + assembly_uri [ "did" ] + '/' + assembly_uri [ "wvm_type" ] + '/' + assembly_uri [ "wvm" ] + '/e/' + assembly_uri [ "eid" ] + '/instances' , body = payload )
Insert a configurable part into an assembly .
13,991
def encode_configuration ( self , did , eid , parameters ) : parameters = [ { "parameterId" : k , "parameterValue" : v } for ( k , v ) in parameters . items ( ) ] payload = { 'parameters' : parameters } req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } res = self . _api . request ( 'post' , '/api/elements/d/' + did + '/e/' + eid + '/configurationencodings' , body = payload , headers = req_headers ) return json . loads ( res . content . decode ( "utf-8" ) ) [ "encodedId" ]
Encode parameters as a URL - ready string
13,992
def get_configuration ( self , uri ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } return self . _api . request ( 'get' , '/api/partstudios/d/' + uri [ "did" ] + '/' + uri [ "wvm_type" ] + '/' + uri [ "wvm" ] + '/e/' + uri [ "eid" ] + '/configuration' , headers = req_headers )
get the configuration of a PartStudio
13,993
def update_configuration ( self , did , wid , eid , payload ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } res = self . _api . request ( 'post' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/configuration' , body = payload , headers = req_headers ) return res
Update the configuration specified in the payload
13,994
def set_routing ( app , view_data ) : routing_modules = convert_routing_module ( view_data ) for module in routing_modules : view = import_string ( module . import_path ) app . add_url_rule ( module . url , view_func = view . as_view ( module . endpoint ) )
Apply the routing configuration you've described.
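What set_routing ultimately performs per module, shown directly with Flask's public API and a hypothetical view class:

from flask import Flask
from flask.views import MethodView

class HelloView(MethodView):
    def get(self):
        return 'hello'

app = Flask(__name__)
app.add_url_rule('/hello', view_func=HelloView.as_view('hello'))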
13,995
def retrieve_commands ( self , module ) : commands = [ ] for name , obj in inspect . getmembers ( module ) : if name != 'Command' and 'Command' in name : if name != 'GlimCommand' : cobject = getattr ( module , name ) commands . append ( cobject ) return commands
Smartly imports Command-type classes from the given module.
13,996
def match ( self , args ) : command = None for c in self . commands : if c . name == args . which : c . args = args command = c break return command
Function dispatches the active command line utility .
13,997
def dispatch ( self , command , app ) : if self . is_glimcommand ( command ) : command . run ( app ) else : command . run ( )
Function runs the active command .
13,998
def replace_sequence ( self , pdb_ID , chain_id , replacement_sequence ) : old_sequences = self . sequences old_unique_sequences = self . unique_sequences self . sequences = [ ] self . unique_sequences = { } for s in old_sequences : if s [ 0 ] == pdb_ID and s [ 1 ] == chain_id : self . _add_sequence ( pdb_ID , chain_id , replacement_sequence ) else : self . _add_sequence ( s [ 0 ] , s [ 1 ] , s [ 2 ] ) self . _find_identical_sequences ( )
Replaces a sequence with another . Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match .
13,999
def retrieve ( pdb_id , cache_dir = None , bio_cache = None ) : pdb_id = pdb_id . upper ( ) if bio_cache : return FASTA ( bio_cache . get_fasta_contents ( pdb_id ) ) if cache_dir : filename = os . path . join ( cache_dir , "%s.fasta" % pdb_id ) if os . path . exists ( filename ) : return FASTA ( read_file ( filename ) ) else : filename += ".txt" if os . path . exists ( filename ) : return FASTA ( read_file ( filename ) ) contents = rcsb . retrieve_fasta ( pdb_id ) if cache_dir : write_file ( os . path . join ( cache_dir , "%s.fasta" % pdb_id ) , contents ) return FASTA ( contents )
Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB .
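The cache-or-fetch pattern used by retrieve(), reduced to its essentials; fetch stands for any callable that downloads the contents (rcsb.retrieve_fasta above):

import os

def retrieve_cached(pdb_id, cache_dir, fetch):
    path = os.path.join(cache_dir, '%s.fasta' % pdb_id.upper())
    if os.path.exists(path):
        with open(path) as f:  # serve the cached copy if present
            return f.read()
    contents = fetch(pdb_id)
    with open(path, 'w') as f:  # populate the cache for next time
        f.write(contents)
    return contents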