idx (int64, 0-63k) | question (string, lengths 53-5.28k) | target (string, lengths 5-805)
|---|---|---|
13,900 | def map_value ( self , value , gid ) : base_gid = self . base_gid_pattern . search ( gid ) . group ( 1 ) if self . anonymyze : try : if value in self . _maps [ base_gid ] : return self . _maps [ base_gid ] [ value ] else : k = ( len ( self . _maps [ base_gid ] ) + 1 ) % self . mapmax new_item = u'{0}_{1:0{2}d}' . forma... | Return the value for a group id applying requested mapping . Map only groups related to a filter ie when the basename of the group is identical to the name of a filter . |
13,901 | def match_to_dict ( self , match , gids ) : values = { } for gid in gids : try : values [ gid ] = self . map_value ( match . group ( gid ) , gid ) except IndexError : pass return values | Map values from match into a dictionary . |
13,902 | def match_to_string ( self , match , gids , values = None ) : s = match . string parts = [ ] k = 0 for gid in sorted ( gids , key = lambda x : gids [ x ] ) : if values is None : try : value = self . map_value ( match . group ( gid ) , gid ) parts . append ( s [ k : match . start ( gid ) ] ) parts . append ( value ) k =... | Return the mapped string from match object . If a dictionary of values is provided then use it to build the string . |
13,903 | def gethost ( self , ip_addr ) : try : if ip_addr [ : 7 ] == '::ffff:' : ip_addr = ip_addr [ 7 : ] except TypeError : pass if ip_addr [ 0 ] in string . letters : return ip_addr try : return self . hostsmap [ ip_addr ] except KeyError : pass try : name = socket . gethostbyaddr ( ip_addr ) [ 0 ] except socket . error : n... | Do reverse lookup on an ip address |
13,904 | def getuname ( self , uid ) : uid = int ( uid ) try : return self . uidsmap [ uid ] except KeyError : pass try : name = pwd . getpwuid ( uid ) [ 0 ] except ( KeyError , AttributeError ) : name = "uid=%d" % uid self . uidsmap [ uid ] = name return name | Get the username of a given uid . |
13,905 | def redirect ( endpoint , ** kw ) : _endpoint = None if isinstance ( endpoint , six . string_types ) : _endpoint = endpoint if "/" in endpoint : return f_redirect ( endpoint ) else : for r in Mocha . _app . url_map . iter_rules ( ) : _endpoint = endpoint if 'GET' in r . methods and endpoint in r . endpoint : _endpoint ... | Redirect allows redirecting dynamically using the classes' methods without knowing the right endpoint . Expecting all endpoints to have GET as method , it will try to pick the first match based on the endpoint provided or based on the Rule map_url |
13,906 | def get_true_argspec ( method ) : argspec = inspect . getargspec ( method ) args = argspec [ 0 ] if args and args [ 0 ] == 'self' : return argspec if hasattr ( method , '__func__' ) : method = method . __func__ if not hasattr ( method , '__closure__' ) or method . __closure__ is None : raise DecoratorCompatibilityError... | Drills through layers of decorators attempting to locate the actual argspec for the method . |
13,907 | def setup_installed_apps ( cls ) : cls . _installed_apps = cls . _app . config . get ( "INSTALLED_APPS" , [ ] ) if cls . _installed_apps : def import_app ( module , props = { } ) : _ = werkzeug . import_string ( module ) setattr ( _ , "__options__" , utils . dict_dot ( props ) ) for k in cls . _installed_apps : if isin... | To import 3rd party applications along with associated properties |
13,908 | def _add_asset_bundle ( cls , path ) : f = "%s/assets.yml" % path if os . path . isfile ( f ) : cls . _asset_bundles . add ( f ) | Add a webassets bundle yml file |
13,909 | def _setup_db ( cls ) : uri = cls . _app . config . get ( "DB_URL" ) if uri : db . connect__ ( uri , cls . _app ) | Setup the DB connection if DB_URL is set |
13,910 | def parse_options ( cls , options ) : options = options . copy ( ) subdomain = options . pop ( 'subdomain' , None ) endpoint = options . pop ( 'endpoint' , None ) return subdomain , endpoint , options , | Extracts subdomain and endpoint values from the options dict and returns them along with a new dict without those values . |
13,911 | def get_base_route ( cls ) : base_route = cls . __name__ . lower ( ) if cls . base_route is not None : base_route = cls . base_route base_rule = parse_rule ( base_route ) cls . base_args = [ r [ 2 ] for r in base_rule ] return base_route . strip ( "/" ) | Returns the route base to use for the current class . |
13,912 | def find_gene_by_name ( self , gene_name : str ) -> Gene : for gene in self . genes : if gene . name == gene_name : return gene raise AttributeError ( f'gene "{gene_name}" does not exist' ) | Find and return a gene in the influence graph with the given name . Raise an AttributeError if there is no gene in the graph with the given name . |
13,913 | def find_multiplex_by_name ( self , multiplex_name : str ) -> Multiplex : for multiplex in self . multiplexes : if multiplex . name == multiplex_name : return multiplex raise AttributeError ( f'multiplex "{multiplex_name}" does not exist' ) | Find and return a multiplex in the influence graph with the given name . Raise an AttributeError if there is no multiplex in the graph with the given name . |
13,914 | def all_states ( self ) -> Tuple [ State , ... ] : return tuple ( self . _transform_list_of_states_to_state ( states ) for states in self . _cartesian_product_of_every_states_of_each_genes ( ) ) | Return all the possible states of this influence graph . |
13,915 | def _cartesian_product_of_every_states_of_each_genes ( self ) -> Tuple [ Tuple [ int , ... ] ] : if not self . genes : return ( ) return tuple ( product ( * [ gene . states for gene in self . genes ] ) ) | Private method which returns the cartesian product of the states of the genes in the model . It represents all the possible states for a given model . |
13,916 | def _transform_list_of_states_to_state ( self , state : List [ int ] ) -> State : return State ( { gene : state [ i ] for i , gene in enumerate ( self . genes ) } ) | Private method which transforms a list which contains the state of the genes in the model to a State object . |
13,917 | def read_sha1 ( file_path , buf_size = None , start_byte = 0 , read_size = None , extra_hashers = [ ] , ) : read_size = read_size or os . stat ( file_path ) . st_size buf_size = buf_size or DEFAULT_BUFFER_SIZE data_read = 0 total_sha1 = hashlib . sha1 ( ) while data_read < read_size : with open ( file_path , 'rb' , buf... | Determines the sha1 hash of a file in chunks to prevent loading the entire file at once into memory |
13,918 | def verify_uploaded_file ( self , destination_folder_id , source_path , verbose = True , ) : source_file_size = os . stat ( source_path ) . st_size total_part_size = 0 file_position = 0 uploaded_box_file_ids = self . find_file ( destination_folder_id , os . path . basename ( source_path ) ) total_sha1 = hashlib . sha1 ... | Verifies the integrity of a file uploaded to Box |
13,919 | def handle_resourcelist ( ltext , ** kwargs ) : base = kwargs . get ( 'base' , VERSA_BASEIRI ) model = kwargs . get ( 'model' ) iris = ltext . strip ( ) . split ( ) newlist = model . generate_resource ( ) for i in iris : model . add ( newlist , VERSA_BASEIRI + 'item' , I ( iri . absolutize ( i , base ) ) ) return newli... | A helper that converts lists of resources from a textual format such as Markdown including absolutizing relative IRIs |
13,920 | def handle_resourceset ( ltext , ** kwargs ) : fullprop = kwargs . get ( 'fullprop' ) rid = kwargs . get ( 'rid' ) base = kwargs . get ( 'base' , VERSA_BASEIRI ) model = kwargs . get ( 'model' ) iris = ltext . strip ( ) . split ( ) for i in iris : model . add ( rid , fullprop , I ( iri . absolutize ( i , base ) ) ) ret... | A helper that converts sets of resources from a textual format such as Markdown including absolutizing relative IRIs |
13,921 | def create_cache_database ( self ) : conn = sqlite3 . connect ( self . database ) conn . text_factory = str c = conn . cursor ( ) c . execute ( ) c . execute ( ) c . execute ( ) conn . commit ( ) conn . close ( ) | Create a new SQLite3 database for use with Cache objects |
13,922 | def __exists_row_not_too_old ( self , row ) : if row is None : return False record_time = dateutil . parser . parse ( row [ 2 ] ) now = datetime . datetime . now ( dateutil . tz . gettz ( ) ) age = ( record_time - now ) . total_seconds ( ) if age > self . max_age : return False return True | Check if the given row exists and is not too old |
13,923 | def has_item ( self , item_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM items WHERE url=?" , ( str ( item_url ) , ) ) row = c . fetchone ( ) c . close ( ) return self . __exists_row_not_too_old ( row ) | Check if the metadata for the given item is present in the cache |
13,924 | def has_document ( self , doc_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) row = c . fetchone ( ) c . close ( ) return self . __exists_row_not_too_old ( row ) | Check if the content of the given document is present in the cache |
13,925 | def get_document ( self , doc_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) row = c . fetchone ( ) c . close ( ) if row is None : raise ValueError ( "Item not present in cache" ) file_path = row [ 1 ] try : with open ( file_path , 'rb' ) as f : return... | Retrieve the content for the given document from the cache . |
13,926 | def get_primary_text ( self , item_url ) : c = self . conn . cursor ( ) c . execute ( "SELECT * FROM primary_texts WHERE item_url=?" , ( str ( item_url ) , ) ) row = c . fetchone ( ) c . close ( ) if row is None : raise ValueError ( "Item not present in cache" ) return row [ 1 ] | Retrieve the primary text for the given item from the cache . |
13,927 | def add_item ( self , item_url , item_metadata ) : c = self . conn . cursor ( ) c . execute ( "DELETE FROM items WHERE url=?" , ( str ( item_url ) , ) ) self . conn . commit ( ) c . execute ( "INSERT INTO items VALUES (?, ?, ?)" , ( str ( item_url ) , item_metadata , self . __now_iso_8601 ( ) ) ) self . conn . commit (... | Add the given item to the cache database updating the existing metadata if the item is already present |
13,928 | def add_document ( self , doc_url , data ) : file_path = self . __generate_filepath ( ) with open ( file_path , 'wb' ) as f : f . write ( data ) c = self . conn . cursor ( ) c . execute ( "SELECT * FROM documents WHERE url=?" , ( str ( doc_url ) , ) ) for row in c . fetchall ( ) : old_file_path = row [ 1 ] if os . path... | Add the given document to the cache updating the existing content data if the document is already present |
13,929 | def add_primary_text ( self , item_url , primary_text ) : c = self . conn . cursor ( ) c . execute ( "DELETE FROM primary_texts WHERE item_url=?" , ( str ( item_url ) , ) ) self . conn . commit ( ) c . execute ( "INSERT INTO primary_texts VALUES (?, ?, ?)" , ( str ( item_url ) , primary_text , self . __now_iso_8601 ( )... | Add the given primary text to the cache database updating the existing record if the primary text is already present |
13,930 | async def profile ( self , ctx , tag ) : if not self . check_valid_tag ( tag ) : return await ctx . send ( 'Invalid tag!' ) profile = await self . cr . get_profile ( tag ) em = discord . Embed ( color = 0x00FFFFF ) em . set_author ( name = str ( profile ) , icon_url = profile . clan_badge_url ) em . set_thumbnail ( url... | Example command for use inside a discord bot cog . |
13,931 | def write_remaining ( self ) : if not self . results : return with db . execution_context ( ) : with db . atomic ( ) : Result . insert_many ( self . results ) . execute ( ) del self . results [ : ] | Write the remaining stack content |
13,932 | def configure ( project_path , config_file = None ) : if config_file is None : config_file = os . path . join ( project_path , 'config.json' ) try : with open ( config_file , 'r' ) as f : config = json . load ( f ) except ValueError as e : raise OctConfigurationError ( "Configuration setting failed with error: %s" % e ... | Get the configuration of the test and return it as a config object |
13,933 | def configure_for_turret ( project_name , config_file ) : config = configure ( project_name , config_file ) for key in WARNING_CONFIG_KEYS : if key not in config : print ( "WARNING: %s configuration key not present, the value will be set to default value" % key ) common_config = { 'hq_address' : config . get ( 'hq_addr... | Load the configuration file in python dict and check for keys that will be set to default value if not present |
13,934 | def get_db_uri ( config , output_dir ) : db_config = config . get ( "results_database" , { "db_uri" : "default" } ) if db_config [ 'db_uri' ] == 'default' : return os . path . join ( output_dir , "results.sqlite" ) return db_config [ 'db_uri' ] | Process results_database parameters in config to format them for set database function |
13,935 | def update ( self ) : self . json = c . get_document ( self . uri . did ) . json ( ) self . e_list = c . element_list ( self . uri . as_dict ( ) ) . json ( ) | All client calls to update this instance with Onshape . |
13,936 | def find_element ( self , name , type = ElementType . ANY ) : for e in self . e_list : if type . value and not e [ 'elementType' ] == type : continue if e [ "name" ] == name : uri = self . uri uri . eid = e [ "id" ] return uri | Find an element in the document with the given name - could be a PartStudio , Assembly or blob . |
13,937 | def beta_array ( C , HIGHSCALE , * args , ** kwargs ) : beta_odict = beta ( C , HIGHSCALE , * args , ** kwargs ) return np . hstack ( [ np . asarray ( b ) . ravel ( ) for b in beta_odict . values ( ) ] ) | Return the beta functions of all SM parameters and SMEFT Wilson coefficients as a 1D numpy array . |
13,938 | def _search ( self , query , search_term ) : criterias = mongoengine . Q ( ) rel_criterias = mongoengine . Q ( ) terms = shlex . split ( search_term ) if len ( terms ) == 1 and re . match ( RE_OBJECTID , terms [ 0 ] ) : q = query . filter ( id = bson . ObjectId ( terms [ 0 ] ) ) if q . count ( ) == 1 : return q for ter... | Improved search between words . |
13,939 | def set_data_from_iterable ( self , frames , values , labels = None ) : if not isinstance ( frames , collections . Iterable ) : raise TypeError , "frames must be an iterable" if not isinstance ( values , collections . Iterable ) : raise TypeError , "values must be an iterable" assert ( len ( frames ) == len ( values ) ... | Initialize a dataset structure from iterable parameters |
13,940 | def writexml ( self , writer , indent = "" , addindent = "" , newl = "" ) : writer . write ( '%s<dataset id="%s" dimensions="%s">%s' % ( indent , self . datasetid , self . dimensions , newl ) ) indent2 = indent + addindent for l , x , y in zip ( self . labels , self . frames , self . values ) : writer . write ( '%s<poi... | Write the continuous dataset using sonic visualiser xml conventions |
13,941 | def gaus_pdf ( x , mean , std ) : return exp ( - ( ( x - mean ) / std ) ** 2 / 2 ) / sqrt ( 2 * pi ) / std | Gaussian distribution's probability density function . |
13,942 | def logistic ( x , x0 , k , L ) : return L / ( 1 + exp ( - k * ( x - x0 ) ) ) | Logistic function . |
13,943 | def populate_menv ( menv , agent_cls_name , log_folder ) : gs = menv . gs n_agents = gs [ 0 ] * gs [ 1 ] n_slaves = len ( menv . addrs ) logger . info ( "Populating {} with {} agents" . format ( HOST , n_agents * n_slaves ) ) run ( menv . populate ( agent_cls_name , n_agents , log_folder = log_folder ) ) logger . info ... | Populate given multiprocessing grid environment with agents . |
13,944 | def get_slave_addrs ( mgr_addr , N ) : return [ ( HOST , p ) for p in range ( mgr_addr + 1 , mgr_addr + 1 + N ) ] | Get ports for the slave environments . |
13,945 | def weighted_average ( rule , artifact ) : e = 0 w = 0 for i in range ( len ( rule . R ) ) : r = rule . R [ i ] ( artifact ) if r is not None : e += r * rule . W [ i ] w += abs ( rule . W [ i ] ) if w == 0.0 : return 0.0 return e / w | Evaluate artifact's value to be weighted average of values returned by rule's subrules . |
13,946 | def minimum ( rule , artifact ) : m = 1.0 for i in range ( len ( rule . R ) ) : e = rule . R [ i ] ( artifact ) if e is not None : if e < m : m = e return m | Evaluate artifact's value to be minimum of values returned by rule's subrules . |
13,947 | def add_subrule ( self , subrule , weight ) : if not issubclass ( subrule . __class__ , ( Rule , RuleLeaf ) ) : raise TypeError ( "Rule's class must be (subclass of) {} or {}, got " "{}." . format ( Rule , RuleLeaf , subrule . __class__ ) ) self . __domains = set . union ( self . __domains , subrule . domains ) self . ... | Add subrule to the rule . |
13,948 | def parse_seqres ( self , pdb ) : seqresre = re . compile ( "SEQRES" ) seqreslines = [ line for line in pdb . lines if seqresre . match ( line ) ] for line in seqreslines : chain = line [ 11 ] resnames = line [ 19 : 70 ] . strip ( ) self . setdefault ( chain , [ ] ) self [ chain ] += resnames . split ( ) | Parse the SEQRES entries into the object |
13,949 | def parse_atoms ( self , pdb ) : atomre = re . compile ( "ATOM" ) atomlines = [ line for line in pdb . lines if atomre . match ( line ) ] chainresnums = { } for line in atomlines : chain = line [ 21 ] resname = line [ 17 : 20 ] resnum = line [ 22 : 27 ] chainresnums . setdefault ( chain , [ ] ) if resnum in chainresnum... | Parse the ATOM entries into the object |
13,950 | def seqres_lines ( self ) : lines = [ ] for chain in self . keys ( ) : seq = self [ chain ] serNum = 1 startidx = 0 while startidx < len ( seq ) : endidx = min ( startidx + 13 , len ( seq ) ) lines += [ "SEQRES %2i %s %4i %s\n" % ( serNum , chain , len ( seq ) , " " . join ( seq [ startidx : endidx ] ) ) ] serNum += ... | Generate SEQRES lines representing the contents |
13,951 | def replace_seqres ( self , pdb , update_atoms = True ) : newpdb = PDB ( ) inserted_seqres = False entries_before_seqres = set ( [ "HEADER" , "OBSLTE" , "TITLE" , "CAVEAT" , "COMPND" , "SOURCE" , "KEYWDS" , "EXPDTA" , "AUTHOR" , "REVDAT" , "SPRSDE" , "JRNL" , "REMARK" , "DBREF" , "SEQADV" ] ) mutated_resids = { } if up... | Replace SEQRES lines with a new sequence optionally removing mutated sidechains |
13,952 | def has_host_match ( log_data , hosts ) : hostname = getattr ( log_data , 'host' , None ) if hostname and hostname not in host_cache : for host_pattern in hosts : if host_pattern . search ( hostname ) is not None : host_cache . add ( hostname ) return True else : return False return True | Match the data with a list of hostname patterns . If the log line data doesn't include host information , considers the line as matched . |
13,953 | def run ( self , app ) : GlimLog . info ( 'Glim server started on %s environment' % self . args . env ) try : kwargs = Config . get ( 'app.server.options' ) run ( app . wsgi , host = Config . get ( 'app.server.host' ) , port = Config . get ( 'app.server.port' ) , debug = Config . get ( 'app.server.debugger' ) , reloade... | Function starts the web server given configuration . |
13,954 | def get_symmetrical_std_devs ( values , ignore_zeros = True ) : pos_stdeviation = get_symmetrical_std_dev ( values , True , ignore_zeros = ignore_zeros ) neg_stdeviation = get_symmetrical_std_dev ( values , False , ignore_zeros = ignore_zeros ) return pos_stdeviation , neg_stdeviation | Takes a list of values and splits it into positive and negative values . For both of these subsets a symmetrical distribution is created by mirroring each value along the origin and the standard deviation for both subsets is returned . |
13,955 | def get_std_xy_dataset_statistics ( x_values , y_values , expect_negative_correlation = False , STDev_cutoff = 1.0 ) : assert ( len ( x_values ) == len ( y_values ) ) csv_lines = [ 'ID,X,Y' ] + [ ',' . join ( map ( str , [ c + 1 , x_values [ c ] , y_values [ c ] ] ) ) for c in xrange ( len ( x_values ) ) ] data = parse... | Calls parse_csv and returns the analysis in a format similar to get_xy_dataset_statistics in klab . stats . misc . |
13,956 | def active_multiplex ( self , state : 'State' ) -> Tuple [ 'Multiplex' ] : return tuple ( multiplex for multiplex in self . multiplexes if multiplex . is_active ( state ) ) | Return a tuple of all the active multiplexes in the given state . |
13,957 | def sanitized_name ( self ) : a = re . split ( "[:/]" , self . name ) return "_" . join ( [ i for i in a if len ( i ) > 0 ] ) | Sanitized name of the agent used for file and directory creation . |
13,958 | def get_connections ( self , data = False ) : if data : return self . _connections return list ( self . _connections . keys ( ) ) | Get agent's current connections . |
13,959 | def publish ( self , artifact ) : self . env . add_artifact ( artifact ) self . _log ( logging . DEBUG , "Published {} to domain." . format ( artifact ) ) | Publish artifact to agent's environment . |
13,960 | async def ask_opinion ( self , addr , artifact ) : remote_agent = await self . env . connect ( addr ) return await remote_agent . evaluate ( artifact ) | Ask an agent's opinion about an artifact . |
13,961 | def localization_feature ( app ) : app . config [ 'BABEL_DEFAULT_LOCALE' ] = app . config [ 'DEFAULT_LOCALE' ] app . config [ 'BABEL_DEFAULT_TIMEZONE' ] = app . config [ 'DEFAULT_TIMEZONE' ] babel = Babel ( ) babel . init_app ( app ) | Localization feature . This will initialize support for translations and localization of values such as numbers , money , dates and formatting timezones . |
13,962 | def enrich_json_objects_by_object_type ( request , value ) : time_start_globally = time ( ) if isinstance ( value , list ) : json = [ x . to_json ( ) if hasattr ( x , "to_json" ) else x for x in value ] else : if isinstance ( value , dict ) : json = value else : json = value . to_json ( ) objects , nested = _collect_js... | Take the given value and start enrichment by object_type . The va |
13,963 | def enrich_by_predicate ( request , json , fun , predicate , skip_nested = False , ** kwargs ) : time_start = time ( ) collected = [ ] memory = { 'nested' : False } def _collect ( json_inner , nested ) : if nested and skip_nested : return if isinstance ( json_inner , list ) : list ( map ( lambda x : _collect ( x , nest... | Take the JSON , find all its subparts satisfying the given condition and transform them by the given function . Other key - word arguments are passed to the function . |
13,964 | def enrich_by_object_type ( request , json , fun , object_type , skip_nested = False , ** kwargs ) : if not isinstance ( object_type , list ) : object_type = [ object_type ] predicate = lambda x : 'object_type' in x and x [ 'object_type' ] in object_type return enrich_by_predicate ( request , json , fun , predicate , s... | Take the JSON find its subparts having the given object part and transform them by the given function . Other key - word arguments are passed to the function . |
13,965 | def change_parent ( sender , instance , ** kwargs ) : if instance . id is None : return if len ( { 'term' , 'term_id' } & set ( instance . changed_fields ) ) != 0 : diff = instance . diff parent = diff [ 'term' ] [ 0 ] if 'term' in diff else diff [ 'term_id' ] [ 0 ] child_id = instance . item_id if parent is not None :... | When the given flashcard has changed , look at term and context and change the corresponding item relation . |
13,966 | def example ( ) : b = Bonsai . retrieve ( '1lfa' , cache_dir = '/tmp' ) search_radius = 10.0 atom_of_interest = b . get_atom ( 1095 ) nearby_atoms = b . find_atoms_near_atom ( atom_of_interest , search_radius ) for na in nearby_atoms : assert ( na - atom_of_interest <= search_radius ) for fa in b . get_atom_set_complem... | This section gives examples of how to use the module . |
13,967 | def from_non_aligned_residue_IDs ( Chain , StartResidueID , EndResidueID , Sequence = None ) : return PDBSection ( Chain , PDB . ResidueID2String ( StartResidueID ) , PDB . ResidueID2String ( EndResidueID ) , Sequence = Sequence ) | A more forgiving method that does not care about the padding of the residue IDs . |
13,968 | def bin_atoms ( self ) : low_point = numpy . array ( [ self . min_x , self . min_y , self . min_z ] ) high_point = numpy . array ( [ self . max_x , self . max_y , self . max_z ] ) atom_bin_dimensions = numpy . ceil ( ( high_point - low_point ) / self . bin_size ) self . atom_bin_dimensions = ( int ( atom_bin_dimensions... | This function bins the Atoms into fixed - size sections of the protein space in 3D . |
13,969 | def find_heavy_atoms_near_atom ( self , source_atom , search_radius , atom_hit_cache = set ( ) , restrict_to_CA = False ) : non_heavy_atoms = self . get_atom_names_by_group ( set ( [ 'H' , 'D' , 'T' ] ) ) return self . find_atoms_near_atom ( source_atom , search_radius , atom_names_to_exclude = non_heavy_atoms , atom_h... | atom_hit_cache is a set of atom serial numbers which have already been tested . We keep track of these to avoid recalculating the distance . |
13,970 | def get ( self , attr_name , * args ) : if not isinstance ( attr_name , six . string_types ) : raise TypeError ( 'attr_name must be a str.' ) if '-' in attr_name : attr_name = attr_name . replace ( '-' , '_' ) parent_attr = self attr = getattr ( parent_attr , attr_name , None ) for arg in args : if not isinstance ( arg... | Get the most retrieval attribute in the configuration file . This method will recursively look through the configuration file for the attribute specified and return the last found value or None . The values can be referenced by the key name provided in the configuration file or that value normalized with snake_casing . |
13,971 | def service_references ( self ) : services_blue_print = self . _scheme_references . get ( 'services' ) if services_blue_print is None : raise LookupError ( 'unable to find any services in the config.' ) return { key . replace ( '-' , '_' ) : key for key in services_blue_print [ 'keys' ] } | returns a list of service names |
13,972 | def validate ( self ) : if not isinstance ( self . _data , dict ) : raise TypeError ( 'freight forwarder configuration file must be a dict.' ) current_log_level = logger . get_level ( ) if self . _verbose : logger . set_level ( 'DEBUG' ) else : logger . set_level ( 'ERROR' ) logger . info ( 'Starting configuration vali... | Validate the contents of the configuration file . Will return None if validation is successful or raise an error if not . |
13,973 | def _load ( self ) : if os . path . isdir ( self . _path ) : for file_ext in ( 'yml' , 'yaml' , 'json' ) : test_path = os . path . join ( self . _path , 'freight-forwarder.{0}' . format ( file_ext ) ) if os . path . isfile ( test_path ) : self . _path = test_path break if os . path . isfile ( self . _path ) : file_name... | Load a configuration file . This method will be called when the Config class is instantiated . The configuration file can be json or yaml . |
13,974 | def _load_yml_config ( self , config_file ) : if not isinstance ( config_file , six . string_types ) : raise TypeError ( 'config_file must be a str.' ) try : def construct_yaml_int ( self , node ) : obj = SafeConstructor . construct_yaml_int ( self , node ) data = ConfigInt ( obj , node . start_mark , node . end_mark )... | Loads a yaml str , creates a few constructs for pyaml , serializes and normalizes the config data . Then assigns the config data to self . _data . |
13,975 | def _create_attr ( self , property_key , data , ancestors ) : if not isinstance ( property_key , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_key ) ) ) if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an Or... | Dynamically creates attributes for a Config . Also adds name and alias to each Config object . |
13,976 | def _collect_unrecognized_values ( self , scheme , data , ancestors ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. type: {0} ... | Looks for values that aren't defined in the scheme and returns a dict with any unrecognized values found . |
13,977 | def _update_scheme ( self , scheme , ancestors ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( scheme , dict ) : raise TypeError ( 'scheme must be a dict. type: {0} was passed' . format ... | Updates the current scheme based off special pre - defined keys and returns a new updated scheme . |
13,978 | def _walk_tree ( self , data , scheme , ancestors = None , property_name = None , prefix = None ) : if property_name is None : property_name = 'root' order = [ 'registries' ] + [ key for key in scheme . keys ( ) if key not in ( 'registries' , ) ] scheme = OrderedDict ( sorted ( scheme . items ( ) , key = lambda x : ord... | This function takes configuration data and a validation scheme , then walks the configuration tree validating the configuration data against the scheme provided . Will raise error on failure , otherwise return None . |
13,979 | def _update_ancestors ( self , config_data , property_name , ancestors = None ) : if not isinstance ( property_name , six . string_types ) : raise TypeError ( "property_key must be a string. type: {0} was passed." . format ( type ( property_name ) ) ) if ancestors is None : ancestors = OrderedDict ( [ ( 'root' , config... | Update ancestors for a specific property . |
13,980 | def _reference_keys ( self , reference ) : if not isinstance ( reference , six . string_types ) : raise TypeError ( 'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.' . format ( type ( reference ) . __name__ ) ) if '~' in reference : reference = reference [ 1 : ] scheme = self . _sche... | Returns a list of all of the keys for a given reference . |
13,981 | def __execute_validations ( self , validations , data , property_name , ancestors , negation = False , prefix = None ) : if not isinstance ( ancestors , OrderedDict ) : raise TypeError ( "ancestors must be an OrderedDict. type: {0} was passed." . format ( type ( ancestors ) ) ) if not isinstance ( validations , dict ) ... | Validate the data for a specific configuration value . This method will look up all of the validations provided and dynamically call any validation methods . If a validation fails an error will be thrown . If no errors are found , attributes will be dynamically created on the Config object for the configuration value . |
13,982 | def create ( name , url , tournament_type = "single elimination" , ** params ) : params . update ( { "name" : name , "url" : url , "tournament_type" : tournament_type , } ) return api . fetch_and_parse ( "POST" , "tournaments" , "tournament" , ** params ) | Create a new tournament . |
13,983 | def users_feature ( app ) : if not app . config . get ( 'USER_JWT_SECRET' , None ) : raise x . JwtSecretMissing ( 'Please set USER_JWT_SECRET in config' ) app . session_interface = BoilerSessionInterface ( ) user_service . init ( app ) login_manager . init_app ( app ) login_manager . login_view = 'user.login' login_man... | Add users feature . Allows registering users and assigning groups ; instantiates flask login , flask principal and oauth integration |
13,984 | def rename_document ( self , did , name ) : payload = { 'name' : name } return self . _api . request ( 'post' , '/api/documents/' + did , body = payload ) | Renames the specified document . |
13,985 | def copy_workspace ( self , uri , new_name ) : payload = { 'isPublic' : True , 'newName' : new_name } return self . _api . request ( 'post' , '/api/documents/' + uri [ 'did' ] + '/workspaces/' + uri [ 'wvm' ] + '/copy' , body = payload ) | Copy the current workspace . |
13,986 | def create_workspace ( self , did , name , version_id = None ) : payload = { 'isPublic' : True , 'name' : name , } if version_id : payload [ 'versionId' ] = version_id return self . _api . request ( 'post' , '/api/documents/d/' + did + '/workspaces' , body = payload ) | Create a workspace in the specified document . |
13,987 | def get_partstudio_tessellatededges ( self , did , wid , eid ) : return self . _api . request ( 'get' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/tessellatededges' ) | Gets the tessellation of the edges of all parts in a part studio . |
13,988 | def upload_blob ( self , did , wid , filepath = './blob.json' ) : chars = string . ascii_letters + string . digits boundary_key = '' . join ( random . choice ( chars ) for i in range ( 8 ) ) mimetype = mimetypes . guess_type ( filepath ) [ 0 ] encoded_filename = os . path . basename ( filepath ) file_content_length = s... | Uploads a file to a new blob element in the specified doc . |
13,989 | def part_studio_stl ( self , did , wid , eid ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+octet-stream' } return self . _api . request ( 'get' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/stl' , headers = req_headers ) | Exports STL from a part studio |
13,990 | def create_assembly_instance ( self , assembly_uri , part_uri , configuration ) : payload = { "documentId" : part_uri [ "did" ] , "elementId" : part_uri [ "eid" ] , "versionId" : part_uri [ "wvm" ] , "isAssembly" : False , "isWholePartStudio" : True , "configuration" : self . encode_configuration ( part_uri [ "did" ] ,... | Insert a configurable part into an assembly . |
13,991 | def encode_configuration ( self , did , eid , parameters ) : parameters = [ { "parameterId" : k , "parameterValue" : v } for ( k , v ) in parameters . items ( ) ] payload = { 'parameters' : parameters } req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } res = self . _ap... | Encode parameters as a URL - ready string |
13,992 | def get_configuration ( self , uri ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } return self . _api . request ( 'get' , '/api/partstudios/d/' + uri [ "did" ] + '/' + uri [ "wvm_type" ] + '/' + uri [ "wvm" ] + '/e/' + uri [ "eid" ] + '/configuration' , headers =... | get the configuration of a PartStudio |
13,993 | def update_configuration ( self , did , wid , eid , payload ) : req_headers = { 'Accept' : 'application/vnd.onshape.v1+json' , 'Content-Type' : 'application/json' } res = self . _api . request ( 'post' , '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/configuration' , body = payload , headers = req_headers ... | Update the configuration specified in the payload |
13,994 | def set_routing ( app , view_data ) : routing_modules = convert_routing_module ( view_data ) for module in routing_modules : view = import_string ( module . import_path ) app . add_url_rule ( module . url , view_func = view . as_view ( module . endpoint ) ) | apply the routing configuration you've described |
13,995 | def retrieve_commands ( self , module ) : commands = [ ] for name , obj in inspect . getmembers ( module ) : if name != 'Command' and 'Command' in name : if name != 'GlimCommand' : cobject = getattr ( module , name ) commands . append ( cobject ) return commands | Function smartly imports Command type classes from the given module |
13,996 | def match ( self , args ) : command = None for c in self . commands : if c . name == args . which : c . args = args command = c break return command | Function dispatches the active command line utility . |
13,997 | def dispatch ( self , command , app ) : if self . is_glimcommand ( command ) : command . run ( app ) else : command . run ( ) | Function runs the active command . |
13,998 | def replace_sequence ( self , pdb_ID , chain_id , replacement_sequence ) : old_sequences = self . sequences old_unique_sequences = self . unique_sequences self . sequences = [ ] self . unique_sequences = { } for s in old_sequences : if s [ 0 ] == pdb_ID and s [ 1 ] == chain_id : self . _add_sequence ( pdb_ID , chain_id... | Replaces a sequence with another . Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match . |
13,999 | def retrieve ( pdb_id , cache_dir = None , bio_cache = None ) : pdb_id = pdb_id . upper ( ) if bio_cache : return FASTA ( bio_cache . get_fasta_contents ( pdb_id ) ) if cache_dir : filename = os . path . join ( cache_dir , "%s.fasta" % pdb_id ) if os . path . exists ( filename ) : return FASTA ( read_file ( filename ) ... | Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB . |
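
The `question` cells above store whitespace-tokenized Python: every bracket, comma, dot, and operator is a separate space-joined token, a common storage format for code-to-docstring corpora. A small heuristic detokenizer makes the snippets readable again. This is a sketch of my own, not part of the dataset or any official tooling: it is not a parser, it will not recover line breaks or indentation, and the rewrite rules are guesses at the tokenizer's inverse.

```python
import re

def detokenize(tokens: str) -> str:
    """Heuristically re-join a whitespace-tokenized Python snippet."""
    s = tokens
    s = re.sub(r"([\(\[\{]) ", r"\1", s)    # no space after opening brackets
    s = re.sub(r" ([\)\]\},:])", r"\1", s)  # no space before closers, commas, colons
    s = re.sub(r" \. ", ".", s)             # tight attribute access: a . b -> a.b
    s = re.sub(r"(\w) \(", r"\1(", s)       # calls and defs: f ( -> f(
    s = re.sub(r"\(- ", "(-", s)            # unary minus right after an open paren
    return s

# Applied to the `question` cell of row 13,942:
print(detokenize("def logistic ( x , x0 , k , L ) : "
                 "return L / ( 1 + exp ( - k * ( x - x0 ) ) )"))
# -> def logistic(x, x0, k, L): return L / (1 + exp(-k * (x - x0)))
```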
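Rows 13,941 and 13,942 are among the few whose code cells are complete rather than truncated, so they can be restored to runnable form. The function bodies below come straight from those rows; the `math` import is an assumption, since the rows do not show where `exp`, `sqrt`, and `pi` come from in the original repositories.

```python
from math import exp, pi, sqrt  # assumed import; not shown in the dataset rows

def gaus_pdf(x, mean, std):
    """Gaussian distribution's probability density function (row 13,941)."""
    return exp(-((x - mean) / std) ** 2 / 2) / sqrt(2 * pi) / std

def logistic(x, x0, k, L):
    """Logistic function (row 13,942)."""
    return L / (1 + exp(-k * (x - x0)))

# Sanity checks: the standard normal density peaks at 1/sqrt(2*pi),
# and the logistic curve passes through L/2 at its midpoint x0.
assert abs(gaus_pdf(0.0, 0.0, 1.0) - 1 / sqrt(2 * pi)) < 1e-12
assert abs(logistic(5.0, 5.0, 1.0, 2.0) - 1.0) < 1e-12
```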
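Row 13,945 (`weighted_average`) is also complete and shows how a composite rule scores an artifact: each subrule in `rule.R` is called, `None` results are skipped, and the rest are averaged with the weights in `rule.W`. The sketch below restates that row with `zip` instead of index arithmetic for readability; the tiny `Rule` namedtuple in the usage example is hypothetical scaffolding, not the class used in the source project.

```python
from collections import namedtuple

def weighted_average(rule, artifact):
    """Row 13,945: weighted average of the subrule values; a subrule
    returning None contributes neither to the sum nor to the weight."""
    e = 0.0
    w = 0.0
    for subrule, weight in zip(rule.R, rule.W):
        r = subrule(artifact)
        if r is not None:
            e += r * weight
            w += abs(weight)
    return e / w if w != 0.0 else 0.0

# Hypothetical stand-in for the project's rule object, for illustration only.
Rule = namedtuple("Rule", ["R", "W"])
rule = Rule(R=[lambda a: 0.2, lambda a: None, lambda a: 0.8], W=[1.0, 1.0, 3.0])
print(weighted_average(rule, artifact=None))  # (0.2*1 + 0.8*3) / (1 + 3) = 0.65
```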
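In row 13,921 (`create_cache_database`) the SQL strings were lost, leaving bare `c.execute()` calls. The queries in rows 13,923 through 13,932 constrain what the schema must look like: three tables named `items`, `documents`, and `primary_texts`, each keyed by a URL, with the rows reading a file path or text from column 1 and an ISO-8601 timestamp from column 2. The DDL below is a plausible reconstruction under those constraints; the column names other than `url` and `item_url` are guesses.

```python
import sqlite3

def create_cache_database(database):
    """Plausible reconstruction of row 13,921 (the original SQL was elided).

    Table shapes inferred from rows 13,923-13,932: items stores metadata,
    documents stores a path to content on disk, primary_texts stores the
    text inline; all three carry a timestamp used by the max-age check
    in row 13,922.
    """
    conn = sqlite3.connect(database)
    conn.text_factory = str
    c = conn.cursor()
    c.execute("CREATE TABLE IF NOT EXISTS items "
              "(url TEXT PRIMARY KEY, metadata TEXT, datetime TEXT)")
    c.execute("CREATE TABLE IF NOT EXISTS documents "
              "(url TEXT PRIMARY KEY, path TEXT, datetime TEXT)")
    c.execute("CREATE TABLE IF NOT EXISTS primary_texts "
              "(item_url TEXT PRIMARY KEY, primary_text TEXT, datetime TEXT)")
    conn.commit()
    conn.close()
```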