idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
10,300
def _edges_from_permutation ( self , feature_pathway_dict ) : network_edges = { } for feature , pathway_list in feature_pathway_dict . items ( ) : for i in range ( len ( pathway_list ) ) : for j in range ( i + 1 , len ( pathway_list ) ) : vertex_i = pathway_list [ i ] vertex_j = pathway_list [ j ] new_edge = self . edge_tuple ( vertex_i , vertex_j ) if new_edge not in network_edges : network_edges [ new_edge ] = [ ] network_edges [ new_edge ] . append ( feature ) self . _augment_network ( network_edges )
Given a dictionary mapping each feature to the pathways overrepresented in the feature build a CoNetwork by creating edges for every pairwise combination of pathways in a feature .
10,301
def normalize_text(self, text):
    """Uppercase *text*, blanking its first six characters when the
    editor is in fixed (non-free) format."""
    if self.editor.free_format:
        return text.upper()
    return (' ' * 6 + text[6:]).upper()
Normalize text: when fixed format is ON, replace the first 6 chars by spaces.
10,302
def get_neighborhood_network(self, node_name: str, order: int = 1) -> Graph:
    """Return a copy of the graph restricted to *node_name*'s neighborhood.

    Vertices outside the order-step neighborhood are deleted from the copy.
    """
    logger.info("In get_neighborhood_graph()")
    neighborhood = list(self.get_neighbor_names(node_name, order))
    subgraph = self.graph.copy()
    outsiders = self.graph.vs.select(name_notin=neighborhood)
    subgraph.delete_vertices(outsiders)
    return subgraph
Get the neighborhood graph of a node .
10,303
def get_neighbor_names(self, node_name: str, order: int = 1) -> list:
    """Return the names of all neighbors of *node_name*, plus the node itself."""
    logger.info("In get_neighbor_names()")
    source = self.graph.vs.find(name=node_name)
    neighborhood = self.graph.neighborhood(source, order=order)
    neighbor_names = self.graph.vs[neighborhood]["name"]
    neighbor_names.append(node_name)
    return list(neighbor_names)
Get the names of all neighbors of a node and the node itself .
10,304
def get_neighborhood_overlap(self, node1, node2, connection_type=None):
    """Return the intersection of two nodes' neighborhoods.

    ``connection_type`` may be None/'direct' (order 1) or
    'second-degree' (order 2); anything else raises.
    """
    if connection_type in (None, "direct"):
        order = 1
    elif connection_type == "second-degree":
        order = 2
    else:
        raise Exception(
            "Invalid option: {}. Valid options are direct and second-degree".format(
                connection_type))
    first = set(self.graph.neighborhood(node1, order=order))
    second = self.graph.neighborhood(node2, order=order)
    return first.intersection(second)
Get the intersection of two nodes' neighborhoods.
10,305
def parse(cls, parser, text, pos):
    """Match a simple value, rejecting keyword-like values ('and'/'or'/'not').

    Returns (remaining_text, result) where result is either the parsed
    value or a SyntaxError describing why the match failed.
    """
    if not text.strip():
        return text, SyntaxError("Invalid value")

    # Local grammar: a SpiresSimpleValue followed by anything (discarded).
    class Rule(object):
        grammar = attr('value', SpiresSimpleValue), omit(re.compile(".*"))

    try:
        tree = pypeg2.parse(text, Rule, whitespace="")
    except SyntaxError:
        return text, SyntaxError("Expected %r" % cls)
    else:
        r = tree.value
        # Boolean keywords are not valid simple values.
        if r.value.lower() in ('and', 'or', 'not'):
            return text, SyntaxError("Invalid value %s" % r.value)
    # Consume exactly the matched value from the input.
    return text[len(r.value):], r
Match simple values, excluding some keywords like 'and' and 'or'.
10,306
def get_field_infos(code, free_format):
    """Build the list of PIC field descriptors for a COBOL source fragment.

    Walks each parsed data-description row, tracking a running storage
    offset, and returns one ``PicFieldInfo`` per row.
    """
    offset = 0
    field_infos = []
    lines = _clean_code(code)
    previous_offset = 0
    for row in process_cobol(lines, free_format):
        fi = PicFieldInfo()
        fi.name = row["name"]
        fi.level = row["level"]
        fi.pic = row["pic"]
        fi.occurs = row["occurs"]
        fi.redefines = row["redefines"]
        fi.indexed_by = row["indexed_by"]
        # A REDEFINES entry restarts at the offset of the field it redefines.
        if fi.redefines:
            for fib in field_infos:
                if fib.name == fi.redefines:
                    offset = fib.offset
        # Level numbers reset the running offset: 01/77 records restart at 1,
        # level 78 (constants) resets to 0.
        if fi.level == 1:
            offset = 1
        if fi.level == 78:
            offset = 0
        if fi.level == 77:
            offset = 1
        fi.offset = offset
        # Level-88 condition names share the previous field's offset and do
        # not advance it.
        if fi.level == 88:
            fi.offset = previous_offset
        else:
            previous_offset = offset
        field_infos.append(fi)
        # Advance past this field's storage if it has a PIC clause.
        if row['pic']:
            offset += row['pic_info']['length']
    return field_infos
Gets the list of PIC field information from the given code.
10,307
def get_signed_url(self, params):
    """Return a Premier account signed url for the given query params."""
    params['client'] = self.client_id
    pieces = dict(
        protocol=self.protocol,
        domain=self.domain,
        service=self.service,
        params=urlencode(params),
    )
    pieces['url_part'] = '/maps/api/%(service)s/json?%(params)s' % pieces
    # HMAC-SHA1 signature over the path+query, keyed by the decoded secret.
    key = base64.urlsafe_b64decode(self.secret_key)
    digest = hmac.new(key, pieces['url_part'], hashlib.sha1).digest()
    pieces['signature'] = base64.urlsafe_b64encode(digest)
    return ('%(protocol)s://%(domain)s%(url_part)s'
            '&signature=%(signature)s' % pieces)
Returns a Premier account signed url .
10,308
def parse_json(self, page):
    """Decode *page* as JSON and return the results list, or None.

    When no results are present, the response status is checked (which
    may raise) before returning None.
    """
    if not isinstance(page, basestring):
        page = util.decode_page(page)
    self.doc = json.loads(page)
    results = self.doc.get(self.result_name, [])
    if results:
        return results
    self.check_status(self.doc.get('status'))
    return None
Returns json feed .
10,309
def _determine_case ( was_upper , words , string ) : case_type = 'unknown' if was_upper : case_type = 'upper' elif string . islower ( ) : case_type = 'lower' elif len ( words ) > 0 : camel_case = words [ 0 ] . islower ( ) pascal_case = words [ 0 ] . istitle ( ) or words [ 0 ] . isupper ( ) if camel_case or pascal_case : for word in words [ 1 : ] : c = word . istitle ( ) or word . isupper ( ) camel_case &= c pascal_case &= c if not c : break if camel_case : case_type = 'camel' elif pascal_case : case_type = 'pascal' else : case_type = 'mixed' return case_type
Determine case type of string .
10,310
def _advanced_acronym_detection(s, i, words, acronyms):
    """Detect acronyms in words[s:i] by matching against a known list.

    The joined candidate string is partitioned into matched acronym
    ranges plus single-character leftovers; words[s:i] is replaced by
    those pieces.  Returns the index of the last inserted piece.
    """
    # Join the single-letter run into one candidate string.
    acstr = ''.join(words[s:i])
    range_list = []
    # Character positions not yet covered by any acronym match.
    not_range = set(range(len(acstr)))
    for acronym in acronyms:
        rac = regex.compile(unicode(acronym))
        n = 0
        while True:
            m = rac.search(acstr, n)
            if not m:
                break
            a, b = m.start(), m.end()
            n = b
            # Keep the match only if it doesn't overlap a previous one.
            ok = True
            for r in range_list:
                if a < r[1] and b > r[0]:
                    ok = False
                    break
            if ok:
                range_list.append((a, b))
                for j in xrange(a, b):
                    not_range.remove(j)
    # Unmatched characters become single-letter pieces.
    for nr in not_range:
        range_list.append((nr, nr + 1))
    range_list.sort()
    # Replace words[s:i] with the detected pieces, in order.
    for _ in xrange(s, i):
        del words[s]
    for j in xrange(len(range_list)):
        r = range_list[j]
        words.insert(s + j, acstr[r[0]:r[1]])
    return s + len(range_list) - 1
Detect acronyms by checking against a list of acronyms .
10,311
def _simple_acronym_detection(s, i, words, *args):
    """Collapse the run of letters in words[s:i] into a single acronym word."""
    merged = ''.join(words[s:i])
    del words[s:i]
    words.insert(s, merged)
    return s
Detect acronyms based on runs of upper - case letters .
10,312
def _sanitize_acronyms(unsafe_acronyms):
    """Validate acronyms against the allowed pattern and upper-case them.

    Raises InvalidAcronymError on the first acronym that fails the check.
    """
    valid = regex.compile(u'^[\p{Ll}\p{Lu}\p{Nd}]+$')
    acronyms = []
    for candidate in unsafe_acronyms:
        if not valid.match(candidate):
            raise InvalidAcronymError(candidate)
        acronyms.append(candidate.upper())
    return acronyms
Check acronyms against regex .
10,313
def _normalize_words ( words , acronyms ) : for i , _ in enumerate ( words ) : if words [ i ] . upper ( ) in acronyms : words [ i ] = words [ i ] . upper ( ) else : if not words [ i ] . isupper ( ) : words [ i ] = words [ i ] . capitalize ( ) return words
Normalize case of each word to PascalCase .
10,314
def _separate_words(string):
    """Segment *string* into words, splitting on case changes and separators.

    Returns (words, separator, was_upper); separator positions are
    represented by None entries in *words*.
    """
    words = []
    separator = ""
    # i = scan position, s = start of the current word, p = previous char.
    i = 1
    s = 0
    p = string[0:1]
    # A fully upper-case string is lowered and the fact remembered.
    was_upper = False
    if string.isupper():
        string = string.lower()
        was_upper = True
    while i <= len(string):
        c = string[i:i + 1]
        split = False
        if i < len(string):
            # Split before an upper-case letter or on a sep/non-sep boundary.
            if UPPER.match(c):
                split = True
            elif NOTSEP.match(c) and SEP.match(p):
                split = True
            elif SEP.match(c) and NOTSEP.match(p):
                split = True
        else:
            # End of string always terminates the current word.
            split = True
        if split:
            if NOTSEP.match(p):
                words.append(string[s:i])
            else:
                # Record the first separator seen; mark its slot with None.
                if not separator:
                    separator = string[s:s + 1]
                words.append(None)
            s = i
        i += 1
        p = c
    return words, separator, was_upper
Segment string on separator into list of words .
10,315
def parse_case(string, acronyms=None, preserve_case=False):
    """Parse a string into a list of words plus its detected case type.

    Returns (words, case_type, separator).  With *acronyms*, known
    acronyms are matched explicitly; otherwise runs of single upper-case
    letters are collapsed heuristically.
    """
    words, separator, was_upper = _separate_words(string)
    if acronyms:
        acronyms = _sanitize_acronyms(acronyms)
        check_acronym = _advanced_acronym_detection
    else:
        acronyms = []
        check_acronym = _simple_acronym_detection
    # Track runs of single letters: s marks where the current run started.
    i = 0
    s = None
    while i < len(words):
        word = words[i]
        if word is not None and UPPER.match(word):
            if s is None:
                s = i
        elif s is not None:
            # The run ended; collapse it and resume after the replacement.
            i = check_acronym(s, i, words, acronyms) + 1
            s = None
        i += 1
    # Handle a run that reaches the end of the string.
    if s is not None:
        check_acronym(s, i, words, acronyms)
    # Separator positions were marked with None entries; drop them.
    words = [w for w in words if w is not None]
    case_type = _determine_case(was_upper, words, string)
    if preserve_case:
        if was_upper:
            words = [w.upper() for w in words]
    else:
        words = _normalize_words(words, acronyms)
    return words, case_type, separator
Parse a string into a list of words.
10,316
def send_email(self, user, subject, msg):
    """Stub mailer that prints instead of sending.

    Should be overwritten in the setup.
    """
    for label, value in (('To:', user), ('Subject:', subject)):
        print(label, value)
    print(msg)
Should be overwritten in the setup
10,317
def update(self, new_data: IntentDict):
    """Merge *new_data* (locale -> intents) into the stored dictionary."""
    for locale, data in new_data.items():
        self.dict.setdefault(locale, {}).update(data)
Receive an update from the loaders .
10,318
def get(self, key: Text, locale: Optional[Text]) -> List[Tuple[Text, ...]]:
    """Return the intent set for *key* in the chosen locale."""
    chosen = self.choose_locale(locale)
    return self.dict[chosen][key]
Get a single set of intents .
10,319
async def strings(self, request: Optional['Request'] = None) -> List[Tuple[Text, ...]]:
    """Return the strings of this intent, localized for *request*.

    Raises KeyError if the intent does not exist.
    """
    locale = await request.get_locale() if request else None
    return self.db.get(self.key, locale)
For the given request find the list of strings of that intent . If the intent does not exist it will raise a KeyError .
10,320
def get_unitless_standard_names():
    """Return (lazily loading) the standard_names allowed to be unitless."""
    global _UNITLESS_DB
    if _UNITLESS_DB is None:
        path = resource_filename('cc_plugin_ncei', 'data/unitless.json')
        with open(path, 'r') as f:
            _UNITLESS_DB = json.load(f)
    return _UNITLESS_DB
Returns a list of valid standard_names that are allowed to be unitless
10,321
def get_lat_variable(nc):
    """Return the name of the latitude variable in *nc*, or None."""
    # Prefer a variable literally named 'latitude'.
    if 'latitude' in nc.variables:
        return 'latitude'
    # Otherwise the first variable carrying the right standard_name.
    candidates = nc.get_variables_by_attributes(standard_name="latitude")
    return candidates[0].name if candidates else None
Returns the variable for latitude
10,322
def get_lon_variable(nc):
    """Return the name of the longitude variable in *nc*, or None."""
    # Prefer a variable literally named 'longitude'.
    if 'longitude' in nc.variables:
        return 'longitude'
    # Otherwise the first variable carrying the right standard_name.
    candidates = nc.get_variables_by_attributes(standard_name="longitude")
    return candidates[0].name if candidates else None
Returns the variable for longitude
10,323
def get_crs_variable(ds):
    """Return the variable name referenced by a grid_mapping attribute, or None."""
    for name in ds.variables:
        target = getattr(ds.variables[name], 'grid_mapping', '')
        # Only accept references that point at a real variable.
        if target and target in ds.variables:
            return target
    return None
Returns the name of the variable identified by a grid_mapping attribute
10,324
def is_2d_regular_grid(nc, variable):
    """Return True if *variable* is laid out on a 2D regular (x, y, t) grid."""
    dims = nc.variables[variable].dimensions
    cmatrix = coordinate_dimension_matrix(nc)
    if any(axis not in cmatrix for axis in ('x', 'y', 't')):
        return False
    x = get_lon_variable(nc)
    y = get_lat_variable(nc)
    t = get_time_variable(nc)
    # Each axis must be backed by exactly its own coordinate variable.
    if cmatrix['x'] != (x,) or cmatrix['y'] != (y,) or cmatrix['t'] != (t,):
        return False
    return len(dims) == 3 and x in dims and y in dims and t in dims
Returns True if the variable is a 2D Regular grid .
10,325
def handle_read(repo, **kwargs):
    """Handle reading repo information.

    A string repo name yields a stub description; repo objects get serialized.
    """
    log.info('read: %s %s' % (repo, kwargs))
    if type(repo) not in (unicode, str):
        return repo.serialize()
    return {'name': 'Repo', 'desc': 'Welcome to Grit', 'comment': ''}
handles reading repo information
10,326
def dict_from_object(obj: object):
    """Convert *obj* into a dict of its readable (non-underscore) attributes.

    Dicts are returned unchanged.
    """
    if isinstance(obj, dict):
        return obj
    public = (a for a in dir(obj) if not a.startswith('_'))
    return {name: getattr(obj, name) for name in public}
Convert a object into dictionary with all of its readable attributes .
10,327
def xgetattr(obj: object, name: str, default=_sentinel, getitem=False):
    """Get attribute *name* from *obj*, treating dicts as attribute bags.

    With getitem=True a dict lookup may raise KeyError.  Otherwise a
    missing key/attribute yields *default*, or AttributeError when no
    default was supplied.
    """
    if isinstance(obj, dict):
        if getitem:
            return obj[name]
        found = obj.get(name, default)
        return None if found is _sentinel else found
    found = getattr(obj, name, default)
    if found is _sentinel:
        # No default provided and the attribute is absent.
        msg = '%r object has no attribute %r' % (obj.__class__, name)
        raise AttributeError(msg)
    return found
Get attribute value from object .
10,328
def list_config_files() -> List[Text]:
    """Return the configuration files to load: defaults, then the env override."""
    default = os.path.join(os.path.dirname(__file__), 'default_settings.py')
    return [default, os.getenv(ENVIRONMENT_VARIABLE, '')]
This function returns the list of configuration files to load .
10,329
def camel_to_snake_case(name):
    """Convert a camelCase/PascalCase string to snake_case.

    Acronym runs are kept together ('HTMLParser' -> 'html_parser').
    The optional leading upper-case letter in the first alternative
    also captures the leading lower-case run of a camelCase name,
    which the previous pattern silently dropped.
    """
    pattern = r'[A-Z]?[a-z]+|[A-Z]+(?![a-z])'
    return '_'.join(map(str.lower, re.findall(pattern, name)))
Takes a camelCased string and converts to snake_case .
10,330
def match_prefix(prefix, line):
    """Return the position in *line* where *prefix* ends, or -1 if unmatched.

    Tabs are expanded (width 4) for matching; the returned index refers
    to the raw line.
    """
    expanded = line.expandtabs(4)
    m = re.match(prefix, expanded)
    if not m:
        # A blank continuation line: pad the newline away and retry.
        if re.match(prefix, expanded.replace('\n', ' ' * 99 + '\n')):
            return len(line) - 1
        return -1
    pos = m.end()
    if pos == 0:
        return 0
    # Map the expanded-tab position back onto the raw line.
    for i in range(1, len(line) + 1):
        if len(line[:i].expandtabs(4)) >= pos:
            return i
Check if the line starts with given prefix and return the position of the end of prefix . If the prefix is not matched return - 1 .
10,331
def expect_re(self, regexp):
    """Match *regexp* at the current position, past the line prefix.

    Stores and returns the match object, or None when the next line's
    prefix does not match.
    """
    line = self.next_line(require_prefix=False)
    prefix_len = self.match_prefix(self.prefix, line)
    if prefix_len < 0:
        return None
    match = self._expect_re(regexp, self.pos + prefix_len)
    self.match = match
    return match
Test against the given regular expression and returns the match object .
10,332
def next_line(self, require_prefix=True):
    """Return the next line of the source, or None at the end.

    With require_prefix, the line must match the current prefix.
    """
    if require_prefix:
        m = self.expect_re(r'(?m)[^\n]*?$\n?')
    else:
        m = self._expect_re(r'(?m)[^\n]*$\n?', self.pos)
    self.match = m
    return m.group() if m else None
Return the next line in the source .
10,333
def consume(self):
    """Advance pos past the current match and clear it.

    When the match ends a line, the prefix state is refreshed.
    """
    m = self.match
    if m:
        self.pos = m.end()
        if m.group()[-1] == '\n':
            self._update_prefix()
        self.match = None
Consume the body of source . pos will move forward .
10,334
def asDict(self):
    """Return a snapshot of the current state as a plain dict.

    The copy is not updated automatically.
    """
    return {field: getattr(self, field) for field in BackgroundTaskInfo.FIELDS}
asDict - Returns a copy of the current state as a dictionary . This copy will not be updated automatically .
10,335
def register(name, _callable=None):
    """Register a custom check under *name*; usable directly or as a decorator."""
    def wrapper(fn):
        registered_checks[name] = fn
        return fn
    return wrapper(_callable) if _callable else wrapper
A decorator used for register custom check .
10,336
def serialize(self):
    """Generate the messaging-type-related part of the message dictionary.

    Returns None when no messaging type is set.
    """
    if self.response is not None:
        return {'messaging_type': 'RESPONSE'}
    if self.update is not None:
        return {'messaging_type': 'UPDATE'}
    if self.tag is not None:
        return {
            'messaging_type': 'MESSAGE_TAG',
            'tag': self.tag.value,
        }
    if self.subscription is not None:
        return {'messaging_type': 'NON_PROMOTIONAL_SUBSCRIPTION'}
    return None
Generates the messaging - type - related part of the message dictionary .
10,337
async def patch_register(self, register: Dict, request: 'Request'):
    """Store all text options in the 'choices' sub-register.

    Both the rendered text and the optional intent key are stored so
    later input can be matched either by click payload or by the user
    typing something close to a quick reply's content.
    """
    choices = {}
    for option in self.options:
        if isinstance(option, QuickRepliesList.TextOption):
            choices[option.slug] = {
                'intent': option.intent.key if option.intent else None,
                'text': await render(option.text, request),
            }
    register['choices'] = choices
    return register
Store all options in the choices sub - register . We store both the text and the potential intent in order to match both regular quick reply clicks but also the user typing stuff on his keyboard that matches more or less the content of quick replies .
10,338
def is_sharable(self):
    """Sharable only if flagged as such and no child element blocks sharing."""
    if not self.sharable:
        return False
    return all(child.is_sharable() for child in self.elements)
Can only be sharable if marked as such and no child element is blocking sharing due to security reasons .
10,339
def from_value(cls, value):
    """Return the EChoice associated with *value*, if any.

    Deprecated in favor of ``get``; emits PendingDeprecationWarning.
    """
    warnings.warn(
        "{0}.{1} will be deprecated in a future release. "
        "Please use {0}.{2} instead".format(
            cls.__name__, cls.from_value.__name__, cls.get.__name__),
        PendingDeprecationWarning)
    return cls[value]
Return the EChoice object associated with this value if any .
10,340
def bernard_auth(func):
    """Authenticate the user from a header- or query-string-provided token.

    On failure responds 401; otherwise calls *func* with the decoded
    message and platform.
    """
    @wraps(func)
    async def wrapper(request: Request):
        def get_query_token():
            token_key = settings.WEBVIEW_TOKEN_KEY
            return request.query.get(token_key, '')

        def get_header_token():
            header_key = settings.WEBVIEW_HEADER_NAME
            return request.headers.get(header_key, '')

        # The header token wins over the query-string token.
        token = ''
        for candidate in (get_header_token(), get_query_token()):
            if candidate:
                token = candidate
                break

        try:
            body = await request.json()
        except ValueError:
            body = None

        msg, platform = await manager.message_from_token(token, body)

        if not msg:
            return json_response({
                'status': 'unauthorized',
                'message': 'No valid token found',
            }, status=401)

        return await func(msg, platform)
    return wrapper
Authenticates the users based on the query - string - provided token
10,341
async def postback_me(msg: BaseMessage, platform: Platform) -> Response:
    """Provide the front-end with details about the user.

    The output can be extended via the 'api_postback_me' middleware hook.
    """
    async def get_basic_info(_msg: BaseMessage, _platform: Platform):
        # Default payload when no middleware overrides the hook.
        user = _msg.get_user()
        return {
            'friendly_name': await user.get_friendly_name(),
            'locale': await user.get_locale(),
            'platform': _platform.NAME,
        }

    hook = MiddlewareManager.instance().get('api_postback_me', get_basic_info)
    return json_response(await hook(msg, platform))
Provides the front - end with details about the user . This output can be completed using the api_postback_me middleware hook .
10,342
async def postback_send(msg: BaseMessage, platform: Platform) -> Response:
    """Inject the POST body into the FSM as a Postback message."""
    await platform.inject_message(msg)
    return json_response({'status': 'ok'})
Injects the POST body into the FSM as a Postback message .
10,343
async def postback_analytics(msg: BaseMessage, platform: Platform) -> Response:
    """Forward an analytics event from a postback message to all providers."""
    try:
        pb = msg.get_layers()[0]
        assert isinstance(pb, Postback)
        user = msg.get_user()
        user_lang = await user.get_locale()
        user_id = user.id
        # Only the 'page_view' event is currently recognized.
        if pb.payload['event'] == 'page_view':
            func = 'page_view'
            path = pb.payload['path']
            title = pb.payload.get('title', '')
            args = [path, title, user_id, user_lang]
        else:
            return json_response({
                'status': 'unknown event',
                'message': f'"{pb.payload["event"]}" is not a recognized '
                           f'analytics event',
            })
        # Dispatch the event to every configured analytics provider.
        async for p in providers():
            await getattr(p, func)(*args)
    except (KeyError, IndexError, AssertionError, TypeError):
        # Malformed payload (missing keys, wrong layer type, ...) -> 400.
        return json_response({'status': 'missing data'}, status=400)
    else:
        return json_response({
            'status': 'ok',
        })
Makes a call to an analytics function .
10,344
def register(name, func=None):
    """Register a function as a control command; direct call or decorator."""
    def decorator(fn):
        ControlDaemon._register(name, fn)
        return fn
    if func:
        return decorator(func)
    return decorator
Function or decorator which registers a given function as a recognized control command .
10,345
def ping(daemon, channel, data=None):
    """Process the ping control message by publishing a pong reply."""
    if not channel:
        # Nowhere to reply to.
        return
    node_name = daemon.config['control'].get('node_name')
    reply = ['pong']
    # Include the node name slot whenever either field is present.
    if node_name or data:
        reply.append(node_name or '')
    if data:
        reply.append(data)
    # Best-effort publish: errors are deliberately swallowed.
    with utils.ignore_except():
        daemon.db.publish(channel, ':'.join(reply))
Process the ping control message .
10,346
def reload(daemon, load_type=None, spread=None):
    """Process the reload control message, optionally spreading the reload."""
    if load_type == 'immediate':
        spread = None
    elif load_type == 'spread':
        try:
            spread = float(spread)
        except (TypeError, ValueError):
            # Bad spread value: fall back to configuration below.
            load_type = None
    else:
        load_type = None
    if load_type is None:
        # Use the configured spread, if any.
        try:
            spread = float(daemon.config['control']['reload_spread'])
        except (TypeError, ValueError, KeyError):
            spread = None
    if spread:
        # Delay by a random fraction of the spread to stagger reloads.
        eventlet.spawn_after(random.random() * spread, daemon.reload)
    else:
        eventlet.spawn_n(daemon.reload)
Process the reload control message .
10,347
def set_limits(self, limits):
    """Install a new limit list (raw msgpack strings), skipping no-op updates.

    The checksum of the incoming list is compared to the current one;
    identical checksums mean no action is taken.
    """
    digest = hashlib.md5()
    for lim in limits:
        digest.update(lim)
    new_sum = digest.hexdigest()
    with self.limit_lock:
        if self.limit_sum != new_sum:
            self.limit_data = [msgpack.loads(lim) for lim in limits]
            self.limit_sum = new_sum
Set the limit data to the given list of limits . Limits are specified as the raw msgpack string representing the limit . Computes the checksum of the limits ; if the checksum is identical to the current one no action is taken .
10,348
def start(self):
    """Start the ControlDaemon: launch the listening thread, then trigger
    the initial limits load."""
    self.listen_thread = eventlet.spawn_n(self.listen)
    self.reload()
Starts the ControlDaemon by launching the listening thread and triggering the initial limits load .
10,349
def listen(self):
    """Listen for incoming control messages on the configured pub-sub channel.

    Messages have the form 'command[:arg1[:arg2...]]'; commands are
    resolved from the local registry or the 'turnstile.command'
    entrypoint group.
    """
    db = self.config.get_database('control')
    kwargs = {}
    if 'shard_hint' in self.config['control']:
        kwargs['shard_hint'] = self.config['control']['shard_hint']
    pubsub = db.pubsub(**kwargs)
    channel = self.config['control'].get('channel', 'control')
    pubsub.subscribe(channel)
    for msg in pubsub.listen():
        if (msg['type'] in ('pmessage', 'message') and
                msg['channel'] == channel):
            # Colon-separated payload: command:arg1:arg2...
            command, _sep, args = msg['data'].partition(':')
            if not command:
                continue
            # Underscore-prefixed commands are internal-only.
            if command[0] == '_':
                LOG.error("Cannot call internal command %r" % command)
                continue
            if command in self._commands:
                func = self._commands[command]
            else:
                # Resolve via entrypoints; cache the result (even a failed
                # lookup) so we don't retry on every message.
                func = utils.find_entrypoint('turnstile.command',
                                             command, compat=False)
                self._commands[command] = func
            if not func:
                LOG.error("No such command %r" % command)
                continue
            arglist = args.split(':') if args else []
            try:
                func(self, *arglist)
            except Exception:
                # A broken command must not kill the listener loop.
                LOG.exception("Failed to execute command %r arguments %r" %
                              (command, arglist))
                continue
Listen for incoming control messages .
10,350
def reload(self):
    """Reload the limits configuration from the database.

    Concurrent reloads are coalesced via the 'pending' semaphore; load
    failures are reported to the configured error key/channel.
    """
    # Non-blocking acquire: a reload is already in progress if it fails.
    if not self.pending.acquire(False):
        return
    control_args = self.config['control']
    try:
        key = control_args.get('limits_key', 'limits')
        self.limits.set_limits(self.db.zrange(key, 0, -1))
    except Exception:
        LOG.exception("Could not load limits")
        error_key = control_args.get('errors_key', 'errors')
        error_channel = control_args.get('errors_channel', 'errors')
        msg = "Failed to load limits: " + traceback.format_exc()
        # Best-effort error reporting; never let it break the daemon.
        with utils.ignore_except():
            self.db.sadd(error_key, msg)
        with utils.ignore_except():
            self.db.publish(error_channel, msg)
    finally:
        self.pending.release()
Reloads the limits configuration from the database .
10,351
def enforce_policy(rule):
    """Decorator enforcing the given policy *rule* on an API endpoint.

    NOTE(review): when enforcement fails, the wrapped view silently
    returns None rather than erroring — confirm this is intentional.
    """
    def wrapper(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # g.cred carries the request credentials in the app context.
            if enforcer.enforce(rule, {}, g.cred):
                return func(*args, **kwargs)
        return wrapped
    return wrapper
Enforce a policy to a API .
10,352
def initialize(config):
    """Initialize a connection to the Redis database from *config*.

    Supports a custom client class, typed connection options, an
    optional connection pool (with its own class and connection-class
    overrides), and Unix-socket connections.
    """
    # Resolve the client class (custom entrypoint or plain StrictRedis).
    if 'redis_client' in config:
        client = utils.find_entrypoint('turnstile.redis_client',
                                       config['redis_client'], required=True)
    else:
        client = redis.StrictRedis
    # Pull recognized connection options, coercing to their declared types.
    kwargs = {}
    for cfg_var, type_ in REDIS_CONFIGS.items():
        if cfg_var in config:
            kwargs[cfg_var] = type_(config[cfg_var])
    if 'host' not in kwargs and 'unix_socket_path' not in kwargs:
        raise redis.ConnectionError("No host specified for redis database")
    # Collect 'connection_pool.*' options separately; anything else that is
    # not a recognized/excluded option is passed through to the client.
    cpool_class = None
    cpool = {}
    extra_kwargs = {}
    for key, value in config.items():
        if key.startswith('connection_pool.'):
            _dummy, _sep, varname = key.partition('.')
            if varname == 'connection_class':
                cpool[varname] = utils.find_entrypoint(
                    'turnstile.connection_class', value, required=True)
            elif varname == 'max_connections':
                cpool[varname] = int(value)
            elif varname == 'parser_class':
                cpool[varname] = utils.find_entrypoint(
                    'turnstile.parser_class', value, required=True)
            else:
                cpool[varname] = value
        elif key not in REDIS_CONFIGS and key not in REDIS_EXCLUDES:
            extra_kwargs[key] = value
    # Any pool option implies a pool; an explicit class overrides the default.
    if cpool:
        cpool_class = redis.ConnectionPool
    if 'connection_pool' in config:
        cpool_class = utils.find_entrypoint('turnstile.connection_pool',
                                            config['connection_pool'],
                                            required=True)
    if cpool_class:
        cpool.update(kwargs)
        if 'connection_class' not in cpool:
            if 'unix_socket_path' in cpool:
                # Unix sockets use 'path' and must not carry host/port.
                if 'host' in cpool:
                    del cpool['host']
                if 'port' in cpool:
                    del cpool['port']
                cpool['path'] = cpool['unix_socket_path']
                del cpool['unix_socket_path']
                cpool['connection_class'] = redis.UnixDomainSocketConnection
            else:
                cpool['connection_class'] = redis.Connection
        # With a pool, the client receives only the pool (plus extras).
        kwargs = dict(connection_pool=cpool_class(**cpool))
    kwargs.update(extra_kwargs)
    return client(**kwargs)
Initialize a connection to the Redis database .
10,353
def limits_hydrate(db, lims):
    """Hydrate every dehydrated limit in *lims* against *db*."""
    return [limits.Limit.hydrate(db, lim) for lim in lims]
Helper function to hydrate a list of limits .
10,354
def limit_update(db, key, limits):
    """Safely replace the limits stored under *key* with *limits*.

    Uses WATCH/MULTI so concurrent updates retry rather than interleave;
    ordering is preserved through the zset scores.
    """
    # Serialize the desired limits; the set form is used for diffing.
    desired = [msgpack.dumps(l.dehydrate()) for l in limits]
    desired_set = set(desired)
    with db.pipeline() as pipe:
        while True:
            try:
                pipe.watch(key)
                existing = set(pipe.zrange(key, 0, -1))
                pipe.multi()
                # Drop limits no longer wanted, then (re)add the full list
                # with scores encoding the desired order.
                for lim in existing - desired_set:
                    pipe.zrem(key, lim)
                for idx, lim in enumerate(desired):
                    pipe.zadd(key, (idx + 1) * 10, lim)
                pipe.execute()
            except redis.WatchError:
                # Someone else touched the key; retry the transaction.
                continue
            else:
                break
Safely updates the list of limits in the database .
10,355
def command(db, channel, command, *args):
    """Publish *command* (colon-joined with its args) to all Turnstile instances."""
    message = [command]
    message.extend(str(arg) for arg in args)
    db.publish(channel, ':'.join(message))
Utility function to issue a command to all Turnstile instances .
10,356
def _tokenize_latex ( self , exp ) : tokens = [ ] prevexp = "" while exp : t , exp = self . _get_next_token ( exp ) if t . strip ( ) != "" : tokens . append ( t ) if prevexp == exp : break prevexp = exp return tokens
Internal method to tokenize latex
10,357
def _convert_query(self, query):
    """Convert a LaTeX query into its ranked nearest-neighbor result structure."""
    bow = self.dictionary.doc2bow(self._tokenize_latex(query))
    sims = self.index[bow]
    ranked = sorted(sims, key=lambda item: -item[1])
    if not ranked:
        return {"neighbors": []}
    return {
        "neighbors": [
            {
                self.columns[0]: {"data": self.docs[doc_id], "fmt": "math"},
                self.columns[1]: {"data": float(score)},
            }
            for doc_id, score in ranked
        ]
    }
Convert query into an indexable string .
10,358
def join(path1, path2):
    """Join two path elements with exactly one '/' between them."""
    left_slash = path1.endswith('/')
    right_slash = path2.startswith('/')
    if left_slash and right_slash:
        return path1 + path2[1:]
    if left_slash or right_slash:
        return path1 + path2
    return path1 + '/' + path2
nicely join two path elements together
10,359
def print_math(math_expression_lst, name="math.html", out='html',
               formatter=lambda x: x):
    """Render a list of LaTeX math expressions into an HTML layout.

    out='html' writes a standalone file named *name* in the current
    directory; out='notebook' displays the result inline in a Jupyter
    notebook.
    """
    # Refresh the 'viz' asset directory from the bundled template;
    # ignore_errors replaces the previous bare `except: pass`.
    shutil.rmtree('viz', ignore_errors=True)
    pth = get_cur_path() + print_math_template_path
    shutil.copytree(pth, 'viz')
    html_loc = None
    if out == "html":
        html_loc = pth + "standalone_index.html"
    if out == "notebook":
        from IPython.display import display, HTML
        html_loc = pth + "notebook_index.html"
    # Close the template promptly instead of leaking the handle.
    with open(html_loc) as template:
        html = template.read()
    html = html.replace("__MATH_LIST__", json.dumps(math_expression_lst))
    if out == "notebook":
        display(HTML(html))
    elif out == "html":
        with open(name, "w+") as out_f:
            out_f.write(html)
Converts LaTeX math expressions into an html layout . Creates a html file in the directory where print_math is called by default . Displays math to jupyter notebook if notebook argument is specified .
10,360
def log_error(self, msg, *args):
    """Log an error via the configured logger, or print to stdout without one."""
    if self._logger is None:
        print(msg % args)
    else:
        self._logger.error(msg, *args)
Log an error or print in stdout if no logger .
10,361
def _get_value_opc_attr(self, attr_name, prec_decimals=2):
    """Return the rounded sensor attribute, or None if absent or unimplemented."""
    try:
        value = getattr(self, attr_name)
        if value is not None:
            return round(value, prec_decimals)
    except I2cVariableNotImplemented:
        # The sensor variant doesn't expose this variable.
        pass
    return None
Return sensor attribute with precission or None if not present .
10,362
def current_state_str(self):
    """Return a human-readable summary of the sensor's current readings.

    Readings that are absent (None) are skipped.  Returns "Bad sample"
    when the last sample was not OK.  (The temperature line previously
    had a garbled '% temperature' interpolation, which was a syntax
    error.)
    """
    if not self.sample_ok:
        return "Bad sample"
    parts = []
    # (label template, attribute) pairs; missing attributes read as None.
    for template, attr in (
            ('Temp: %s ºC', 'temperature'),
            ('Humid: %s %%', 'humidity'),
            ('Press: %s mb', 'pressure'),
            ('Light: %s lux', 'light_level')):
        value = self._get_value_opc_attr(attr)
        if value is not None:
            parts.append(template % value)
    return ', '.join(parts)
Return string representation of the current state of the sensor .
10,363
def _applyMultichan ( samples , func ) : if len ( samples . shape ) == 1 or samples . shape [ 1 ] == 1 : newsamples = func ( samples ) else : y = np . array ( [ ] ) for i in range ( samples . shape [ 1 ] ) : y = np . concatenate ( ( y , func ( samples [ : , i ] ) ) ) newsamples = y . reshape ( samples . shape [ 1 ] , - 1 ) . T return newsamples
Apply func to each channel of audio data in samples
10,364
def _resample_obspy(samples, sr, newsr, window='hanning', lowpass=True):
    """Resample using the Fourier method, low-pass filtering when downsampling.

    Like resample_scipy, but applies a Chebyshev type-II low-pass filter
    before reducing the sample rate to limit aliasing.
    """
    from scipy.signal import resample
    from math import ceil
    factor = sr / float(newsr)
    if newsr < sr and lowpass:
        if factor > 16:
            logger.info("Automatic filter design is unstable for resampling "
                        "factors (current sampling rate/new sampling rate) "
                        "above 16. Manual resampling is necessary.")
        # Cut-off at half the (smaller) target rate, scaled by the factor.
        freq = min(sr, newsr) * 0.5 / float(factor)
        logger.debug(f"resample_obspy: lowpass {freq}")
        samples = lowpass_cheby2(samples, freq=freq, sr=sr, maxorder=12)
    num = int(ceil(len(samples) / factor))
    return _applyMultichan(samples,
                           lambda S: resample(S, num, window=window))
Resample using Fourier method . The same as resample_scipy but with low - pass filtering for upsampling
10,365
def resample(samples, oldsr, newsr):
    """Resample *samples* from *oldsr* to *newsr* with the first working backend."""
    backends = [_resample_samplerate, _resample_scikits, _resample_nnresample,
                _resample_obspy, _resample_scipy]
    # Backends signal "unavailable" by returning None.
    for backend in backends:
        result = backend(samples, oldsr, newsr)
        if result is not None:
            return result
Resample samples with given samplerate sr to new samplerate newsr
10,366
def get_package_version():
    """Return the package version parsed from policy/__init__.py without importing it."""
    base = os.path.abspath(os.path.dirname(__file__))
    init_path = os.path.join(base, 'policy', '__init__.py')
    with open(init_path, mode='rt', encoding='utf-8') as initf:
        for line in initf:
            m = version.match(line.strip())
            if m:
                return m.groups()[0]
return package version without importing it
10,367
def get_long_description():
    """Return the package's long description (the README contents)."""
    base = os.path.abspath(os.path.dirname(__file__))
    readme_path = os.path.join(base, 'README.md')
    with open(readme_path, mode='rt', encoding='utf-8') as readme:
        return readme.read()
Return the package's long description.
10,368
def get_install_requires():
    """Return install requirements from requirements.txt, or [] if absent."""
    base = os.path.abspath(os.path.dirname(__file__))
    requirements_file = os.path.join(base, 'requirements.txt')
    if not os.path.exists(requirements_file):
        return []
    with open(requirements_file, mode='rt', encoding='utf-8') as f:
        return f.read().splitlines()
Return the package's install requirements.
10,369
def main(flags):
    """Download every sheet configured in settings.GOOGLE_SHEET_SYNC."""
    downloader = SheetDownloader(flags)
    downloader.init()
    for file_info in settings.GOOGLE_SHEET_SYNC['files']:
        print('Downloading {}'.format(file_info['path']))
        downloader.download_sheet(
            file_info['path'],
            file_info['sheet'],
            file_info['range'],
        )
Download all sheets as configured .
10,370
def download_sheet(self, file_path, sheet_id, cell_range):
    """Fetch *cell_range* from spreadsheet *sheet_id* and write it to
    *file_path* as CSV (LF line endings)."""
    response = self.service.spreadsheets().values().get(
        spreadsheetId=sheet_id,
        range=cell_range,
    ).execute()
    rows = response.get('values', [])
    with open(file_path, newline='', encoding='utf-8', mode='w') as out:
        csv.writer(out, lineterminator='\n').writerows(rows)
Download the cell range from the sheet and store it as CSV in the file_path file .
10,371
def getFriendlyString(self):
    """Return the version as a dot-joined string with trailing zero
    components dropped (e.g. 1.2.0.0 -> "1.2"); cached on the instance."""
    if self._friendlyString is not None:
        return self._friendlyString
    components = [self.getIntMajor(), self.getIntMinor(),
                  self.getIntBuild(), self.getIntRevision()]
    # Strip zero components from the end only; inner zeros are kept.
    while components and components[-1] == 0:
        components.pop()
    friendly = ".".join(str(c) for c in components)
    self._friendlyString = friendly
    return friendly
Returns the version printed in a friendly way .
10,372
def getVersions(self):
    """Return Version objects for directory entries whose names parse as
    versions; an empty list when the directory does not exist."""
    if not os.path.exists(self._path):
        return []
    versions = []
    for name in os.listdir(self._path):
        try:
            versions.append(Version(name))
        except InvalidVersionException:
            # Entry name is not a version; skip it.
            pass
    return versions
Returns the versions of the suitable entries available in the directory - an empty list if no such entry is available
10,373
def setUsers(self, *args, **kwargs):
    """Populate ``self['users']`` with the ACTIVE users of this branch.

    Lazily installs ``MambuUsers`` as ``self.mambuusersclass`` on first use
    (the import is deferred to avoid a circular import). Returns 1.
    """
    try:
        active = [u for u in self.mambuusersclass(branchId=self['id'],
                                                  *args, **kwargs)
                  if u['userState'] == "ACTIVE"]
    except AttributeError:
        from .mambuuser import MambuUsers
        self.mambuusersclass = MambuUsers
        active = [u for u in self.mambuusersclass(branchId=self['id'],
                                                  *args, **kwargs)
                  if u['userState'] == "ACTIVE"]
    self['users'] = active
    return 1
Adds the active users for this branch to a users field .
10,374
def unindent(self):
    """Un-indent the text at the cursor position.

    With a selection, delegates to ``unindent_selection``; otherwise removes
    up to one tab-stop worth of spaces immediately before the cursor, never
    deleting past ``self.min_column``.
    """
    _logger().debug('unindent')
    cursor = self.editor.textCursor()
    _logger().debug('cursor has selection %r', cursor.hasSelection())
    if cursor.hasSelection():
        # Group the selection un-indent into a single undo step.
        cursor.beginEditBlock()
        self.unindent_selection(cursor)
        cursor.endEditBlock()
        self.editor.setTextCursor(cursor)
    else:
        tab_len = self.editor.tab_length
        indentation = cursor.positionInBlock()
        # Offset by min_column so protected leading columns (e.g. a fixed
        # margin) are never un-indented into.
        indentation -= self.min_column
        if indentation == 0:
            return
        # Delete back to the previous tab stop; a full tab's worth when the
        # cursor is already aligned on a tab stop.
        max_spaces = indentation % tab_len
        if max_spaces == 0:
            max_spaces = tab_len
        # Only spaces actually present before the cursor may be deleted.
        spaces = self.count_deletable_spaces(cursor, max_spaces)
        _logger().info('deleting %d space before cursor' % spaces)
        cursor.beginEditBlock()
        for _ in range(spaces):
            cursor.deletePreviousChar()
        cursor.endEditBlock()
        self.editor.setTextCursor(cursor)
        _logger().debug(cursor.block().text())
Un - indents text at cursor position .
10,375
def with_name(cls, name, id_user=0, **extra_data):
    """Instantiate a WorkflowEngine with the given *name*.

    :param name: name of the workflow
    :param id_user: id of the owning user (default 0)
    :param extra_data: forwarded to the constructor

    Fix: the caller-supplied ``id_user`` was previously discarded — a
    literal ``0`` was always passed to the constructor. It is now forwarded.
    """
    return cls(name=name, id_user=id_user, **extra_data)
Instantiate a WorkflowEngine given a name or UUID .
10,376
def from_uuid(cls, uuid, **extra_data):
    """Load an existing workflow from the database by UUID.

    :raises LookupError: when no Workflow row matches *uuid*.
    """
    model = Workflow.query.get(uuid)
    if model is None:
        raise LookupError("No workflow with UUID {} was found".format(uuid))
    engine = cls(model=model, **extra_data)
    # NOTE: ``== None`` (not ``is None``) is required here — SQLAlchemy
    # overloads ``==`` to build the SQL ``IS NULL`` expression.
    engine.objects = WorkflowObjectModel.query.filter(
        WorkflowObjectModel.id_workflow == uuid,
        WorkflowObjectModel.id_parent == None,
    ).all()
    return engine
Load an existing workflow from the database given a UUID .
10,377
def continue_object(self, workflow_object, restart_point='restart_task',
                    task_offset=1, stop_on_halt=False):
    """Continue the workflow for a single object from *restart_point*.

    *restart_point* is one of ``restart_task`` (re-run current task),
    ``continue_next`` (next task) or ``restart_prev`` (previous task).
    """
    task_mapping = {
        'restart_task': 'current',
        'continue_next': 'next',
        'restart_prev': 'prev',
    }
    # Resume from the object's recorded position, defaulting to the start.
    self.state.callback_pos = workflow_object.callback_pos or [0]
    self.restart(task=task_mapping[restart_point], obj='first',
                 objects=[workflow_object], stop_on_halt=stop_on_halt)
Continue workflow for one given object from restart_point .
10,378
def has_completed(self):
    """Return True when every root object of this workflow is COMPLETED."""
    # ``== None`` builds SQL IS NULL (SQLAlchemy); root objects only.
    completed_count = (
        WorkflowObjectModel.query
        .filter(
            WorkflowObjectModel.id_workflow == self.uuid,
            WorkflowObjectModel.id_parent == None,
        )
        .filter(WorkflowObjectModel.status.in_(
            [workflow_object_class.known_statuses.COMPLETED]))
        .count()
    )
    return completed_count == len(list(self.objects))
Return True if workflow is fully completed .
10,379
def set_workflow_by_name(self, workflow_name):
    """Configure this engine to run the registered workflow *workflow_name*.

    :raises WorkflowDefinitionError: when no such workflow is registered.
    """
    from .proxies import workflows
    if workflow_name not in workflows:
        raise WorkflowDefinitionError(
            "Workflow '%s' does not exist" % (workflow_name,),
            workflow_name=workflow_name)
    definition = workflows[workflow_name]
    self.workflow_definition = definition
    self.callbacks.replace(definition.workflow)
Configure the workflow to run by the name of this one .
10,380
def after_each_callback(eng, callback_func, obj):
    """Record position, last task name and task history on *obj* after each
    workflow callback runs."""
    obj.callback_pos = eng.state.callback_pos
    obj.extra_data["_last_task_name"] = callback_func.__name__
    entry = get_task_history(callback_func)
    # Create the history list on first use, then append.
    obj.extra_data.setdefault("_task_history", []).append(entry)
Take action after every WF callback .
10,381
def before_object(eng, objects, obj):
    """Clear any stale error message on *obj* before its processing begins."""
    super(InvenioProcessingFactory, InvenioProcessingFactory).before_object(
        eng, objects, obj)
    # Drop a leftover error message from a previous (failed) run, if any.
    obj.extra_data.pop("_error_msg", None)
    db.session.commit()
Take action before the processing of an object begins .
10,382
def after_object(eng, objects, obj):
    """Persist *obj* as COMPLETED once its processing finishes."""
    super(InvenioProcessingFactory, InvenioProcessingFactory).after_object(
        eng, objects, obj)
    obj.save(status=obj.known_statuses.COMPLETED,
             id_workflow=eng.model.uuid)
    db.session.commit()
Take action once the processing of an object completes.
10,383
def before_processing(eng, objects):
    """Mark the workflow RUNNING and commit before any object is processed."""
    super(InvenioProcessingFactory,
          InvenioProcessingFactory).before_processing(eng, objects)
    eng.save(WorkflowStatus.RUNNING)
    db.session.commit()
Execute before processing the workflow .
10,384
def after_processing(eng, objects):
    """Persist the final workflow status: COMPLETED when every object
    finished, HALTED otherwise."""
    super(InvenioProcessingFactory,
          InvenioProcessingFactory).after_processing(eng, objects)
    final_status = (WorkflowStatus.COMPLETED if eng.has_completed
                    else WorkflowStatus.HALTED)
    eng.save(final_status)
    db.session.commit()
Update the workflow status once processing of all objects finishes.
10,385
def Exception(obj, eng, callbacks, exc_info):
    """Persist ERROR state (engine, and object when present) for an
    unhandled exception, then delegate to the parent handler.

    Note: the name intentionally shadows the builtin — it is the
    transition-action hook's required name.
    """
    exception_repr = ''.join(traceback.format_exception(*exc_info))
    eng.log.error("Error:\n%s" % (exception_repr))
    if obj:
        # Keep the traceback on the object so the failure can be inspected.
        obj.extra_data['_error_msg'] = exception_repr
        obj.save(status=obj.known_statuses.ERROR,
                 callback_pos=eng.state.callback_pos,
                 id_workflow=eng.uuid)
    eng.save(WorkflowStatus.ERROR)
    db.session.commit()
    super(InvenioTransitionAction, InvenioTransitionAction).Exception(
        obj, eng, callbacks, exc_info)
Handle general exceptions in workflow saving states .
10,386
def WaitProcessing(obj, eng, callbacks, exc_info):
    """Persist WAITING (object) / HALTED (engine) state when a task raises
    WaitProcessing, then hand off to HaltProcessing."""
    exception = exc_info[1]
    obj.set_action(exception.action, exception.message)
    obj.save(status=eng.object_status.WAITING,
             callback_pos=eng.state.callback_pos,
             id_workflow=eng.uuid)
    eng.save(WorkflowStatus.HALTED)
    eng.log.warning("Workflow '%s' waiting at task %s with message: %s",
                    eng.name, eng.current_taskname or "Unknown",
                    exception.message)
    db.session.commit()
    TransitionActions.HaltProcessing(obj, eng, callbacks, exc_info)
Take actions when WaitProcessing is raised .
10,387
def StopProcessing(obj, eng, callbacks, exc_info):
    """Mark both the object and the workflow COMPLETED, then delegate to the
    parent StopProcessing handler."""
    exception = exc_info[1]
    obj.save(status=eng.object_status.COMPLETED, id_workflow=eng.uuid)
    eng.save(WorkflowStatus.COMPLETED)
    obj.log.warning("Workflow '%s' stopped at task %s with message: %s",
                    eng.name, eng.current_taskname or "Unknown",
                    exception.message)
    db.session.commit()
    super(InvenioTransitionAction, InvenioTransitionAction).StopProcessing(
        obj, eng, callbacks, exc_info)
Stop the engine and mark the workflow as completed.
10,388
def SkipToken(obj, eng, callbacks, exc_info):
    """Skip the current object and move on to the next (raises Continue)."""
    eng.log.debug("Skipped running this object: {0}".format(obj.id))
    raise Continue
Take action when SkipToken is raised .
10,389
def AbortProcessing(obj, eng, callbacks, exc_info):
    """Abort processing of the current object entirely (raises Break)."""
    eng.log.debug("Processing was aborted for object: {0}".format(obj.id))
    raise Break
Take action when AbortProcessing is raised .
10,390
def edits1(word):
    """Return all strings one edit away from *word*.

    An edit is a single-character deletion, adjacent transposition,
    replacement, or insertion, using lowercase ASCII letters.

    Fix: removed the leftover debug ``print`` statements, which spammed
    stdout with the full (potentially huge) candidate lists on every call.
    The return value is unchanged and may contain duplicates; wrap in
    ``set()`` if unique candidates are needed.
    """
    letters = 'qwertyuiopasdfghjklzxcvbnm'
    # Every way to split the word into a (left, right) pair.
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return deletes + transposes + replaces + inserts
All edits that are one edit away from word .
10,391
def to_locus(variant_or_locus):
    """Return a Locus for either a Locus (returned as-is) or a Variant."""
    if isinstance(variant_or_locus, Locus):
        return variant_or_locus
    # Prefer a precomputed .locus attribute when the object provides one.
    _missing = object()
    locus = getattr(variant_or_locus, "locus", _missing)
    if locus is not _missing:
        return locus
    # Otherwise build one from the object's inclusive coordinates.
    return Locus.from_inclusive_coordinates(
        variant_or_locus.contig,
        variant_or_locus.start,
        variant_or_locus.end)
Return a Locus object for a Variant instance .
10,392
def pileup(self, locus):
    """Return the Pileup at a single-base *locus*.

    :raises ValueError: when *locus* spans more than one base.
    """
    single = to_locus(locus)
    if len(single.positions) == 1:
        return self.pileups[single]
    raise ValueError("Not a single-base locus: %s" % single)
Given a 1 - base locus return the Pileup at that locus .
10,393
def at(self, *loci):
    """Return a new PileupCollection restricted to the given loci.

    Multi-base loci are expanded into their constituent single-base loci.
    """
    expanded = []
    for obj in loci:
        locus = to_locus(obj)
        for position in locus.positions:
            expanded.append(
                Locus.from_interbase_coordinates(locus.contig, position))
    selected = {locus: self.pileups[locus] for locus in expanded}
    return PileupCollection(selected, self)
Return a new PileupCollection instance including only pileups for the specified loci .
10,394
def reads(self):
    """Return the unique reads across all pileups in this collection.

    When the same read key appears with multiple alignments, the alignment
    with the highest mapping quality wins.
    """
    best = {}
    for pileup in self.pileups.values():
        for element in pileup.elements:
            alignment = element.alignment
            key = read_key(alignment)
            current = best.get(key)
            if current is None or (
                    alignment.mapping_quality > current.mapping_quality):
                best[key] = alignment
    return list(best.values())
The reads in this PileupCollection . All reads will have an alignment that overlaps at least one of the included loci .
10,395
def read_attributes(self, attributes=None):
    """Collect read attributes across this collection into a DataFrame.

    :param attributes: optional collection of column names to include;
        None includes everything. Unknown names raise ValueError.
    :return: pandas.DataFrame with one row per read; columns are the fixed
        read attributes, one ``TAG_<key>`` column per alignment tag seen,
        and a ``pysam_alignment_record`` column holding the raw records.
    """
    def include(attribute):
        # None means "include every column".
        return attributes is None or attribute in attributes
    reads = self.reads()
    possible_column_names = list(PileupCollection._READ_ATTRIBUTE_NAMES)
    result = OrderedDict(
        (name, [getattr(read, name) for read in reads])
        for name in PileupCollection._READ_ATTRIBUTE_NAMES
        if include(name))
    if reads:
        # Tag columns are dynamic: union of tag keys over all reads; reads
        # missing a tag get None in that column.
        tag_dicts = [dict(x.get_tags()) for x in reads]
        tag_keys = set.union(*[set(item.keys()) for item in tag_dicts])
        for tag_key in sorted(tag_keys):
            column_name = "TAG_%s" % tag_key
            possible_column_names.append(column_name)
            if include(column_name):
                result[column_name] = [d.get(tag_key) for d in tag_dicts]
    possible_column_names.append("pysam_alignment_record")
    if include("pysam_alignment_record"):
        result["pysam_alignment_record"] = reads
    if attributes is not None:
        # Reject any requested attribute that produced no column.
        for attribute in attributes:
            if attribute not in result:
                raise ValueError(
                    "No such attribute: %s. Valid attributes are: %s"
                    % (attribute, " ".join(possible_column_names)))
        assert set(attributes) == set(result)
    return pandas.DataFrame(result)
Collect read attributes across reads in this PileupCollection into a pandas . DataFrame .
10,396
def group_by_allele(self, locus):
    """Split this collection by the allele each read carries at *locus*.

    :return: OrderedDict mapping allele string -> PileupCollection of the
        supporting reads, ordered by descending read count, then allele.

    For a multi-base locus, each read's allele is built up base by base;
    reads missing from any intermediate position are dropped. For an empty
    (insertion) locus, the allele is taken from the bases following the
    position just before the locus.
    """
    locus = to_locus(locus)
    read_to_allele = None
    loci = []
    if locus.positions:
        # Thread each read's allele through the locus one base at a time.
        for position in locus.positions:
            base_position = Locus.from_interbase_coordinates(
                locus.contig, position)
            loci.append(base_position)
            new_read_to_allele = {}
            for element in self.pileups[base_position]:
                allele_prefix = ""
                key = alignment_key(element.alignment)
                if read_to_allele is not None:
                    try:
                        allele_prefix = read_to_allele[key]
                    except KeyError:
                        # Read not seen at an earlier position: drop it.
                        continue
                allele = allele_prefix + element.bases
                new_read_to_allele[key] = allele
            read_to_allele = new_read_to_allele
    else:
        # Zero-length locus (insertion site): look at the position before it
        # and take the inserted bases (everything after the anchor base).
        position_before = Locus.from_interbase_coordinates(
            locus.contig, locus.start)
        loci.append(position_before)
        read_to_allele = {}
        for element in self.pileups[position_before]:
            allele = element.bases[1:]
            read_to_allele[alignment_key(element.alignment)] = allele
    # Regroup the pileup elements into one PileupCollection per allele.
    split = defaultdict(lambda: PileupCollection(pileups={}, parent=self))
    for locus in loci:
        pileup = self.pileups[locus]
        for e in pileup.elements:
            key = read_to_allele.get(alignment_key(e.alignment))
            if key is not None:
                if locus in split[key].pileups:
                    split[key].pileups[locus].append(e)
                else:
                    split[key].pileups[locus] = Pileup(locus, [e])
    def sorter(pair):
        # Most-supported allele first; ties broken alphabetically.
        (allele, pileup_collection) = pair
        return (-1 * pileup_collection.num_reads(), allele)
    return OrderedDict(sorted(split.items(), key=sorter))
Split the PileupCollection by the alleles suggested by the reads at the specified locus .
10,397
def allele_summary(self, locus, score=lambda x: x.num_reads()):
    """Summarize the evidence for each allele at *locus*.

    :param score: function applied to each allele's PileupCollection
        (defaults to its read count).
    :return: list of (allele, score) pairs.
    """
    grouped = self.group_by_allele(to_locus(locus))
    return [(allele, score(collection))
            for allele, collection in grouped.items()]
Convenience method to summarize the evidence for each of the alleles present at a locus . Applies a score function to the PileupCollection associated with each allele .
10,398
def group_by_match(self, variant):
    """Split this collection by whether reads support the variant's ref
    allele, its alt allele, or some other allele.

    :return: MatchingEvidence built from (ref, alt, other), where ref and
        alt map allele string -> PileupCollection and other holds the
        remaining alleles.
    """
    locus = to_locus(variant)
    if len(variant.ref) != len(locus.positions):
        # A ref/locus length mismatch suggests a malformed variant; warn
        # but proceed anyway.
        logging.warning(
            "Ref is length %d but locus has %d bases in variant: %s" %
            (len(variant.ref), len(locus.positions), str(variant)))
    alleles_dict = self.group_by_allele(locus)
    single_base_loci = [
        Locus.from_interbase_coordinates(locus.contig, position)
        for position in locus.positions]
    # Unsupported ref/alt alleles still get an (empty) collection so both
    # keys are always present in the result.
    empty_pileups = dict(
        (locus, Pileup(locus=locus, elements=[]))
        for locus in single_base_loci)
    empty_collection = PileupCollection(pileups=empty_pileups, parent=self)
    ref = {variant.ref: alleles_dict.pop(variant.ref, empty_collection)}
    alt = {variant.alt: alleles_dict.pop(variant.alt, empty_collection)}
    other = alleles_dict
    return MatchingEvidence(ref, alt, other)
Given a variant, split the PileupCollection based on whether the data supports the reference allele, the alternate allele, or neither.
10,399
def match_summary(self, variant, score=lambda x: x.num_reads()):
    """Summarize the evidence for and against *variant*.

    :param score: function applied to each merged PileupCollection
        (defaults to its read count).
    :return: list of (allele-name, score) pairs: ref first, then alt, then
        any other observed alleles.
    """
    split = self.group_by_match(variant)

    def label(allele_map):
        return ",".join(allele_map)

    def merged_score(collections):
        return score(PileupCollection.merge(*collections))

    summary = [
        (label(split.ref), merged_score(split.ref.values())),
        (label(split.alt), merged_score(split.alt.values())),
    ]
    summary.extend((allele, score(collection))
                   for (allele, collection) in split.other.items())
    return summary
Convenience method to summarize the evidence for and against a variant using a user - specified score function .