idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
9,700
def blob_generator(self):
    """Create a blob generator.

    Yields one blob per event in the aanet/ROOT file.  In bare mode the
    raw aanet event objects are passed through untouched; otherwise each
    blob also carries the converted header and (when found) metadata.
    """
    # aa must be imported so ROOT can load the aanet dictionaries.
    import aa
    from ROOT import EventFile
    filename = self.filename
    log.info("Reading from file: {0}".format(filename))
    if not os.path.exists(filename):
        # NOTE(review): this only warns -- the EventFile() call below is
        # still attempted and will abort via SystemExit; confirm whether
        # a `return` was intended here.
        log.warning(filename + " not available: continue without it")
    try:
        event_file = EventFile(filename)
    except Exception:
        raise SystemExit("Could not open file")
    log.info("Generating blobs through new aanet API...")
    # NOTE(review): looks like this should be a log call -- confirm that
    # `self.print` exists on this class.
    self.print("Reading metadata using 'JPrintMeta'")
    meta_parser = MetaParser(filename=filename)
    meta = meta_parser.get_table()
    if meta is None:
        self.log.warning("No metadata found, this means no data provenance!")
    if self.bare:
        # Bare mode: hand out the raw aanet objects.
        log.info("Skipping data conversion, only passing bare aanet data")
        for event in event_file:
            yield Blob({'evt': event, 'event_file': event_file})
    else:
        log.info("Unpacking aanet header into dictionary...")
        hdr = self._parse_header(event_file.header)
        if not hdr:
            log.info("Empty header dict found, skipping...")
            self.raw_header = None
        else:
            log.info("Converting Header dict to Table...")
            self.raw_header = self._convert_header_dict_to_table(hdr)
            log.info("Creating HDF5Header")
            self.header = HDF5Header.from_table(self.raw_header)
        for event in event_file:
            log.debug('Reading event...')
            blob = self._read_event(event, filename)
            log.debug('Reading header...')
            blob["RawHeader"] = self.raw_header
            blob["Header"] = self.header
            if meta is not None:
                blob['Meta'] = meta
            self.group_id += 1
            yield blob
    del event_file
Create a blob generator .
9,701
def parse_string(self, string):
    """Parse the ASCII (bytes) output of JPrintMeta.

    Splits the output into per-application sections, each terminated by
    a "<application> Linux" line, and records every section via
    ``_record_app_data``.
    """
    self.log.info("Parsing ASCII data")
    if not string:
        self.log.warning("Empty metadata")
        return
    lines = string.splitlines()
    application_data = []
    application = lines[0].split()[0]
    self.log.debug("Reading meta information for '%s'" % application)
    for line in lines:
        if application is None:
            # Bug fix: determine the new application *before* logging it
            # (previously this line logged `None`).
            application = line.split()[0]
            self.log.debug("Reading meta information for '%s'" % application)
        application_data.append(line)
        # "<application> Linux ..." marks the end of this app's section.
        if line.startswith(application + b' Linux'):
            self._record_app_data(application_data)
            application_data = []
            application = None
Parse ASCII output of JPrintMeta
9,702
def _record_app_data ( self , data ) : name , revision = data [ 0 ] . split ( ) root_version = data [ 1 ] . split ( ) [ 1 ] command = b'\n' . join ( data [ 3 : ] ) . split ( b'\n' + name + b' Linux' ) [ 0 ] self . meta . append ( { 'application_name' : np . string_ ( name ) , 'revision' : np . string_ ( revision ) , 'root_version' : np . string_ ( root_version ) , 'command' : np . string_ ( command ) } )
Parse raw metadata output for a single application
9,703
def get_table(self, name='Meta', h5loc='/meta'):
    """Convert the collected metadata to a KM3Pipe ``Table``.

    Returns ``None`` when no metadata has been recorded.
    """
    if not self.meta:
        return None
    # Transpose the list of records into per-field value lists.
    columns = defaultdict(list)
    for record in self.meta:
        for field, value in record.items():
            columns[field].append(value)
    # Size each string column to its longest entry.
    dtypes = [
        (field, 'S{}'.format(max(len(v) for v in values)))
        for field, values in columns.items()
    ]
    return Table(columns, dtype=dtypes, h5loc=h5loc, name='Meta', h5singleton=True)
Convert metadata to a KM3Pipe Table .
9,704
def itermovieshash(self):
    """Yield every key of the movies hash in database traversal order."""
    key = self._db.firstkey()
    while key is not None:
        yield key
        key = self._db.nextkey(key)
Iterate over movies hash stored in the database .
9,705
def _max_retries_for_error(self, error):
    """Map a Datastore error response to a retry budget.

    ``None`` means retry without limit (ABORTED inside a transaction);
    otherwise the per-status budget from ``_MAX_RETRIES`` is used.
    """
    status = error.get("status")
    inside_transaction = get_transactions() > 0
    if status == "ABORTED" and inside_transaction:
        return None
    return self._MAX_RETRIES.get(status)
Handles Datastore response errors according to their documentation .
9,706
def anonymous_login(services):
    """Create clients for the given services without Globus Auth credentials.

    ``services`` may be one service name or a list of names; unknown or
    failing services are reported and skipped.
    """
    if isinstance(services, str):
        services = [services]
    clients = {}
    for name in services:
        try:
            clients[name] = KNOWN_CLIENTS[name](http_timeout=STD_TIMEOUT)
        except KeyError:
            print("Error: No known client for '{}' service.".format(name))
        except Exception:
            print("Error: Unable to create client for '{}' service.\n"
                  "Anonymous access may not be allowed.".format(name))
    return clients
Initialize services without authenticating to Globus Auth .
9,707
def logout(token_dir=DEFAULT_CRED_PATH):
    """Delete every ``*tokens.json`` file in ``token_dir``.

    Forces re-authentication to all services; a token file that has
    already vanished is ignored.
    """
    for entry in os.listdir(token_dir):
        if not entry.endswith("tokens.json"):
            continue
        try:
            os.remove(os.path.join(token_dir, entry))
        except OSError as err:
            # A missing file is fine; anything else is a real error.
            if err.errno != errno.ENOENT:
                raise
Remove ALL tokens in the token directory. This will force re-authentication to all services.
9,708
def format_gmeta(data, acl=None, identifier=None):
    """Format input for ingestion into Globus Search.

    A dict becomes a GMetaEntry (``acl`` and ``identifier`` required);
    a list of GMetaEntry becomes a GMetaList wrapped in a GIngest.
    """
    if isinstance(data, dict):
        if acl is None or identifier is None:
            raise ValueError("acl and identifier are required when formatting a GMetaEntry.")
        if isinstance(acl, str):
            acl = [acl]
        prefixed_acl = []
        for principal in acl:
            if principal != "public" and not principal.lower().startswith("urn:"):
                # A bare UUID could name an identity or a group, so grant
                # visibility under both URN namespaces.
                prefixed_acl.append("urn:globus:auth:identity:" + principal.lower())
                prefixed_acl.append("urn:globus:groups:id:" + principal.lower())
            else:
                prefixed_acl.append(principal)
        return {
            "@datatype": "GMetaEntry",
            "@version": "2016-11-09",
            "subject": identifier,
            "visible_to": prefixed_acl,
            "content": data
        }
    if isinstance(data, list):
        return {
            "@datatype": "GIngest",
            "@version": "2016-11-09",
            "ingest_type": "GMetaList",
            "ingest_data": {
                "@datatype": "GMetaList",
                "@version": "2016-11-09",
                "gmeta": data
            }
        }
    raise TypeError("Cannot format '" + str(type(data)) + "' into GMeta.")
Format input into GMeta format suitable for ingesting into Globus Search . Formats a dictionary into a GMetaEntry . Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest .
9,709
def gmeta_pop(gmeta, info=False):
    """Strip the GMeta wrapping from a Globus Search result.

    Accepts a ``GlobusHTTPResponse``, a JSON string, or an already-parsed
    dict and returns the flat list of result entries.  With ``info=True``
    a ``(results, info_dict)`` tuple is returned instead.

    Raises TypeError for any other input type.
    """
    # isinstance() instead of `type(x) is ...` so subclasses are accepted.
    if isinstance(gmeta, GlobusHTTPResponse):
        gmeta = json.loads(gmeta.text)
    elif isinstance(gmeta, str):
        gmeta = json.loads(gmeta)
    elif not isinstance(gmeta, dict):
        raise TypeError("gmeta must be dict, GlobusHTTPResponse, or JSON string")
    results = []
    for res in gmeta["gmeta"]:
        for con in res["content"]:
            results.append(con)
    if info:
        fyi = {"total_query_matches": gmeta.get("total")}
        return results, fyi
    return results
Remove GMeta wrapping from a Globus Search result . This function can be called on the raw GlobusHTTPResponse that Search returns or a string or dictionary representation of it .
9,710
def translate_index(index_name):
    """Resolve a known Globus Search index name to its UUID.

    Falls back to asking the Search service; names that cannot be
    disambiguated are returned unchanged.
    """
    uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())
    if uuid:
        return uuid
    try:
        index_info = globus_sdk.SearchClient().get_index(index_name).data
        if not isinstance(index_info, dict):
            raise ValueError("Multiple UUIDs possible")
        return index_info.get("id", index_name)
    except Exception:
        return index_name
Translate a known Globus Search index into the index UUID . The UUID is the proper way to access indices and will eventually be the only way . This method will return names it cannot disambiguate .
9,711
def quick_transfer(transfer_client, source_ep, dest_ep, path_list, interval=None, retries=10, notify=True):
    """Run a Globus Transfer via ``custom_transfer`` and wait for the outcome.

    ``retries`` bounds how often a failing transfer is re-driven
    (-1 retries forever).  Returns a dict with the success flag, task id
    and an error description ("No error" on success).
    """
    if retries is None:
        retries = 0
    transfer = custom_transfer(transfer_client, source_ep, dest_ep, path_list, notify=notify)
    res = next(transfer)
    attempts = 0
    try:
        while True:
            keep_going = attempts < retries or retries == -1
            res = transfer.send(keep_going)
            if keep_going:
                attempts += 1
    except StopIteration:
        pass
    if res["success"]:
        error = "No error"
    else:
        fatal = res.get("fatal_error", {})
        error = "{}: {}".format(fatal.get("code", "Error"), fatal.get("description", "Unknown"))
    return {
        "success": res["success"],
        "task_id": res["task_id"],
        "error": error
    }
Perform a Globus Transfer and monitor for success .
9,712
def insensitive_comparison(item1, item2, type_insensitive=False, string_insensitive=False):
    """Compare two items without regard to order.

    ``type_insensitive`` allows differently-typed but equal items to
    match; ``string_insensitive`` compares strings case- and
    whitespace-insensitively (and without regard to character order).
    """
    if not type_insensitive and type(item1) != type(item2):
        return False
    if isinstance(item1, Mapping):
        if not isinstance(item2, Mapping):
            return False
        if not len(item1) == len(item2):
            return False
        # Keys are compared as unordered lists; type-insensitively because
        # the key views/lists themselves should not fail on type.
        if not insensitive_comparison(list(item1.keys()), list(item2.keys()), type_insensitive=True):
            return False
        for key, val in item1.items():
            if not insensitive_comparison(item1[key], item2[key],
                                          type_insensitive=type_insensitive,
                                          string_insensitive=string_insensitive):
                return False
        return True
    elif isinstance(item1, str):
        if not isinstance(item2, str):
            return False
        if not len(item1) == len(item2) and not string_insensitive:
            return False
        if string_insensitive:
            if len(item1) <= 1:
                return item1.lower() == item2.lower()
            # Compare the lowercased, non-whitespace characters as
            # unordered lists.
            item1_list = [c for c in item1.lower() if not c.isspace()]
            item2_list = [c for c in item2.lower() if not c.isspace()]
            return insensitive_comparison(item1_list, item2_list,
                                          type_insensitive=type_insensitive,
                                          string_insensitive=string_insensitive)
        else:
            return item1 == item2
    elif isinstance(item1, Container) and isinstance(item1, Iterable):
        if not isinstance(item2, Container) or not isinstance(item2, Iterable):
            return False
        if not len(item1) == len(item2):
            return False
        # Unordered comparison: tick matched elements off a copy of item2.
        item2_copy = list(deepcopy(item2))
        remove_failed = False
        for elem in item1:
            matched = False
            for candidate in item2:
                if insensitive_comparison(elem, candidate,
                                          type_insensitive=type_insensitive,
                                          string_insensitive=string_insensitive):
                    matched = True
                    try:
                        item2_copy.remove(candidate)
                    except ValueError:
                        # Candidate already consumed by an earlier element.
                        remove_failed = True
                    break
            if not matched:
                return False
        if not remove_failed:
            # Every element consumed exactly once -> equal iff nothing left.
            return len(item2_copy) == 0
        else:
            # Fall back to a symmetric containment check.
            for elem in item2:
                matched = False
                for candidate in item1:
                    if insensitive_comparison(elem, candidate,
                                              type_insensitive=type_insensitive,
                                              string_insensitive=string_insensitive):
                        matched = True
                        break
                if not matched:
                    return False
            return True
    else:
        return item1 == item2
Compare two items without regard to order .
9,713
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text[, created_at]) tuples from a (gzipped) JSON file.

    Each line must hold one tweet object or a list of them; malformed
    lines are skipped with a message on stderr.
    """
    if json_file[-2:] == 'gz':
        handle = gzip.open(json_file, 'rt')
    else:
        handle = io.open(json_file, mode='rt', encoding='utf8')
    for line in handle:
        try:
            parsed = json.loads(line)
            tweets = parsed if type(parsed) is list else [parsed]
            for tweet in tweets:
                user = tweet['user']['screen_name'].lower()
                if include_date:
                    yield (user, tweet['text'], tweet['created_at'])
                elif 'full_text' in tweet:
                    yield (user, tweet['full_text'])
                else:
                    yield (user, tweet['text'])
        except Exception as e:
            sys.stderr.write('skipping json error: %s\n' % e)
Yield screen_name text tuples from a json file .
9,714
def extract_tweets(json_file):
    """Yield (screen_name, joined_text) tuples, one per consecutive user group.

    The text is the space-joined concatenation of the user's tweets.
    """
    grouped = groupby(parse_json(json_file), lambda row: row[0])
    for screen_name, rows in grouped:
        yield screen_name, ' '.join(row[1] for row in rows)
Yield screen_name string tuples where the string is the concatenation of all tweets of this user .
9,715
def vectorize(json_file, vec, dofit=True):
    """Return (screen_names, X): one matrix row of term counts per account.

    ``vec`` is a scikit-learn style vectorizer; with ``dofit`` the
    vocabulary is (re)fitted, otherwise the existing one is reused.
    """
    screen_names = [name for name, _ in extract_tweets(json_file)]
    texts = (text for _, text in extract_tweets(json_file))
    X = vec.fit_transform(texts) if dofit else vec.transform(texts)
    return screen_names, X
Return a matrix where each row corresponds to a Twitter account and each column corresponds to the number of times a term is used by that account .
9,716
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
    """Map screen_name -> set of integer follower ids read from ``fname``.

    Lines with fewer than four whitespace-separated fields are ignored
    (field 1 is the screen name, fields 2+ are follower ids -- the first
    field is unused here).  Blacklisted accounts are reported and
    skipped; only accounts with more than ``min_followers`` and at most
    ``max_followers`` followers are kept.
    """
    result = {}
    with open(fname, 'rt') as fh:
        for line in fh:
            fields = line.split()
            if len(fields) <= 3:
                continue
            handle = fields[1].lower()
            if handle in blacklist:
                print('skipping exemplar', handle)
                continue
            followers = set(int(x) for x in fields[2:])
            if min_followers < len(followers) <= max_followers:
                result[handle] = followers
    return result
Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids .
9,717
def jaccard_merge(brands, exemplars):
    """Jaccard similarity of each brand's followers against ALL exemplar followers.

    All exemplar follower sets are merged into one big pseudo-account
    before comparing.
    """
    merged_followers = set()
    for follower_set in exemplars.values():
        merged_followers |= follower_set
    return {brand: _jaccard(followers, merged_followers) for brand, followers in brands}
Return the average Jaccard similarity between a brand's followers and the followers of each exemplar. We merge all exemplar followers into one big pseudo-account.
9,718
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Cosine similarity between each brand's followers and the exemplars.

    With ``weighted_avg`` each exemplar is weighted by 1/|followers|;
    with ``sqrt`` the final scores are square-rooted.
    """
    scores = {}
    exemplar_sets = list(exemplars.values())
    for brand, followers in brands:
        sims = [_cosine(followers, others) for others in exemplar_sets]
        if weighted_avg:
            weights = [1. / len(others) for others in exemplar_sets]
            scores[brand] = np.average(sims, weights=weights)
        else:
            scores[brand] = 1. * sum(sims) / len(exemplars)
    if sqrt:
        scores = {b: math.sqrt(s) for b, s in scores.items()}
    return scores
Return the cosine similarity between a brand's followers and the exemplars.
9,719
def suggest_filename(metadata):
    """Build a "NN Title" filename (like Google) from song metadata.

    Checks the three track-number field spellings in turn; without any
    track field the number falls back to 00.  Invalid filename
    characters are replaced before returning.
    """
    has_title = 'title' in metadata
    if has_title and 'track_number' in metadata:
        name = f"{metadata['track_number']:0>2} {metadata['title']}"
    elif has_title and 'trackNumber' in metadata:
        name = f"{metadata['trackNumber']:0>2} {metadata['title']}"
    elif has_title and 'tracknumber' in metadata:
        track = _split_number_field(list_to_single_value(metadata['tracknumber']))
        title = list_to_single_value(metadata['title'])
        name = f"{track:0>2} {title}"
    else:
        name = f"00 {list_to_single_value(metadata.get('title', ['']))}"
    return _replace_invalid_characters(name)
Generate a filename like Google for a song based on metadata .
9,720
def template_to_filepath(template, metadata, template_patterns=None):
    """Create directory structure and file name based on a metadata template.

    ``template`` may contain pattern keys (e.g. ``%title%``) replaced
    from ``metadata``, plus the special ``%suggested%`` token which
    expands to :func:`suggest_filename`'s result.
    """
    path = Path(template)
    if template_patterns is None:
        template_patterns = TEMPLATE_PATTERNS
    suggested_filename = suggest_filename(metadata)
    if (path == Path.cwd() or path == Path('%suggested%')):
        filepath = Path(suggested_filename)
    elif any(template_pattern in path.parts for template_pattern in template_patterns):
        if template.endswith(('/', '\\')):
            # Bare directory template: append the suggested file name.
            template += suggested_filename
        path = Path(template.replace('%suggested%', suggested_filename))
        parts = []
        for part in path.parts:
            if part == path.anchor:
                parts.append(part)
            else:
                for key in template_patterns:
                    if (key in part and any(field in metadata for field in template_patterns[key])):
                        # First metadata field that backs this pattern key.
                        field = more_itertools.first_true(template_patterns[key], pred=lambda k: k in metadata)
                        if key.startswith(('%disc', '%track')):
                            # Numeric patterns: normalise "3/12"-style values;
                            # "...2%" variants are zero-padded to two digits.
                            # NOTE(review): this writes the normalised value
                            # back into the caller's metadata dict.
                            number = _split_number_field(str(list_to_single_value(metadata[field])))
                            if key.endswith('2%'):
                                metadata[field] = number.zfill(2)
                            else:
                                metadata[field] = number
                        part = part.replace(key, list_to_single_value(metadata[field]))
                parts.append(_replace_invalid_characters(part))
        filepath = Path(*parts)
    elif '%suggested%' in template:
        filepath = Path(template.replace('%suggested%', suggested_filename))
    elif template.endswith(('/', '\\')):
        filepath = path / suggested_filename
    else:
        filepath = path
    return filepath
Create directory structure and file name based on metadata template .
9,721
def _match_field ( field_value , pattern , ignore_case = False , normalize_values = False ) : if normalize_values : ignore_case = True normalize = normalize_value if normalize_values else lambda x : str ( x ) search = functools . partial ( re . search , flags = re . I ) if ignore_case else re . search if isinstance ( field_value , list ) : return any ( search ( pattern , normalize ( value ) ) for value in field_value ) else : return search ( pattern , normalize ( field_value ) )
Match an item metadata field value by pattern .
9,722
def _match_item(item, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
    """Return whether ``item``'s tags satisfy the field/pattern kwargs.

    ``any_all`` (``any`` or ``all``) decides how the individual pattern
    matches are combined.
    """
    tags = get_item_tags(item)
    matches = (
        _match_field(get_field(tags, field), pattern,
                     ignore_case=ignore_case, normalize_values=normalize_values)
        for field, patterns in kwargs.items()
        for pattern in patterns
    )
    return any_all(matches)
Match items by metadata .
9,723
def exclude_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
    """Iterate the items that do NOT match the given metadata filters.

    Without filter kwargs every item is passed through unchanged.
    """
    if not kwargs:
        return iter(items)
    match = functools.partial(
        _match_item, any_all=any_all, ignore_case=ignore_case,
        normalize_values=normalize_values, **kwargs)
    return filterfalse(match, items)
Exclude items by matching metadata .
9,724
def include_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
    """Iterate the items that DO match the given metadata filters.

    Without filter kwargs every item is passed through unchanged.
    """
    if not kwargs:
        return iter(items)
    match = functools.partial(
        _match_item, any_all=any_all, ignore_case=ignore_case,
        normalize_values=normalize_values, **kwargs)
    return filter(match, items)
Include items by matching metadata .
9,725
def percentile(a, q):
    """Compute the q-th percentile(s) of the (flattened) data.

    Simpler than numpy's version: always flattens the input.  ``q`` may
    be a single number or a list/tuple of percentiles in [0, 100]; the
    result is a float or a list of floats accordingly, or ``None`` for
    empty data.

    Raises ValueError for out-of-range percentiles or unsupported ``q``.
    """
    if not a:
        return None
    if isinstance(q, (float, int)):
        qq = [q]
    elif isinstance(q, (tuple, list)):
        # Bug fix: copy so the caller's list is not mutated by the
        # in-place /100 scaling below.
        qq = list(q)
    else:
        raise ValueError("Quantile type {} not understood".format(type(q)))
    if isinstance(a, (float, int)):
        a = [a]
    for i in range(len(qq)):
        if qq[i] < 0. or qq[i] > 100.:
            raise ValueError("Percentiles must be in the range [0,100]")
        qq[i] /= 100.
    a = sorted(flatten(a))
    r = []
    for frac in qq:
        k = (len(a) - 1) * frac
        f = math.floor(k)
        c = math.ceil(k)
        if f == c:
            # k is integral: the percentile is an exact data point.
            r.append(float(a[int(k)]))
            continue
        # Linear interpolation between the two surrounding points.
        d0 = a[int(f)] * (c - k)
        d1 = a[int(c)] * (k - f)
        r.append(float(d0 + d1))
    if len(r) == 1:
        return r[0]
    return r
Compute the qth percentile of the data along the specified axis . Simpler version than the numpy version that always flattens input arrays .
9,726
def _filter_closest(self, lat, lon, stations):
    """Return the station from ``stations`` nearest to (lat, lon).

    Distances are great-circle kilometres via ``distance.distance``.
    """
    origin = (lat, lon)
    best = None
    best_km = None
    for candidate in stations:
        km = distance.distance(origin, (candidate.latitude, candidate.longitude)).km
        if not best or km < best_km:
            best = candidate
            best_km = km
    return best
Helper to filter the closest station to a given location .
9,727
async def get(cls, websession, lat, lon):
    """Build a ``Station`` bound to the weather station nearest (lat, lon)."""
    self = Station(websession)
    available = await self.api.stations()
    self.station = self._filter_closest(lat, lon, available)
    logger.info("Using %s as weather station", self.station.local)
    return self
Retrieve the nearest station .
9,728
def load_channels(self):
    """Load the channels and tools found under the plugin path.

    Imports one module per channel name from ``<path>/channels``,
    instantiates the matching CamelCase class, and appends optional
    Tool/Assets channels when configured.
    """
    channels = []
    for channel_name in self.channel_names:
        channel_path = os.path.join(self.path, "channels")
        sys.path.append(self.path)
        # imp is deprecated upstream; kept as-is to preserve behaviour.
        mod = imp.load_module(channel_name, *imp.find_module(channel_name, [channel_path]))
        # e.g. "my_channel" -> class "MyChannel"
        cls = getattr(mod, channel_name.title().replace("_", ""))
        channel_id = channel_name.split("_")[0]
        try:
            channels.append(cls(channel_id, up_to_timestamp=None))
        except TypeError:
            # Constructor without the up_to_timestamp keyword.
            channels.append(cls(channel_id))
    if self.has_tools:
        tool_path = os.path.join(self.path, "tools")
        channel_id = self.channel_id_prefix + "_" + "tools"
        channel = ToolChannel(channel_id, tool_path, up_to_timestamp=utcnow())
        channels.append(channel)
    if self.has_assets:
        assset_path = os.path.join(os.path.abspath(self.path), "assets")
        channel_id = self.channel_id_prefix + "_" + "assets"
        channel = AssetsFileChannel(channel_id, assset_path, up_to_timestamp=utcnow())
        channels.append(channel)
    return channels
Loads the channels and tools given the plugin path specified
9,729
def chunk_count(swatch):
    """Return the number of byte-chunks in a swatch object.

    A color dict counts as one chunk; a folder dict counts as its
    start/end chunks plus one per contained color; a list is summed
    recursively.
    """
    # isinstance() instead of `type(...) is dict` (idiomatic type check).
    if isinstance(swatch, dict):
        if 'data' in swatch:
            return 1
        if 'swatches' in swatch:
            return 2 + len(swatch['swatches'])
    else:
        return sum(map(chunk_count, swatch))
return the number of byte - chunks in a swatch object
9,730
def chunk_for_color(obj):
    """Build the byte-chunk for a single color: b'\\x00\\x01' + length + payload."""
    title = obj['name'] + '\0'
    payload = struct.pack('>H', len(title))
    payload += title.encode('utf-16be')
    mode = obj['data']['mode'].encode()
    values = obj['data']['values']
    color_type = obj['type']
    value_formats = {b'RGB': '!fff', b'Gray': '!f', b'CMYK': '!ffff', b'LAB': '!fff'}
    if mode in value_formats:
        # The mode is stored as a space-padded 4-byte tag.
        payload += struct.pack('!4s', mode.decode().ljust(4).encode())
        payload += struct.pack(value_formats[mode], *values)
    known_types = ['Global', 'Spot', 'Process']
    if color_type in known_types:
        payload += struct.pack('>h', known_types.index(color_type))
    return b'\x00\x01' + struct.pack('>I', len(payload)) + payload
builds up a byte - chunk for a color
9,731
def chunk_for_folder(obj):
    """Build the byte-chunk for a folder: group start, colors, group end."""
    title = obj['name'] + '\0'
    body = struct.pack('>H', len(title)) + title.encode('utf-16be')
    head = b'\xC0\x01' + struct.pack('>I', len(body))
    color_chunks = b''.join(chunk_for_color(swatch) for swatch in obj['swatches'])
    # Group-end marker carries a zero-length payload.
    return head + body + color_chunks + b'\xC0\x02' + b'\x00\x00\x00\x00'
produce a byte - chunk for a folder of colors
9,732
def iter_lines(filename):
    """Yield the first token of each non-blank line in ``filename``."""
    with open(filename, 'rt') as handle:
        for raw in handle:
            stripped = raw.strip()
            if stripped:
                yield stripped.split()[0]
Iterate over screen names in a file one per line .
9,733
def fetch_tweets(account_file, outfile, limit):
    """Fetch up to ``limit`` tweets per account in ``account_file``.

    Each tweet is written as one JSON line to ``outfile``.
    """
    print('fetching tweets for accounts in', account_file)
    # Context manager so the output file is always closed (the original
    # never closed it).
    with io.open(outfile, 'wt') as outf:
        for screen_name in iter_lines(account_file):
            print('\nFetching tweets for %s' % screen_name)
            for tweet in twutil.collect.tweets_for_user(screen_name, limit):
                # Normalise the screen name to the one we queried with.
                tweet['user']['screen_name'] = screen_name
                outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
            outf.flush()
Fetch up to limit tweets for each account in account_file and write to outfile .
9,734
def fetch_exemplars(keyword, outfile, n=50):
    """Fetch the top ``n`` lists for ``keyword`` and save exemplar accounts.

    Writes "handle<TAB>count" lines, where count is the number of
    distinct lists each Twitter handle appears on.
    """
    list_urls = fetch_lists(keyword, n)
    print('found %d lists for %s' % (len(list_urls), keyword))
    counts = Counter()
    for list_url in list_urls:
        counts.update(fetch_list_members(list_url))
    # Context manager instead of manual open/close.
    with io.open(outfile, 'wt') as outf:
        for handle in sorted(counts):
            outf.write('%s\t%d\n' % (handle, counts[handle]))
    print('saved exemplars to', outfile)
Fetch top lists matching this keyword, then return Twitter screen names along with the number of different lists on which each appears.
9,735
def _init_controlhost(self):
    """Set up the ControlHost connection to JLigier and subscribe to tags."""
    log.debug("Connecting to JLigier")
    self.client = Client(self.host, self.port)
    self.client._connect()
    log.debug("Subscribing to tags: {0}".format(self.tags))
    # self.tags is a comma-separated string of tag names.
    for tag in self.tags.split(','):
        self.client.subscribe(tag.strip(), mode=self.subscription_mode)
    log.debug("Controlhost initialisation done.")
Set up the controlhost connection
9,736
def process(self, blob):
    """Wait for the next ControlHost packet and put it into ``blob``.

    Raises StopIteration (drains the pipeline) when nothing arrives
    within ``self.timeout`` seconds.
    """
    try:
        log.debug("Waiting for queue items.")
        prefix, data = self.queue.get(timeout=self.timeout)
        log.debug("Got {0} bytes from queue.".format(len(data)))
    except Empty:
        log.warning("ControlHost timeout ({0}s) reached".format(self.timeout))
        raise StopIteration("ControlHost timeout reached.")
    blob[self.key_for_prefix] = prefix
    blob[self.key_for_data] = data
    return blob
Wait for the next packet and put it in the blob
9,737
def finish(self):
    """Clean up the JLigier ControlHost connection."""
    log.debug("Disconnecting from JLigier.")
    # Shut the socket down in both directions before disconnecting.
    self.client.socket.shutdown(socket.SHUT_RDWR)
    self.client._disconnect()
Clean up the JLigier controlhost connection
9,738
def list_variables(self):
    """List available variables, honouring the feature and variable filters."""
    codes = self._apply_features_filter(self._get_station_codes())
    found = self._list_variables(codes)
    if hasattr(self, "_variables") and self.variables is not None:
        found.intersection_update(set(self.variables))
    return list(found)
List available variables and applies any filters .
9,739
def _list_variables(self, station_codes):
    """Query the observation endpoint and scrape the variable codes offered
    by the given stations."""
    var_pattern = re.compile(r"\n\s([A-Z]{2}[A-Z0-9]{0,1})\(\w+\)")
    payload = {
        "state": "nil",
        "hsa": "nil",
        "of": "3",
        "extraids": " ".join(station_codes),
        "sinceday": -1,
    }
    response = requests.post(self.obs_retrieval_url, data=payload)
    response.raise_for_status()
    return set(var_pattern.findall(response.text))
Internal helper to list the variables for the given station codes .
9,740
def _apply_features_filter ( self , station_codes ) : if hasattr ( self , "features" ) and self . features is not None : station_codes = set ( station_codes ) station_codes = list ( station_codes . intersection ( set ( self . features ) ) ) return station_codes
If the features filter is set this will return the intersection of those filter items and the given station codes .
9,741
def _get_station_codes(self, force=False):
    """Get and cache the list of station codes, optionally bbox-filtered.

    With ``force`` the cache is refreshed.  When ``self.bbox`` is set,
    states are pre-filtered via a Natural Earth shapefile and the
    resulting stations are filtered again by exact coordinates.
    """
    if not force and self.station_codes is not None:
        return self.station_codes
    state_urls = self._get_state_urls()
    state_matches = None
    if self.bbox:
        # Find the states/provinces intersecting the bbox; Canadian
        # provinces collapse to the single "CN" code.
        with collection(
            os.path.join(
                "resources",
                "ne_50m_admin_1_states_provinces_lakes_shp.shp",
            ),
            "r",
        ) as c:
            geom_matches = [x["properties"] for x in c.filter(bbox=self.bbox)]
            state_matches = [
                x["postal"] if x["admin"] != "Canada" else "CN"
                for x in geom_matches
            ]
    self.station_codes = []
    for state_url in state_urls:
        if state_matches is not None:
            # State abbreviation is the URL's file stem.
            state_abbr = state_url.split("/")[-1].split(".")[0]
            if state_abbr not in state_matches:
                continue
        self.station_codes.extend(self._get_stations_for_state(state_url))
    if self.bbox:
        # Second pass: keep only stations whose coordinates fall inside
        # the bbox (bbox = [min_lon, min_lat, max_lon, max_lat]).
        metadata = self._get_metadata(self.station_codes)
        parsed_metadata = self.parser._parse_metadata(metadata)

        def in_bbox(code):
            lat = parsed_metadata[code]["latitude"]
            lon = parsed_metadata[code]["longitude"]
            return (
                lon >= self.bbox[0]
                and lon <= self.bbox[2]
                and lat >= self.bbox[1]
                and lat <= self.bbox[3]
            )

        self.station_codes = list(filter(in_bbox, self.station_codes))
    return self.station_codes
Gets and caches a list of station codes optionally within a bbox .
9,742
def _monitor_task(self):
    """Poll the task state; fire callback/errback once it settles."""
    if self.task.state in states.UNREADY_STATES:
        # Still running -- check again after POLL_PERIOD seconds.
        reactor.callLater(self.POLL_PERIOD, self._monitor_task)
        return
    state = self.task.state
    if state == 'SUCCESS':
        self.callback(self.task.result)
    elif state == 'FAILURE':
        self.errback(Failure(self.task.result))
    elif state == 'REVOKED':
        self.errback(Failure(defer.CancelledError('Task {0}'.format(self.task.id))))
    else:
        self.errback(ValueError('Cannot respond to `{}` state'.format(state)))
Wrapper that handles the actual asynchronous monitoring of the task state .
9,743
def _clean_query_string ( q ) : q = q . replace ( "()" , "" ) . strip ( ) if q . endswith ( "(" ) : q = q [ : - 1 ] . strip ( ) if q [ - 3 : ] == "AND" or q [ - 3 : ] == "NOT" : q = q [ : - 3 ] elif q [ - 2 : ] == "OR" : q = q [ : - 2 ] while q . count ( "(" ) > q . count ( ")" ) : q += ")" while q . count ( ")" ) > q . count ( "(" ) : q = "(" + q return q . strip ( )
Clean up a query string for searching .
9,744
def _validate_query(query):
    """Validate and clean up a query dict before sending it to Search.

    Cleans the query string, applies default/maximum result limits, and
    strips unset and unknown parameters.  The original argument is not
    modified.

    Raises ValueError if no query string was specified.
    """
    query = deepcopy(query)
    if query["q"] == BLANK_QUERY["q"]:
        raise ValueError("No query specified.")
    query["q"] = _clean_query_string(query["q"])
    if query["limit"] is None:
        query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
    elif query["limit"] > SEARCH_LIMIT:
        warnings.warn('Reduced result limit from {} to the Search maximum: {}'
                      .format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
        query["limit"] = SEARCH_LIMIT
    # Drop parameters still at their blank defaults (the NaN sentinel is
    # never equal to anything, so absent keys are skipped).
    for key, val in BLANK_QUERY.items():
        if query.get(key, float('nan')) == val:
            query.pop(key)
    # Drop unknown parameters -- a plain loop instead of the original
    # side-effect-only list comprehension.
    for field in [f for f in query.keys() if f not in BLANK_QUERY.keys()]:
        query.pop(field)
    return query
Validate and clean up a query to be sent to Search . Cleans the query string removes unneeded parameters and validates for correctness . Does not modify the original argument . Raises an Exception on invalid input .
9,745
def _term ( self , term ) : term = str ( term ) if term : self . __query [ "q" ] += term return self
Add a term to the query .
9,746
def _operator(self, op, close_group=False):
    """Insert a boolean operator into the query.

    ``close_group`` closes the current parenthesised group and opens a
    new one around the operator.  Raises ValueError for unknown
    operators.  Returns self for chaining.
    """
    op = op.upper().strip()
    if op not in OP_LIST:
        raise ValueError("Error: '{}' is not a valid operator.".format(op))
    joined = ") " + op + " (" if close_group else " " + op + " "
    self.__query["q"] += joined
    return self
Add an operator between terms . There must be a term added before using this method . All operators have helpers so this method is usually not necessary to directly invoke .
9,747
def _and_join ( self , close_group = False ) : if not self . initialized : raise ValueError ( "You must add a search term before adding an operator." ) else : self . _operator ( "AND" , close_group = close_group ) return self
Combine terms with AND . There must be a term added before using this method .
9,748
def _or_join ( self , close_group = False ) : if not self . initialized : raise ValueError ( "You must add a search term before adding an operator." ) else : self . _operator ( "OR" , close_group = close_group ) return self
Combine terms with OR . There must be a term added before using this method .
9,749
def _mapping(self):
    """Fetch the entire field mapping of the configured Search index."""
    endpoint = "/unstable/index/{}/mapping".format(mdf_toolbox.translate_index(self.index))
    return self.__search_client.get(endpoint)["mappings"]
Fetch the entire mapping for the specified index .
9,750
def match_term(self, value, required=True, new_group=False):
    """Add a fulltext term, AND-joined when required, OR-joined otherwise.

    The join is only emitted once a first term exists.  Returns self.
    """
    if self.initialized:
        joiner = self._and_join if required else self._or_join
        joiner(new_group)
    self._term(value)
    return self
Add a fulltext search term to the query .
9,751
def match_exists(self, field, required=True, new_group=False):
    """Require ``field`` to exist in the results (match any value "*")."""
    return self.match_field(field, "*", required=required, new_group=new_group)
Require a field to exist in the results . Matches will have some value in field .
9,752
def match_not_exists(self, field, new_group=False):
    """Require ``field`` to be absent from the results (exclude any value "*")."""
    return self.exclude_field(field, "*", new_group=new_group)
Require a field to not exist in the results . Matches will not have field present .
9,753
def show_fields(self, block=None):
    """Retrieve the mapping for one metadata block.

    ``block=None`` returns the full mapping, ``"top"`` returns the
    top-level block names mapped to "object", and any other value
    returns the fields whose keys start with that block name.
    """
    mapping = self._mapping()
    if block is None:
        return mapping
    if block == "top":
        top_levels = {key.split(".")[0] for key in mapping.keys()}
        return {name: "object" for name in top_levels}
    return {key: value for key, value in mapping.items() if key.startswith(block)}
Retrieve and return the mapping for the given metadata block .
9,754
def inflate_dtype(arr, names):
    """Create a structured dtype from a 2D ndarray with unstructured dtype.

    Already-structured arrays keep their dtype; otherwise one field per
    name is created, all sharing the array's scalar dtype.
    """
    arr = np.asanyarray(arr)
    if has_structured_dt(arr):
        return arr.dtype
    scalar_dt = arr.dtype
    return np.dtype([(name, scalar_dt) for name in names])
Create structured dtype from a 2d ndarray with unstructured dtype .
9,755
def from_dict(cls, arr_dict, dtype=None, fillna=False, **kwargs):
    """Generate a table from a dictionary of arrays.

    With an explicit *dtype*, its field names must match the dict
    keys; ``fillna=True`` fills missing keys with NaN instead of
    raising KeyError. Without *dtype*, columns are ordered by key.
    """
    if dtype is None:
        names = sorted(list(arr_dict.keys()))
    else:
        dtype = np.dtype(dtype)
        dt_names = [f for f in dtype.names]
        dict_names = [k for k in arr_dict.keys()]
        missing_names = set(dt_names) - set(dict_names)
        if missing_names:
            if fillna:
                dict_names = dt_names
                # NOTE(review): this writes the NaN fillers directly
                # into the caller's dict
                for missing_name in missing_names:
                    arr_dict[missing_name] = np.nan
            else:
                raise KeyError('Dictionary keys and dtype fields do not match!')
        names = list(dtype.names)
    arr_dict = cls._expand_scalars(arr_dict)
    data = [arr_dict[key] for key in names]
    return cls(np.rec.fromarrays(data, names=names, dtype=dtype), **kwargs)
Generate a table from a dictionary of arrays .
9,756
def from_template(cls, data, template):
    """Create a table from a predefined datatype template.

    *template* is either a template name (looked up in TEMPLATES)
    or a template dict itself; an explicit 'name' entry overrides
    the default.
    """
    if isinstance(template, str):
        name = template
        table_info = TEMPLATES[name]
    else:
        name = DEFAULT_NAME
        table_info = template
    if 'name' in table_info:
        name = table_info['name']
    return cls(
        data,
        h5loc=table_info['h5loc'],
        dtype=table_info['dtype'],
        split_h5=table_info['split_h5'],
        name=name,
        h5singleton=table_info['h5singleton'],
    )
Create a table from a predefined datatype .
9,757
def append_columns(self, colnames, values, **kwargs):
    """Append new columns to the table and return a new instance.

    Scalars are broadcast to the table length. A 1D value must match
    the table length or be a single element (which is broadcast);
    anything else raises ValueError.
    """
    n = len(self)
    if np.isscalar(values):
        values = np.full(n, values)
    values = np.atleast_1d(values)
    if not isinstance(colnames, str) and len(colnames) > 1:
        values = np.atleast_2d(values)
        self._check_column_length(values, n)
    if values.ndim == 1:
        if len(values) > n:
            raise ValueError("New Column is longer than existing table!")
        elif len(values) > 1 and len(values) < n:
            raise ValueError(
                "New Column is shorter than existing table, "
                "but not just one element!")
        elif len(values) == 1:
            # broadcast the single element to the full column
            values = np.full(n, values[0])
    new_arr = rfn.append_fields(self, colnames, values, usemask=False,
                                asrecarray=True, **kwargs)
    # preserve the table's HDF5 metadata on the new instance
    return self.__class__(new_arr, h5loc=self.h5loc, split_h5=self.split_h5,
                          name=self.name, h5singleton=self.h5singleton)
Append new columns to the table .
9,758
def drop_columns(self, colnames, **kwargs):
    """Remove the given columns and return a new table instance."""
    stripped = rfn.drop_fields(self, colnames, usemask=False,
                               asrecarray=True, **kwargs)
    return self.__class__(
        stripped,
        h5loc=self.h5loc,
        split_h5=self.split_h5,
        name=self.name,
        h5singleton=self.h5singleton,
    )
Drop columns from the table .
9,759
def sorted(self, by, **kwargs):
    """Return a copy of the table sorted by column *by*."""
    order = np.argsort(self[by], **kwargs)
    return self.__class__(self[order], h5loc=self.h5loc,
                          split_h5=self.split_h5, name=self.name)
Sort array by a column .
9,760
def merge(cls, tables, fillna=False):
    """Merge a list of tables into one.

    Parameters
    ----------
    tables: iterable of tables. Column sets must match, unless
        ``fillna=True`` and the missing columns are float typed.
    fillna: if True, append missing float columns filled with NaN
        before merging; non-float missing columns raise ValueError.

    Returns
    -------
    A merged table carrying the HDF5 metadata of the first table.

    Raises
    ------
    ValueError: if the column sets differ and cannot be reconciled.
    """
    cols = set(itertools.chain(*[table.dtype.descr for table in tables]))
    tables_to_merge = []
    for table in tables:
        missing_cols = cols - set(table.dtype.descr)
        if missing_cols:
            if fillna:
                n = len(table)
                n_cols = len(missing_cols)
                col_names = []
                for col_name, col_dtype in missing_cols:
                    if 'f' not in col_dtype:
                        raise ValueError(
                            "Cannot create NaNs for non-float"
                            " type column '{}'".format(col_name))
                    col_names.append(col_name)
                table = table.append_columns(
                    col_names, np.full((n_cols, n), np.nan))
            else:
                # fixed: the hint previously said "fill_na=True", which
                # is not the name of the actual parameter (fillna)
                raise ValueError(
                    "Table columns do not match. Use fillna=True"
                    " if you want to append missing values with NaNs")
        tables_to_merge.append(table)
    first_table = tables_to_merge[0]
    merged_table = sum(tables_to_merge[1:], first_table)
    # carry over the HDF5 metadata of the first table
    merged_table.h5loc = first_table.h5loc
    merged_table.h5singleton = first_table.h5singleton
    merged_table.split_h5 = first_table.split_h5
    merged_table.name = first_table.name
    return merged_table
Merge a list of tables
9,761
def create_index_tuple(group_ids):
    """Create (start_index, n_items) arrays for fast lookup in HDF5Pump.

    *group_ids* is expected to be grouped (equal ids contiguous); the
    returned arrays are indexed by group id.
    """
    max_group_id = np.max(group_ids)
    starts = np.full(max_group_id + 1, 0)
    lengths = np.full(max_group_id + 1, 0)

    active_id = group_ids[0]
    offset = 0
    count = 0
    for group_id in group_ids:
        if group_id != active_id:
            # group boundary: record the finished group
            starts[active_id] = offset
            lengths[active_id] = count
            offset += count
            count = 0
            active_id = group_id
        count += 1
    # flush the final group
    starts[active_id] = offset
    lengths[active_id] = count
    return (starts, lengths)
A helper function to create index tuples for fast lookup in HDF5Pump
9,762
def _set_attributes(self):
    """Traverse the internal dictionary and expose entries as attributes.

    dict-valued parameters become namedtuples with alphabetically
    sorted fields; everything else is set directly.
    """
    for parameter, data in self._data.items():
        if isinstance(data, dict) or isinstance(data, OrderedDict):
            field_names, field_values = zip(*data.items())
            # sort fields so the namedtuple layout is deterministic
            sorted_indices = np.argsort(field_names)
            attr = namedtuple(parameter,
                              [field_names[i] for i in sorted_indices])
            setattr(self, parameter,
                    attr(*[field_values[i] for i in sorted_indices]))
        else:
            setattr(self, parameter, data)
Traverse the internal dictionary and set the getters
9,763
def _write_ndarrays_cache_to_disk(self):
    """Write all cached NDArrays to disk and empty the cache.

    Creates the EArray (and its index table) the first time an h5loc
    is seen, then appends the concatenated cached chunks.
    """
    for h5loc, arrs in self._ndarrays_cache.items():
        title = arrs[0].title
        chunkshape = (self.chunksize,) + arrs[0].shape[1:] \
            if self.chunksize is not None else None
        arr = NDArray(np.concatenate(arrs), h5loc=h5loc, title=title)
        if h5loc not in self._ndarrays:
            loc, tabname = os.path.split(h5loc)
            ndarr = self.h5file.create_earray(
                loc, tabname, tb.Atom.from_dtype(arr.dtype),
                (0,) + arr.shape[1:],
                chunkshape=chunkshape, title=title,
                filters=self.filters, createparents=True,
            )
            self._ndarrays[h5loc] = ndarr
        else:
            ndarr = self._ndarrays[h5loc]
        idx_table_h5loc = h5loc + '_indices'
        if idx_table_h5loc not in self.indices:
            self.indices[idx_table_h5loc] = HDF5IndexTable(idx_table_h5loc)
        idx_tab = self.indices[idx_table_h5loc]
        # record the length of every cached chunk for later lookup
        for arr_length in (len(a) for a in arrs):
            idx_tab.append(arr_length)
        ndarr.append(arr)
    self._ndarrays_cache = defaultdict(list)
Writes all the cached NDArrays to disk and empties the cache
9,764
def flush(self):
    """Flush all pending tables and cached arrays to disk."""
    self.log.info('Flushing tables and arrays to disk...')
    for table in self._tables.values():
        table.flush()
    self._write_ndarrays_cache_to_disk()
Flush tables and arrays to disk
9,765
def main():
    """Sample program to test GLWindow.

    Opens a window, compiles a shader program and renders instanced
    triangles with a time-driven rotation until the window closes.
    """
    print('GLWindow:', GLWindow.__version__)
    print('Python:', sys.version)
    print('Platform:', sys.platform)
    wnd = GLWindow.create_window((480, 480), title='GLWindow Sample')
    wnd.vsync = False
    ctx = ModernGL.create_context()
    # NOTE(review): shader sources are not passed here — presumably the
    # wrapper supplies defaults; confirm against the ModernGL version used
    prog = ctx.program([
        ctx.vertex_shader(),
        ctx.fragment_shader(),
    ])
    scale = prog.uniforms['scale']
    rotation = prog.uniforms['rotation']
    # 3 vertices x 6 floats (matching the ['vert', 'vert_color'] layout)
    vbo = ctx.buffer(struct.pack(
        '18f',
        1.0, 0.0, 1.0, 0.0, 0.0, 0.5,
        -0.5, 0.86, 0.0, 1.0, 0.0, 0.5,
        -0.5, -0.86, 0.0, 0.0, 1.0, 0.5,
    ))
    vao = ctx.simple_vertex_array(prog, vbo, ['vert', 'vert_color'])
    while wnd.update():
        wnd.clear(0.95, 0.95, 0.95)
        width, height = wnd.size
        # keep the triangle aspect ratio independent of the window shape
        scale.value = (height / width * 0.75, 0.75)
        ctx.viewport = wnd.viewport
        ctx.enable(ModernGL.BLEND)
        rotation.value = wnd.time
        vao.render(instances=10)
Sample program to test GLWindow .
9,766
def write_header(fobj):
    """Write the K40 calibration CSV header row to *fobj*."""
    fobj.write("# K40 calibration results\n")
    fobj.write("det_id\trun_id\tdom_id")
    # one t0 and one qe column per PMT channel
    for param in ('t0', 'qe'):
        for channel in range(31):
            fobj.write("\t{}_ch{}".format(param, channel))
Add the header to the CSV file
9,767
def azimuth(v):
    """Return the azimuth angle in radians for (arrays of) vectors."""
    v = np.atleast_2d(v)
    azi = phi(v) - np.pi
    # wrap negative angles into [0, 2*pi)
    azi[azi < 0] += 2 * np.pi
    # unwrap a single-vector input to a scalar
    return azi[0] if len(azi) == 1 else azi
Return the azimuth angle in radians .
9,768
def unit_vector(vector, **kwargs):
    """Return the unit vector(s) of *vector*, preserving input shape."""
    vector = np.array(vector)
    original_shape = vector.shape
    stacked = np.atleast_2d(vector)
    norms = np.linalg.norm(stacked, axis=1, **kwargs)
    return (stacked / norms[:, None]).reshape(original_shape)
Returns the unit vector of the vector .
9,769
def pld3(pos, line_vertex, line_dir):
    """Calculate the point-line distance in 3D.

    The distance is |dir x (vertex - pos)| / |dir|. Single points are
    returned as scalars, stacked points as an array.
    """
    pos = np.atleast_2d(pos)
    line_vertex = np.atleast_1d(line_vertex)
    line_dir = np.atleast_1d(line_dir)
    cross = np.cross(line_dir, line_vertex - pos)
    distance = np.linalg.norm(cross, axis=1) / np.linalg.norm(line_dir)
    if distance.ndim == 1 and len(distance) == 1:
        return distance[0]
    return distance
Calculate the point - line - distance for given point and line .
9,770
def dist(x1, x2, axis=0):
    """Return the Euclidean distance between two points.

    *axis* is forwarded to ``np.linalg.norm`` for stacked inputs.
    """
    return np.linalg.norm(x2 - x1, axis=axis)
Return the distance between two points .
9,771
def com(points, masses=None):
    """Calculate the center of mass for the given points.

    Equal masses are assumed when *masses* is None — ``np.average``
    treats ``weights=None`` as uniform weighting, so no branch is
    needed.
    """
    return np.average(points, axis=0, weights=masses)
Calculate center of mass for given points . If masses is not set assume equal masses .
9,772
def circ_permutation(items):
    """Return all circular (rotational) permutations of *items*."""
    return [items[shift:] + items[:shift] for shift in range(len(items))]
Calculate the circular permutation for a given list of items .
9,773
def inertia(x, y, z, weight=None):
    """Return the principal moments of inertia for a (weighted) point.

    Builds the 3x3 inertia tensor for the point (x, y, z) and returns
    its eigenvalues as (small, middle, big).

    Bug fix: the previous code indexed the 1-D result of
    ``np.linalg.eigvals`` as a matrix (``eigen_values[2][2]``), which
    raises, and eigvals gives no ordering guarantee anyway. The tensor
    is symmetric, so ``eigvalsh`` is used — it returns the eigenvalues
    sorted in ascending order.
    """
    if weight is None:
        weight = 1
    tensor_of_inertia = np.zeros((3, 3), dtype=float)
    tensor_of_inertia[0][0] = (y * y + z * z) * weight
    tensor_of_inertia[0][1] = (-1) * x * y * weight
    tensor_of_inertia[0][2] = (-1) * x * z * weight
    tensor_of_inertia[1][0] = (-1) * x * y * weight
    tensor_of_inertia[1][1] = (x * x + z * z) * weight
    tensor_of_inertia[1][2] = (-1) * y * z * weight
    tensor_of_inertia[2][0] = (-1) * x * z * weight
    tensor_of_inertia[2][1] = (-1) * z * y * weight
    tensor_of_inertia[2][2] = (x * x + y * y) * weight
    # eigvalsh: eigenvalues of a symmetric matrix, sorted ascending
    small_inertia, middle_inertia, big_inertia = \
        np.linalg.eigvalsh(tensor_of_inertia)
    return small_inertia, middle_inertia, big_inertia
Inertia tensor (adapted from Thomas).
9,774
def qrot(vector, quaternion):
    """Rotate a 3D vector by a (w, x, y, z) quaternion.

    Uses the Rodrigues-style shortcut: t = 2 * (q_xyz x v),
    v' = v + w * t + q_xyz x t.
    """
    axis_part = quaternion[1:]
    t = 2 * np.cross(axis_part, vector)
    return vector + quaternion[0] * t + np.cross(axis_part, t)
Rotate a 3D vector using quaternion algebra .
9,775
def qeuler(yaw, pitch, roll):
    """Convert Euler angles (in degrees) to a (w, x, y, z) quaternion."""
    half_yaw = np.radians(yaw) * 0.5
    half_pitch = np.radians(pitch) * 0.5
    half_roll = np.radians(roll) * 0.5
    cy, sy = np.cos(half_yaw), np.sin(half_yaw)
    cr, sr = np.cos(half_roll), np.sin(half_roll)
    cp, sp = np.cos(half_pitch), np.sin(half_pitch)
    return np.array((
        cy * cr * cp + sy * sr * sp,
        cy * sr * cp - sy * cr * sp,
        cy * cr * sp + sy * sr * cp,
        sy * cr * cp - cy * sr * sp,
    ))
Convert Euler angle to quaternion .
9,776
def intersect_3d(p1, p2):
    """Find the point closest to a set of lines in 3D (least squares).

    Each line i runs through ``p1[i]`` and ``p2[i]``. The normal
    equations of the perpendicular-distance minimisation are assembled
    and solved with ``np.linalg.lstsq``.
    """
    v = p2 - p1
    normed_v = unit_vector(v)
    nx = normed_v[:, 0]
    ny = normed_v[:, 1]
    nz = normed_v[:, 2]
    # entries of the symmetric 3x3 system matrix
    xx = np.sum(nx ** 2 - 1)
    yy = np.sum(ny ** 2 - 1)
    zz = np.sum(nz ** 2 - 1)
    xy = np.sum(nx * ny)
    xz = np.sum(nx * nz)
    yz = np.sum(ny * nz)
    M = np.array([(xx, xy, xz), (xy, yy, yz), (xz, yz, zz)])
    # right-hand side built from the line anchor points
    x = np.sum(p1[:, 0] * (nx ** 2 - 1) + p1[:, 1] * (nx * ny)
               + p1[:, 2] * (nx * nz))
    y = np.sum(p1[:, 0] * (nx * ny) + p1[:, 1] * (ny * ny - 1)
               + p1[:, 2] * (ny * nz))
    z = np.sum(p1[:, 0] * (nx * nz) + p1[:, 1] * (ny * nz)
               + p1[:, 2] * (nz ** 2 - 1))
    return np.linalg.lstsq(M, np.array((x, y, z)), rcond=None)[0]
Find the closest point for a given set of lines in 3D.
9,777
def compat_py2_py3():
    """Return (iteritems, itervalues) helpers for Python 2/3 compatibility."""
    if (sys.version_info > (3, 0)):
        # Python 3: items()/values() are the (lazy) view methods
        def iteritems(dictionary):
            return dictionary.items()

        def itervalues(dictionary):
            return dictionary.values()
    else:
        # Python 2: use the iterator variants
        def iteritems(dictionary):
            return dictionary.iteritems()

        def itervalues(dictionary):
            return dictionary.itervalues()
    return iteritems, itervalues
For Python 2 3 compatibility .
9,778
def timeslice_generator(self):
    """Yield blobs, iterating over all timeslice IDs in order."""
    for slice_id in range(self.n_timeslices):
        yield self.get_blob(slice_id)
Uses slice ID as iterator
9,779
def get_blob(self, index):
    """Return the blob for a timeslice; *index* is the slice ID.

    Retrieves the timeslice from the reader, builds a TimesliceInfo
    table and attaches the extracted hits (grouped by the slice ID).
    """
    blob = self._current_blob
    self.r.retrieve_timeslice(index)
    timeslice_info = Table.from_template({
        'frame_index': self.r.frame_index,
        'slice_id': index,
        'timestamp': self.r.utc_seconds,
        'nanoseconds': self.r.utc_nanoseconds,
        'n_frames': self.r.n_frames,
    }, 'TimesliceInfo')
    hits = self._extract_hits()
    hits.group_id = index
    blob['TimesliceInfo'] = timeslice_info
    blob[self._hits_blob_key] = hits
    return blob
Index is slice ID
9,780
def _slice_generator(self, index):
    """Yield blobs for the slice object *index* (simple slice iterator)."""
    for blob_index in range(*index.indices(len(self))):
        yield self.get_blob(blob_index)
A simple slice generator for iterations
9,781
def correlation_by_exemplar(brands, exemplars, validation_scores,
                            analyze_fn_str, outf):
    """Report the correlation with validation scores per exemplar.

    For every exemplar in isolation, scores all brands with the given
    analyze function, writes a TSV row (exemplar, Pearson r, follower
    count) to *outf* and collects the correlations.

    Returns {exemplar: pearson_r}. *outf* is closed before returning.
    """
    analyze_fn = getattr(analyze, analyze_fn_str)
    keys = sorted(k for k in validation_scores.keys()
                  if k in set(x[0] for x in brands))
    truth = [validation_scores[k] for k in keys]
    result = {}
    outf.write('exemplar\tcorr\tn_followers\n')
    outf.flush()
    for exemplar in exemplars:
        single_exemplar = {exemplar: exemplars[exemplar]}
        social_scores = analyze_fn(brands, single_exemplar)
        predicted = [social_scores[k] for k in keys]
        # compute the correlation once (was computed twice per exemplar)
        corr = scistat.pearsonr(predicted, truth)[0]
        outf.write('%s\t%g\t%d\n'
                   % (exemplar, corr, len(exemplars[exemplar])))
        outf.flush()
        result[exemplar] = corr
    outf.close()
    return result
Report the overall correlation with the validation scores using each exemplar in isolation .
9,782
def difference(self, other):
    """Summarise the plate differences between this node and *other*.

    Returns (diff, counts, is_sub_plate): the plates unique to each
    side, the sizes of those sets, and whether the difference is a
    single sub-plate relation.

    Bug fix: ``counts`` was ``map(len, diff)``; on Python 3 a map
    object never compares equal to a list, so both checks below were
    always False. It is now a real list.
    """
    diff = (tuple(set(self.plates) - set(other.plates)),
            tuple(set(other.plates) - set(self.plates)))
    counts = [len(d) for d in diff]
    is_sub_plate = counts == [1, 1] and diff[0][0].is_sub_plate(diff[1][0])
    # special case: we have one extra plate sharing the other's parent
    if (len(other.plates) == 1 and counts == [1, 0]
            and diff[0][0].parent == other.plates[0].parent):
        is_sub_plate = True
    return diff, counts, is_sub_plate
Summarise the differences between this node and the other node .
9,783
def report(times=None, include_itrs=True, include_stats=True,
           delim_mode=False, format_options=None):
    """Produce a formatted report of the current timing data.

    With the default ``times=None`` the root timer is reported; while
    the root timer is still running, a collapsed snapshot is used and
    the reporting overhead is subtracted as self-cut.

    Raises TypeError if *times* is given but is not a Times instance.
    """
    if times is None:
        if f.root.stopped:
            return report_loc.report(f.root.times, include_itrs,
                                     include_stats, delim_mode,
                                     format_options)
        else:
            t = timer()
            rep = report_loc.report(collapse.collapse_times(),
                                    include_itrs, include_stats,
                                    delim_mode, format_options,
                                    timer_state='running')
            # do not charge reporting overhead to the measured code
            f.root.self_cut += timer() - t
            return rep
    else:
        if not isinstance(times, Times):
            raise TypeError(
                "Expected Times instance for param 'times' (default is root).")
        return report_loc.report(times, include_itrs, include_stats,
                                 delim_mode, format_options)
Produce a formatted report of the current timing data .
9,784
def compare(times_list=None, name=None, include_list=True,
            include_stats=True, delim_mode=False, format_options=None):
    """Produce a formatted comparison of timing datas.

    With the default ``times_list=None`` all parallel subdivisions of
    the root timer are compared; otherwise the given list/tuple of
    Times instances is compared under *name*.

    Raises TypeError for a non-list/tuple or non-Times members.
    """
    if times_list is None:
        rep = ''
        for par_dict in itervalues(f.root.times.par_subdvsn):
            for par_name, par_list in iteritems(par_dict):
                rep += report_loc.compare(par_list, par_name,
                                          include_list, include_stats,
                                          delim_mode, format_options)
    else:
        if not isinstance(times_list, (list, tuple)):
            raise TypeError(
                "Expected a list/tuple of times instances for param 'times_list'.")
        if not all([isinstance(times, Times) for times in times_list]):
            raise TypeError(
                "At least one member of param 'times_list' is not a Times object.")
        rep = report_loc.compare(times_list, name, include_list,
                                 include_stats, delim_mode,
                                 format_options)
    return rep
Produce a formatted comparison of timing datas .
9,785
def write_structure(times=None):
    """Produce a formatted record of a times data structure.

    Falls back to the root timer's times when *times* is None;
    raises TypeError for a non-Times argument.
    """
    if times is None:
        return report_loc.write_structure(f.root.times)
    if not isinstance(times, Times):
        raise TypeError(
            "Expected Times instance for param 'times' (default is root).")
    return report_loc.write_structure(times)
Produce a formatted record of a times data structure .
9,786
def filter_muons(blob):
    """Copy all muons (PDG type -13) from McTracks into blob["Muons"]."""
    tracks = blob['McTracks']
    blob["Muons"] = Table(tracks[tracks.type == -13])
    return blob
Write all muons from McTracks to Muons .
9,787
def parse_conf_files(conf_paths):
    """Parse configuration file(s) and return a dict of options.

    Requires [runkeeper] client_id and client_secret; bindport,
    bindaddr and baseurl are optional.

    Raises ConfigurationError on parse failure or when none of the
    *conf_paths* could be read.
    """
    conf_file = ConfigParser.RawConfigParser()
    conf_read = conf_file.read(conf_paths)
    conf = {}
    try:
        if conf_read:
            conf['client_id'] = conf_file.get('runkeeper', 'client_id')
            conf['client_secret'] = conf_file.get('runkeeper', 'client_secret')
            if conf_file.has_option('runkeeper', 'bindport'):
                conf['bindport'] = conf_file.getint('runkeeper', 'bindport')
            if conf_file.has_option('runkeeper', 'bindaddr'):
                conf['bindaddr'] = conf_file.get('runkeeper', 'bindaddr')
            if conf_file.has_option('runkeeper', 'baseurl'):
                conf['baseurl'] = conf_file.get('runkeeper', 'baseurl')
            return conf
    except ConfigParser.Error:
        raise ConfigurationError(
            "Error parsing configuration file(s): %s\n" % sys.exc_info()[1])
    else:
        # reached only when no file could be read at all
        raise ConfigurationError(
            "No valid configuration file (%s) found." % defaultConfFilename)
Parse the configuration file and return dictionary of configuration options .
9,788
def main(argv=None):
    """Main block — configure and run the Bottle web server.

    Merges config-file options with command-line overrides; returns
    an error message string on configuration problems.
    """
    cmd_opts = parse_cmdline(argv)[0]
    if cmd_opts.confpath is not None:
        if os.path.exists(cmd_opts.confpath):
            conf_paths = [cmd_opts.confpath, ]
        else:
            return "Configuration file not found: %s" % cmd_opts.confpath
    else:
        # default search locations for the config file
        conf_paths = [os.path.join(path, defaultConfFilename)
                      for path in ('/etc', '.',)]
    try:
        conf.update(parse_conf_files(conf_paths))
    except ConfigurationError:
        return (sys.exc_info()[1])
    # command-line options override the configuration file
    if cmd_opts.bindport is not None:
        conf['bindport'] = cmd_opts.bindport
    if cmd_opts.bindaddr is not None:
        conf['bindaddr'] = cmd_opts.bindaddr
    if cmd_opts.baseurl is not None:
        conf['baseurl'] = cmd_opts.baseurl
    if cmd_opts.devel:
        from bottle import debug
        debug(True)
    app = SessionMiddleware(bottle.app(), sessionOpts)
    bottle.run(app=app, host=conf['bindaddr'], port=conf['bindport'],
               reloader=cmd_opts.devel)
Main Block - Configure and run the Bottle Web Server .
9,789
def get_hash(input_string):
    """Return the lowercased movie hash for *input_string*.

    If the path is a symlink, the basename of the link target is
    hashed instead.
    """
    if os.path.islink(input_string):
        link_target = os.readlink(input_string)
        input_string = os.path.split(link_target)[1]
    return input_string.lower()
Return the hash of the movie depending on the input string .
9,790
def get(self, key):
    """Return the stored object for *key*, deserialised from JSON."""
    raw = self._db[key]
    return self._object_class(json.loads(raw))
Get data associated with provided key .
9,791
def save(self, key, data):
    """Store *data* under *key* as JSON and sync the backing database."""
    self._db[key] = json.dumps(data)
    self._db.sync()
Save data associated with key .
9,792
def global_meta_data(self):
    """Return the global meta data, deepest identifiers first.

    Reads all MetaDataModel documents from the 'hyperstream' db and
    sorts them by the depth of their dotted identifier (descending),
    matching the tree structure they are stored in.
    """
    with switch_db(MetaDataModel, 'hyperstream'):
        return sorted(map(lambda x: x.to_dict(), MetaDataModel.objects),
                      key=lambda x: len(x['identifier'].split('.')),
                      reverse=True)
Get the global meta data which will be stored in a tree structure
9,793
def insert(self, tag, identifier, parent, data):
    """Insert the given meta data into the tree and the database.

    Raises KeyError if *identifier* already exists in the tree.
    """
    if self.global_plate_definitions.contains(identifier):
        raise KeyError("Identifier {} already exists in tree".format(identifier))
    self.global_plate_definitions.create_node(
        tag=tag, identifier=identifier, parent=parent, data=data)
    # mirror the node into the 'hyperstream' database
    with switch_db(MetaDataModel, 'hyperstream'):
        meta_data = MetaDataModel(tag=tag, parent=parent, data=data)
        meta_data.save()
    logging.info("Meta data {} inserted".format(identifier))
Insert the given meta data into the database
9,794
def delete(self, identifier):
    """Delete the meta data with the given identifier from tree and db.

    Unknown identifiers are logged and silently ignored.
    """
    try:
        node = self.global_plate_definitions[identifier]
    except NodeIDAbsentError:
        logging.info("Meta data {} not present during deletion".format(identifier))
        return
    self.global_plate_definitions.remove_node(identifier)
    # remove the matching document from the 'hyperstream' database, if any
    with switch_db(MetaDataModel, 'hyperstream'):
        meta_data = MetaDataModel.objects(
            tag=node.tag, data=node.data, parent=node.bpointer).first()
        if meta_data is not None:
            meta_data.delete()
    logging.info("Meta data {} deleted".format(identifier))
Delete the meta data with the given identifier from the database
9,795
def load(self):
    """Load this stream's data by calling River View.

    Populates self._dataHandle, self._data and self._headers.

    Fix: the Python-2 print statements are replaced with the print()
    function form, which behaves identically on Python 2 and 3.
    """
    print("Loading data for %s..." % self.getName())
    self._dataHandle = self._stream.data(
        since=self._since, until=self._until,
        limit=self._limit, aggregate=self._aggregate)
    self._data = self._dataHandle.data()
    self._headers = self._dataHandle.headers()
    print("Loaded %i rows." % len(self))
Loads this stream by calling River View for data .
9,796
def hexbin(x, y, color="purple", **kwargs):
    """Seaborn-compatible hexbin plot.

    Falls back to the 'Purples' colormap when seaborn is unavailable.
    """
    cmap = sns.light_palette(color, as_cmap=True) if HAS_SEABORN else "Purples"
    plt.hexbin(x, y, cmap=cmap, **kwargs)
Seaborn - compatible hexbin plot .
9,797
def diag(ax=None, linecolor='0.0', linestyle='--', **kwargs):
    """Plot the diagonal (y = x) across the current axis limits."""
    ax = get_ax(ax)
    lower = np.min((ax.get_xlim(), ax.get_ylim()))
    upper = np.max((ax.get_ylim(), ax.get_xlim()))
    return ax.plot([lower, upper], [lower, upper],
                   ls=linestyle, c=linecolor, **kwargs)
Plot the diagonal .
9,798
def automeshgrid(x, y, step=0.02, xstep=None, ystep=None,
                 pad=0.5, xpad=None, ypad=None):
    """Make a meshgrid inferred from the extent of *x* and *y*.

    Per-axis step/pad values fall back to the shared defaults.
    """
    xstep = step if xstep is None else xstep
    ystep = step if ystep is None else ystep
    xpad = pad if xpad is None else xpad
    ypad = pad if ypad is None else ypad
    return meshgrid(x.min() - xpad, x.max() + xpad, xstep,
                    y.min() - ypad, y.max() + ypad, ystep)
Make a meshgrid inferred from data .
9,799
def prebinned_hist(counts, binlims, ax=None, *args, **kwargs):
    """Plot a histogram from already-binned counts and bin limits."""
    ax = get_ax(ax)
    centers = bincenters(binlims)
    # feed bin centers weighted by the counts to reproduce the histogram
    return ax.hist(centers, bins=binlims, weights=counts, *args, **kwargs)
Plot a histogram with counts binlims already given .