idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
14,300
def get_t(self):
    """Getter for the time step counter ``__t``; guards against a non-int value."""
    if not isinstance(self.__t, int):
        raise TypeError("The type of __t must be int.")
    return self.__t
getter Time .
14,301
def set_t(self, value):
    """Setter for the time step counter ``__t``; rejects non-int values."""
    if not isinstance(value, int):
        raise TypeError("The type of __t must be int.")
    self.__t = value
setter Time .
14,302
def update_q(self, state_key, action_key, reward_value, next_max_q):
    """Apply the Q-learning update rule and persist the new Q-value."""
    current_q = self.extract_q_df(state_key, action_key)
    # Q <- Q + alpha * (r + gamma * max_a' Q(s', a') - Q)
    updated_q = current_q + self.alpha_value * (
        reward_value + (self.gamma_value * next_max_q) - current_q
    )
    self.save_q_df(state_key, action_key, updated_q)
Update Q - Value .
14,303
def predict_next_action(self, state_key, next_action_list):
    """Predict the next action by Q-Learning.

    Picks the candidate action with the highest recorded Q-value for
    `state_key`; falls back to a uniformly random choice when no
    Q-values have been recorded yet.
    """
    if self.q_df is None:
        return random.choice(next_action_list)
    candidates = self.q_df[self.q_df.state_key == state_key]
    candidates = candidates[candidates.action_key.isin(next_action_list)]
    if candidates.shape[0] == 0:
        # state never visited with these actions: explore randomly
        return random.choice(next_action_list)
    if candidates.shape[0] == 1:
        return candidates["action_key"].values[0]
    ranked = candidates.sort_values(by=["q_value"], ascending=False)
    return ranked.iloc[0, :]["action_key"]
Predict next action by Q - Learning .
14,304
def pull(self, arm_id, success, failure):
    """Record one observation (success/failure counts) for the given arm."""
    beta_dist = self.__beta_dist_dict[arm_id]
    beta_dist.observe(success, failure)
Pull arms .
14,305
def recommend(self, limit=10):
    """Return the `limit` arms with the highest expected value, best first."""
    scored = [
        (arm_id, beta_dist.expected_value())
        for arm_id, beta_dist in self.__beta_dist_dict.items()
    ]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[:limit]
List arms and their expected values.
14,306
def get_time_rate(self):
    """Getter for ``__time_rate``: must be a strictly positive float."""
    if not isinstance(self.__time_rate, float):
        raise TypeError("The type of __time_rate must be float.")
    if self.__time_rate <= 0.0:
        raise ValueError("The value of __time_rate must be greater than 0.0")
    return self.__time_rate
getter Time rate .
14,307
def set_time_rate(self, value):
    """Setter for ``__time_rate``: must be a strictly positive float."""
    if not isinstance(value, float):
        raise TypeError("The type of __time_rate must be float.")
    if value <= 0.0:
        raise ValueError("The value of __time_rate must be greater than 0.0")
    self.__time_rate = value
setter Time rate .
14,308
def __calculate_sigmoid(self):
    """Temperature schedule: decays as the time step ``self.t`` grows."""
    # +1.1 keeps the log argument > 1 so the result stays positive and finite
    return 1 / np.log(self.t * self.time_rate + 1.1)
Function of temperature .
14,309
def __calculate_boltzmann_factor(self, state_key, next_action_list):
    """Calculate Boltzmann (softmax) factors over the candidate actions.

    Returns the filtered Q-value frame with an extra normalized
    ``boltzmann_factor`` column summing to 1.
    """
    sigmoid = self.__calculate_sigmoid()
    q_df = self.q_df[self.q_df.state_key == state_key]
    # BUG FIX: select rows whose action_key is a candidate action, matching
    # predict_next_action; the previous `q_df[q_df.isin(next_action_list)]`
    # applied an element-wise mask over the whole frame instead of
    # filtering rows by action.
    q_df = q_df[q_df.action_key.isin(next_action_list)]
    q_df["boltzmann_factor"] = q_df["q_value"] / sigmoid
    q_df["boltzmann_factor"] = q_df["boltzmann_factor"].apply(np.exp)
    q_df["boltzmann_factor"] = q_df["boltzmann_factor"] / q_df["boltzmann_factor"].sum()
    return q_df
Calculate boltzmann factor .
14,310
def get_model(self):
    """Return a lightweight wrapper object exposing ``lstm_model``."""
    class Model(object):
        def __init__(self, lstm_model):
            self.lstm_model = lstm_model

    return Model(self.__lstm_model)
object of model as a function approximator which has lstm_model whose type is pydbm . rnn . lstm_model . LSTMModel .
14,311
def filter(self, scored_list):
    """Keep only entries scoring above mean + 0.5 * std of all scores."""
    scores = [score for _, score in scored_list]
    if scores:
        avg = np.mean(scores)
        std = np.std(scores)
    else:
        avg = 0
        std = 0
    limiter = avg + 0.5 * std
    return [(sent_idx, score) for (sent_idx, score) in scored_list if score > limiter]
Filtering with std .
14,312
def search(self, query, fields=None, page=1, max_records=None, flatten=True):
    """Yield every record matching `query`, transparently paging through
    server results; stops after `max_records` results when given."""
    page = int(page)
    data = {
        "query": query,
        "page": page,
        "fields": fields if fields is not None else [],
        "flatten": flatten,
    }
    pages = float('inf')
    count = 0
    while page <= pages:
        payload = self._post(self.search_path, data=data)
        pages = payload['metadata']['pages']
        page += 1
        data["page"] = page
        for result in payload["results"]:
            yield result
            count += 1
            if max_records and count >= max_records:
                return
returns iterator over all records that match the given query
14,313
def adjustColors(self, mode='dark'):
    """Remap a few colors according to the chosen display mode (dark/light)."""
    replacements = Game.__color_modes.get(mode, {})
    for key, color in self.__colors.items():
        self.__colors[key] = replacements.get(color, color)
Change a few colors depending on the mode to use. The default mode doesn't assume anything and avoids using white & black colors. The dark mode uses white and avoids dark blue while the light mode uses black and avoids yellow, to give a few examples.
14,314
def loadBestScore(self):
    """Load the local best score from the default file.

    Returns True on success, False when the file is missing or invalid.
    """
    try:
        with open(self.scores_file, 'r') as f:
            self.best_score = int(f.readline(), 10)
    # narrowed from a bare `except:` that also swallowed SystemExit and
    # KeyboardInterrupt; only I/O and parse errors mean "no saved score"
    except (IOError, OSError, ValueError):
        return False
    return True
load local best score from the default file
14,315
def saveBestScore(self):
    """Save the current best score in the default file.

    Returns True on success, False when the file cannot be written.
    """
    if self.score > self.best_score:
        self.best_score = self.score
    try:
        with open(self.scores_file, 'w') as f:
            f.write(str(self.best_score))
    # narrowed from a bare `except:`; only I/O errors mean failure
    except (IOError, OSError):
        return False
    return True
save current best score in the default file
14,316
def incScore(self, pts):
    """Add `pts` to the current score, keeping the best score in sync."""
    self.score = self.score + pts
    self.best_score = max(self.best_score, self.score)
update the current score by adding it the specified number of points
14,317
def store(self):
    """Save the current game session's score and board for further use.

    Board cells are written row by row on one line, followed by the
    score.  Returns True on success, False when the file cannot be
    written.
    """
    size = self.board.SIZE
    cells = []
    for i in range(size):
        for j in range(size):
            cells.append(str(self.board.getCell(j, i)))
    score_str = "%s\n%d" % (' '.join(cells), self.score)
    try:
        with open(self.store_file, 'w') as f:
            f.write(score_str)
    # narrowed from a bare `except:`; only I/O errors mean failure
    except (IOError, OSError):
        return False
    return True
save the current game session's score and data for further use
14,318
def restore(self):
    """Restore the saved game score and board data.

    Returns True on success, False when the save file is missing or
    corrupt.
    """
    size = self.board.SIZE
    try:
        with open(self.store_file, 'r') as f:
            lines = f.readlines()
            score_str = lines[0]
            self.score = int(lines[1])
    # narrowed from a bare `except:`; a missing or corrupt save means failure
    except (IOError, OSError, IndexError, ValueError):
        return False
    score_str_list = score_str.split(' ')
    count = 0
    for i in range(size):
        for j in range(size):
            value = score_str_list[count]
            self.board.setCell(j, i, int(value))
            count += 1
    return True
restore the saved game score and data
14,319
def loop(self):
    """Main game loop: draw, read a move, update the score.

    Returns the final score, or None on pause-save failure or Ctrl-C.
    """
    pause_key = self.board.PAUSE
    margins = {'left': 4, 'top': 4, 'bottom': 4}

    # make sure the cursor is restored even on abnormal exit
    atexit.register(self.showCursor)

    try:
        self.hideCursor()
        while True:
            self.clearScreen()
            print(self.__str__(margins=margins))
            if self.board.won() or not self.board.canMove():
                break
            m = self.readMove()
            if m == pause_key:
                # save and quit on the pause key
                self.saveBestScore()
                if self.store():
                    print("Game successfully saved. "
                          "Resume it with `term2048 --resume`.")
                    return self.score
                print("An error ocurred while saving your game.")
                return None
            self.incScore(self.board.move(m))
    except KeyboardInterrupt:
        self.saveBestScore()
        return None

    self.saveBestScore()
    print('You won!' if self.board.won() else 'Game Over')
    return self.score
main game loop . returns the final score .
14,320
def getCellStr(self, x, y):
    """Return a colored string representation of the cell located at x, y."""
    c = self.board.getCell(x, y)

    if c == 0:
        return '.' if self.__azmode else ' .'

    if self.__azmode:
        # map powers of two below the goal onto letters a, b, c, ...
        az = {}
        for i in range(1, int(math.log(self.board.goal(), 2))):
            az[2 ** i] = chr(i + 96)
        if c not in az:
            return '?'
        s = az[c]
    elif c == 1024:
        s = ' 1k'
    elif c == 2048:
        s = ' 2k'
    else:
        s = '%3d' % c

    return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL
return a string representation of the cell located at x y .
14,321
def boardToString(self, margins=None):
    """Return a string representation of the current board."""
    if margins is None:
        margins = {}
    b = self.board
    rg = range(b.size())
    left = ' ' * margins.get('left', 0)
    rows = []
    for y in rg:
        cells = [self.getCellStr(x, y) for x in rg]
        rows.append(left + ' '.join(cells))
    return '\n'.join(rows)
return a string representation of the current board .
14,322
def canMove(self):
    """Test if a move is possible."""
    if not self.filled():
        return True  # a free cell always allows a move

    # board is full: a move exists iff two equal cells are adjacent
    for y in self.__size_range:
        for x in self.__size_range:
            c = self.getCell(x, y)
            if (x < self.__size - 1 and c == self.getCell(x + 1, y)) \
                    or (y < self.__size - 1 and c == self.getCell(x, y + 1)):
                return True
    return False
test if a move is possible
14,323
def setCell(self, x, y, v):
    """Set the cell value at x, y."""
    row = self.cells[y]
    row[x] = v
set the cell value at x y
14,324
def getCol(self, x):
    """Return the x-th column, starting at 0."""
    col = []
    for i in self.__size_range:
        col.append(self.getCell(x, i))
    return col
return the x - th column starting at 0
14,325
def setCol(self, x, l):
    """Set the x-th column, starting at 0, from the values in `l`."""
    # file targets Python 2 elsewhere (xrange), so keep xrange here
    for i in xrange(self.__size):
        self.setCell(x, i, l[i])
set the x - th column starting at 0
14,326
def __collapseLineOrCol(self, line, d):
    """Merge equal adjacent tiles in `line` toward direction `d`.

    Returns a (line, points) tuple: the mutated line and the score
    earned by the merges performed on it.
    """
    if (d == Board.LEFT or d == Board.UP):
        inc = 1
        # scan from the move edge outward
        rg = xrange(0, self.__size - 1, inc)
    else:
        inc = -1
        rg = xrange(self.__size - 1, 0, inc)
    pts = 0
    for i in rg:
        if line[i] == 0:
            continue  # empty cell: nothing to merge
        if line[i] == line[i + inc]:
            v = line[i] * 2
            if v == self.__goal:
                # reaching the goal tile wins the game
                self.__won = True
            line[i] = v
            line[i + inc] = 0  # neighbour was absorbed into the merge
            pts += v
    return (line, pts)
Merge tiles in a line or column according to a direction and return a tuple with the new line and the score for the move on this line
14,327
def move(self, d, add_tile=True):
    """Apply move `d` to the whole board and return the move's score.

    A random tile is added after a successful move unless `add_tile`
    is False; an unknown direction is a no-op scoring 0.
    """
    if d == Board.LEFT or d == Board.RIGHT:
        chg, get = self.setLine, self.getLine  # operate on rows
    elif d == Board.UP or d == Board.DOWN:
        chg, get = self.setCol, self.getCol  # operate on columns
    else:
        return 0
    moved = False
    score = 0
    for i in self.__size_range:
        origin = get(i)
        # slide, merge, then slide again to close gaps opened by merges
        line = self.__moveLineOrCol(origin, d)
        collapsed, pts = self.__collapseLineOrCol(line, d)
        new = self.__moveLineOrCol(collapsed, d)
        chg(i, new)
        if origin != new:
            moved = True
        score += pts
    if moved and add_tile:
        self.addTile()
    return score
move and return the move score
14,328
def parse_cli_args():
    """Parse args from the CLI and return them as a dict."""
    parser = argparse.ArgumentParser(description='2048 in your terminal')
    arg = parser.add_argument
    arg('--mode', dest='mode', type=str, default=None,
        help='colors mode (dark or light)')
    arg('--az', dest='azmode', action='store_true',
        help='Use the letters a-z instead of numbers')
    arg('--resume', dest='resume', action='store_true',
        help='restart the game from where you left')
    arg('-v', '--version', action='store_true')
    arg('-r', '--rules', action='store_true')
    return vars(parser.parse_args())
parse args from the CLI and return a dict
14,329
def start_game(debug=False):
    """Start a new game.

    If `debug` is True the Game object is returned instead of entering
    the game loop.
    """
    args = parse_cli_args()
    if args['version']:
        print_version_and_exit()
    if args['rules']:
        print_rules_and_exit()
    game = Game(**args)
    if args['resume']:
        game.restore()
    return game if debug else game.loop()
Start a new game. If debug is set to True the game object is returned and the game loop isn't fired.
14,330
def on_message(self, message):
    """Handshake with livereload.js.

    Replies to the client's 'hello' with our protocol/server info and
    registers the connection as a waiter once the 'info' message arrives.
    """
    message = ObjectDict(escape.json_decode(message))
    if message.command == 'hello':
        handshake = {
            'command': 'hello',
            'protocols': [
                'http://livereload.com/protocols/official-7',
            ],
            'serverName': 'livereload-tornado',
        }
        self.send_message(handshake)
    if message.command == 'info' and 'url' in message:
        logger.info('Browser Connected: %s' % message.url)
        LiveReloadHandler.waiters.add(self)
Handshake with livereload . js
14,331
def get_content_modified_time(cls, abspath):
    """Returns the time that `abspath` was last modified, as a naive UTC datetime."""
    mtime = os.stat(abspath)[stat.ST_MTIME]
    return datetime.datetime.utcfromtimestamp(mtime)
Returns the time that abspath was last modified .
14,332
def ignore(self, filename):
    """Decide whether to ignore `filename` (compiled/object/swap files)."""
    extension = os.path.splitext(filename)[1]
    return extension in ('.pyc', '.pyo', '.o', '.swp')
Ignore a given filename or not .
14,333
def watch(self, path, func=None, delay=0, ignore=None):
    """Add a task for `path` to the watcher."""
    task = {'func': func, 'delay': delay, 'ignore': ignore}
    self._tasks[path] = task
Add a task to watcher .
14,334
def already_coords(self, address):
    """Test whether `address` already looks like a coordinate pair."""
    return re.search(self.COORD_MATCH, address) is not None
test used to see if we have coordinates or address
14,335
def coords_string_parser(self, coords):
    """Parse a "lat, lon" string into the address_to_coords result shape."""
    lat, lon = [part.strip() for part in coords.split(',')]
    return {"lat": lat, "lon": lon, "bounds": {}}
Parses the address string into coordinates to match the address_to_coords return object
14,336
def address_to_coords(self, address):
    """Convert a street address to coordinates via the Waze geocoder.

    Returns a dict with "lat", "lon" and normalized "bounds"; raises
    WRCError when no city-level match is found.
    """
    base_coords = self.BASE_COORDS[self.region]
    get_cord = self.COORD_SERVERS[self.region]
    url_options = {
        "q": address,
        "lang": "eng",
        "origin": "livemap",
        "lat": base_coords["lat"],
        "lon": base_coords["lon"]
    }
    response = requests.get(self.WAZE_URL + get_cord, params=url_options,
                            headers=self.HEADERS)
    for response_json in response.json():
        # only accept results resolved to at least city granularity
        if response_json.get('city'):
            lat = response_json['location']['lat']
            lon = response_json['location']['lon']
            bounds = response_json['bounds']
            if bounds is not None:
                # normalize so top >= bottom and left <= right
                bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom'])
                bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right'])
            else:
                bounds = {}
            return {"lat": lat, "lon": lon, "bounds": bounds}
    raise WRCError("Cannot get coords for %s" % address)
Convert address to coordinates
14,337
def get_route(self, npaths=1, time_delta=0):
    """Get route data from the Waze routing server.

    Returns one route dict when npaths == 1, otherwise a list of route
    dicts; raises WRCError on a server-side error or empty response.
    """
    routing_server = self.ROUTING_SERVERS[self.region]
    url_options = {
        "from": "x:%s y:%s" % (self.start_coords["lon"], self.start_coords["lat"]),
        "to": "x:%s y:%s" % (self.end_coords["lon"], self.end_coords["lat"]),
        "at": time_delta,
        "returnJSON": "true",
        "returnGeometries": "true",
        "returnInstructions": "true",
        "timeout": 60000,
        "nPaths": npaths,
        "options": "AVOID_TRAILS:t",
    }
    if self.vehicle_type:
        url_options["vehicleType"] = self.vehicle_type
    response = requests.get(self.WAZE_URL + routing_server,
                            params=url_options, headers=self.HEADERS)
    response.encoding = 'utf-8'
    response_json = self._check_response(response)
    if response_json:
        if 'error' in response_json:
            raise WRCError(response_json.get("error"))
        else:
            if response_json.get("alternatives"):
                return [alt['response'] for alt in response_json['alternatives']]
            if npaths > 1:
                # several routes requested but only one returned: keep list shape
                return [response_json['response']]
            return response_json['response']
    else:
        raise WRCError("empty response")
Get route data from waze
14,338
def _add_up_route(self, results, real_time=True, stop_at_bounds=False):
    """Sum cross-times and lengths over route segments.

    Segments whose path point lies inside the start or end bounding box
    are skipped when `stop_at_bounds` is set.  Returns a
    (minutes, kilometers) tuple.
    """
    start_bounds = self.start_coords['bounds']
    end_bounds = self.end_coords['bounds']

    # renamed parameters: the originals shadowed the builtins min/max
    def between(target, lo, hi):
        return lo < target < hi

    time = 0
    distance = 0
    for segment in results:
        if stop_at_bounds and segment.get('path'):
            x = segment['path']['x']
            y = segment['path']['y']
            if (between(x, start_bounds.get('left', 0), start_bounds.get('right', 0))
                    or between(x, end_bounds.get('left', 0), end_bounds.get('right', 0))) \
                    and (between(y, start_bounds.get('bottom', 0), start_bounds.get('top', 0))
                         or between(y, end_bounds.get('bottom', 0), end_bounds.get('top', 0))):
                continue  # segment lies inside a terminal bounding box
        time += segment['crossTime' if real_time else 'crossTimeWithoutRealTime']
        distance += segment['length']
    route_time = time / 60.0
    route_distance = distance / 1000.0
    return route_time, route_distance
Calculate route time and distance .
14,339
def calc_route_info(self, real_time=True, stop_at_bounds=False, time_delta=0):
    """Calculate best route info: returns a (minutes, kilometers) tuple."""
    route = self.get_route(1, time_delta)
    route_time, route_distance = self._add_up_route(
        route['results'], real_time=real_time, stop_at_bounds=stop_at_bounds)
    self.log.info('Time %.2f minutes, distance %.2f km.', route_time, route_distance)
    return route_time, route_distance
Calculate best route info .
14,340
def calc_all_routes_info(self, npaths=3, real_time=True, stop_at_bounds=False, time_delta=0):
    """Calculate info for up to `npaths` alternative routes.

    Returns a dict mapping route name to a (minutes, km) tuple and logs
    the min/max ranges over all alternatives.
    """
    routes = self.get_route(npaths, time_delta)
    results = {route['routeName']: self._add_up_route(route['results'], real_time=real_time, stop_at_bounds=stop_at_bounds) for route in routes}
    route_time = [route[0] for route in results.values()]
    route_distance = [route[1] for route in results.values()]
    self.log.info('Time %.2f - %.2f minutes, distance %.2f - %.2f km.',
                  min(route_time), max(route_time),
                  min(route_distance), max(route_distance))
    return results
Calculate all route infos .
14,341
def _initialize(self):
    """Read SharQ settings, open the redis connection and load Lua scripts."""
    self._key_prefix = self._config.get('redis', 'key_prefix')
    self._job_expire_interval = int(self._config.get('sharq', 'job_expire_interval'))
    self._default_job_requeue_limit = int(self._config.get('sharq', 'default_job_requeue_limit'))
    redis_connection_type = self._config.get('redis', 'conn_type')
    db = self._config.get('redis', 'db')
    if redis_connection_type == 'unix_sock':
        self._r = redis.StrictRedis(db=db, unix_socket_path=self._config.get('redis', 'unix_socket_path'))
    elif redis_connection_type == 'tcp_sock':
        self._r = redis.StrictRedis(db=db, host=self._config.get('redis', 'host'), port=self._config.get('redis', 'port'))
    # NOTE(review): no connection is created for any other conn_type value,
    # leaving self._r unset — confirm config is validated upstream.
    self._load_lua_scripts()
Read the SharQ configuration and set appropriate variables . Open a redis connection pool and load all the Lua scripts .
14,342
def _load_config(self):
    """Parse the configuration file at self.config_path into memory."""
    # NOTE(review): SafeConfigParser is the Python 2 API (a deprecated
    # alias on Python 3) — confirm the target interpreter.
    self._config = ConfigParser.SafeConfigParser()
    self._config.read(self.config_path)
Read the configuration file and load it into memory .
14,343
def _load_lua_scripts(self):
    """Loads all lua scripts required by SharQ.

    Reads each script from scripts/lua and registers it with redis,
    storing the source as ``_lua_<name>_script`` and the registered
    callable as ``_lua_<name>`` on self.
    """
    lua_script_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'scripts/lua')
    # one entry per script instead of six copies of the same read/register code
    for script_name in ('enqueue', 'dequeue', 'finish', 'interval', 'requeue', 'metrics'):
        with open(os.path.join(lua_script_path, '%s.lua' % script_name), 'r') as script_file:
            script_source = script_file.read()
        setattr(self, '_lua_%s_script' % script_name, script_source)
        setattr(self, '_lua_%s' % script_name, self._r.register_script(script_source))
Loads all lua scripts required by SharQ .
14,344
def enqueue(self, payload, interval, job_id, queue_id, queue_type='default', requeue_limit=None):
    """Enqueues the job into the specified queue_id of a particular queue_type.

    Validates every argument, serializes the payload and hands it to the
    enqueue Lua script.  Raises BadArgumentException on invalid input.
    """
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')
    if not is_valid_identifier(job_id):
        raise BadArgumentException('`job_id` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')
    if requeue_limit is None:
        requeue_limit = self._default_job_requeue_limit
    if not is_valid_requeue_limit(requeue_limit):
        raise BadArgumentException('`requeue_limit` has an invalid value.')
    try:
        serialized_payload = serialize_payload(payload)
    except TypeError as e:
        # BUG FIX: `e.message` only exists on Python 2 exceptions and raises
        # AttributeError on Python 3; str(e) works on both.
        raise BadArgumentException(str(e))
    timestamp = str(generate_epoch())
    keys = [self._key_prefix, queue_type]
    # payload is stored wrapped in double quotes (dequeue strips them)
    args = [timestamp, queue_id, job_id, '"%s"' % serialized_payload, interval, requeue_limit]
    self._lua_enqueue(keys=keys, args=args)
    response = {'status': 'queued'}
    return response
Enqueues the job into the specified queue_id of a particular queue_type
14,345
def dequeue(self, queue_type='default'):
    """Dequeue a ready job of `queue_type` via the dequeue Lua script.

    Returns a success dict carrying queue_id/job_id/payload/
    requeues_remaining, or {'status': 'failure'} when no job is ready.
    """
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')
    timestamp = str(generate_epoch())
    keys = [self._key_prefix, queue_type]
    args = [timestamp, self._job_expire_interval]
    dequeue_response = self._lua_dequeue(keys=keys, args=args)
    if len(dequeue_response) < 4:
        response = {'status': 'failure'}
        return response
    queue_id, job_id, payload, requeues_remaining = dequeue_response
    # payload was stored wrapped in double quotes; strip them before decoding
    payload = deserialize_payload(payload[1:-1])
    response = {
        'status': 'success',
        'queue_id': queue_id,
        'job_id': job_id,
        'payload': payload,
        'requeues_remaining': int(requeues_remaining)
    }
    return response
Dequeues a job from any of the ready queues based on the queue_type . If no job is ready returns a failure status .
14,346
def interval(self, interval, queue_id, queue_type='default'):
    """Updates the interval for a specific queue_id of a particular queue type."""
    if not is_valid_interval(interval):
        raise BadArgumentException('`interval` has an invalid value.')
    if not is_valid_identifier(queue_id):
        raise BadArgumentException('`queue_id` has an invalid value.')
    if not is_valid_identifier(queue_type):
        raise BadArgumentException('`queue_type` has an invalid value.')
    interval_hmap_key = '%s:interval' % self._key_prefix
    interval_queue_key = '%s:%s' % (queue_type, queue_id)
    interval_response = self._lua_interval(
        keys=[interval_hmap_key, interval_queue_key], args=[interval])
    status = 'failure' if interval_response == 0 else 'success'
    return {'status': status}
Updates the interval for a specific queue_id of a particular queue type .
14,347
def is_valid_identifier(identifier):
    """Checks if the given identifier is valid or not.

    A valid identifier is 1-100 characters long and (case-insensitively)
    uses only characters from VALID_IDENTIFIER_SET.
    """
    if not isinstance(identifier, basestring):
        return False
    if len(identifier) > 100 or len(identifier) < 1:
        return False
    # set() accepts the string directly; the intermediate list() was redundant
    condensed_form = set(identifier.lower())
    return condensed_form.issubset(VALID_IDENTIFIER_SET)
Checks if the given identifier is valid or not. A valid identifier may consist of the following characters, with a maximum length of 100 characters and a minimum of 1 character.
14,348
def is_valid_interval(interval):
    """Checks that the given interval is a positive non-zero integer."""
    return isinstance(interval, (int, long)) and interval > 0
Checks if the given interval is valid . A valid interval is always a positive non - zero integer value .
14,349
def is_valid_requeue_limit(requeue_limit):
    """Checks that the given requeue limit is an integer >= -1."""
    return isinstance(requeue_limit, (int, long)) and requeue_limit > -2
Checks if the given requeue limit is valid . A valid requeue limit is always greater than or equal to - 1 .
14,350
def get_search_names(name):
    """Return a list of values to search on when we are looking for a
    package with the given name: every -, _ and . separator spelling."""
    parts = re.split('[-_.]', name)
    if len(parts) == 1:
        return parts

    names = set()
    for split_at in range(len(parts) - 1, 0, -1):
        for sep_prefix in '-_.':
            left = sep_prefix.join(parts[:split_at])
            for sep_suffix in '-_.':
                right = sep_suffix.join(parts[split_at:])
                for sep_join in '-_.':
                    names.add(sep_join.join([left, right]))
    return list(names)
Return a list of values to search on when we are looking for a package with the given name .
14,351
def alter_old_distutils_request(request: WSGIRequest):
    """Alter the request body for compatibility with older distutils clients.

    Old clients separate multipart sections with LF instead of the CRLF
    required by the spec; rebuild the body so Django can parse it.
    """
    body = request.body
    if request.POST or request.FILES:
        return  # Django already parsed the body successfully
    new_body = BytesIO()
    content_type, opts = parse_header(request.META['CONTENT_TYPE'].encode('ascii'))
    parts = body.split(b'\n--' + opts['boundary'] + b'\n')
    for part in parts:
        if b'\n\n' not in part:
            continue
        headers, content = part.split(b'\n\n', 1)
        if not headers:
            continue
        # re-emit the section with spec-compliant CRLF delimiters
        new_body.write(b'--' + opts['boundary'] + b'\r\n')
        new_body.write(headers.replace(b'\n', b'\r\n'))
        new_body.write(b'\r\n\r\n')
        new_body.write(content)
        new_body.write(b'\r\n')
    new_body.write(b'--' + opts['boundary'] + b'--\r\n')
    request._body = new_body.getvalue()
    request.META['CONTENT_LENGTH'] = len(request._body)
    # drop cached parse results so POST/FILES are rebuilt from the new body
    if hasattr(request, '_files'):
        delattr(request, '_files')
    if hasattr(request, '_post'):
        delattr(request, '_post')
Alter the request body for compatibility with older distutils clients
14,352
def delete_files(sender, **kwargs):
    """Signal callback: delete the stored file when its database row is deleted.

    The file is kept when another row still references the same
    distribution path.
    """
    instance = kwargs['instance']
    if not hasattr(instance.distribution, 'path'):
        return
    if not os.path.exists(instance.distribution.path):
        return
    # other rows may share the same file on disk
    is_referenced = (instance.__class__.objects
                     .filter(distribution=instance.distribution)
                     .exclude(pk=instance._get_pk_val())
                     .exists())
    if is_referenced:
        return
    try:
        instance.distribution.storage.delete(instance.distribution.path)
    except Exception:
        logger.exception('Error when trying to delete file %s of package %s:' % (instance.pk, instance.distribution.path))
Signal callback for deleting old files when database item is deleted
14,353
def md5_hash_file(fh):
    """Return the md5 hash (hex digest) of the given file-object."""
    digest = hashlib.md5()
    while True:
        chunk = fh.read(8192)  # stream in chunks to bound memory use
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
Return the md5 hash of the given file - object
14,354
def get_versio_versioning_scheme(full_class_path):
    """Return a class based on its full dotted path."""
    module_path, _, class_name = full_class_path.rpartition('.')
    try:
        module = importlib.import_module(module_path)
    except ImportError:
        raise RuntimeError('Invalid specified Versio schema {}'.format(full_class_path))
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise RuntimeError('Could not find Versio schema class {!r} inside {!r} module.'.format(class_name, module_path))
Return a class based on its full path
14,355
def search(spec, operator='and'):
    """Implement the xmlrpc search command.

    `spec` maps field names to lists of search values; only 'name' and
    'summary' are supported.  Returns at most 20 result dicts.
    """
    field_map = {
        'name': 'name__icontains',
        'summary': 'releases__summary__icontains',
    }
    query_filter = None
    for field, values in spec.items():
        for value in values:
            if field not in field_map:
                continue  # unsupported field: ignore
            field_filter = Q(**{field_map[field]: value})
            if not query_filter:
                query_filter = field_filter
                continue
            if operator == 'and':
                query_filter &= field_filter
            else:
                query_filter |= field_filter
    result = []
    packages = models.Package.objects.filter(query_filter).all()[:20]
    for package in packages:
        # NOTE(review): assumes every matched package has at least one
        # release — confirm, otherwise this raises IndexError.
        release = package.releases.all()[0]
        result.append({
            'name': package.name,
            'summary': release.summary,
            'version': release.version,
            '_pypi_ordering': 0,
        })
    return result
Implement xmlrpc search command .
14,356
def credentials_required(view_func):
    """Decorator enforcing CIDR- and credential-based access on a view.

    Requests from networks granted credential-free access pass straight
    through; otherwise the client must come from an allowed CIDR and
    authenticate against Django's authentication framework.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def decorator(request, *args, **kwargs):
        if settings.LOCALSHOP_USE_PROXIED_IP:
            try:
                ip_addr = request.META['HTTP_X_FORWARDED_FOR']
            except KeyError:
                return HttpResponseForbidden('No permission')
            else:
                # take the first hop: the original client address
                ip_addr = ip_addr.split(",")[0].strip()
        else:
            ip_addr = request.META['REMOTE_ADDR']
        if CIDR.objects.has_access(ip_addr, with_credentials=False):
            return view_func(request, *args, **kwargs)
        if not CIDR.objects.has_access(ip_addr, with_credentials=True):
            return HttpResponseForbidden('No permission')
        if request.user.is_authenticated():
            return view_func(request, *args, **kwargs)
        user = authenticate_user(request)
        if user is not None:
            login(request, user)
            return view_func(request, *args, **kwargs)
        return HttpResponseUnauthorized(content='Authorization Required')
    return decorator
This decorator should be used with views that need simple authentication against Django s authentication framework .
14,357
def no_duplicates(function, *args, **kwargs):
    """Makes sure that no duplicated tasks are enqueued.

    NOTE(review): this wrapper only deletes the lock key from the cache
    after the call; nothing here checks or sets the key, so it is
    presumably set by the enqueueing side — confirm.
    """
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        key = generate_key(function, *args, **kwargs)
        try:
            function(self, *args, **kwargs)
        finally:
            # always release the lock, even when the task raised
            logging.info('Removing key %s', key)
            cache.delete(key)
    return wrapper
Makes sure that no duplicated tasks are enqueued .
14,358
def download_file(pk):
    """Download the file referenced in models.ReleaseFile with the given pk.

    The download is aborted (nothing saved) when the md5 of the fetched
    content does not match the recorded digest.
    """
    release_file = models.ReleaseFile.objects.get(pk=pk)
    logging.info("Downloading %s", release_file.url)
    proxies = None
    if settings.LOCALSHOP_HTTP_PROXY:
        proxies = settings.LOCALSHOP_HTTP_PROXY
    response = requests.get(release_file.url, stream=True, proxies=proxies)
    filename = os.path.basename(release_file.url)
    if 'content-length' in response.headers:
        size = int(response.headers['content-length'])
    else:
        size = len(response.content)
    default_content_type = 'application/octet-stream'
    content_type = response.headers.get('content-type')
    if content_type is None or content_type == default_content_type:
        # fall back to guessing from the filename extension
        content_type = mimetypes.guess_type(filename)[0] or default_content_type
    with TemporaryUploadedFile(name=filename, size=size, charset='utf-8',
                               content_type=content_type) as temp_file:
        temp_file.write(response.content)
        temp_file.seek(0)
        # verify integrity before persisting
        md5_hash = md5_hash_file(temp_file)
        if md5_hash != release_file.md5_digest:
            logging.error("MD5 hash mismatch: %s (expected: %s)" % (md5_hash, release_file.md5_digest))
            return
        release_file.distribution.save(filename, temp_file)
        release_file.save()
    logging.info("Complete")
Download the file reference in models . ReleaseFile with the given pk .
14,359
def handle_register_or_upload(post_data, files, user, repository):
    """Process a register or upload command issued via distutils.

    Validates the supplied version (optionally against the scheme named by
    LOCALSHOP_VERSIONING_TYPE), locates or creates the package and release
    in ``repository``, and stores an uploaded distribution file.

    :param post_data: request POST data with at least 'name' and 'version'.
    :param files: request FILES mapping; 'content' holds the distribution.
    :param user: requesting user (currently unused here).
    :param repository: Repository whose packages are searched/updated.
    :return: HttpResponse; 400 responses describe validation failures.
    """
    name = post_data.get('name')
    version = post_data.get('version')
    if settings.LOCALSHOP_VERSIONING_TYPE:
        scheme = get_versio_versioning_scheme(
            settings.LOCALSHOP_VERSIONING_TYPE)
        try:
            Version(version, scheme=scheme)
        except AttributeError:
            response = HttpResponseBadRequest(
                reason="Invalid version supplied '{!s}' for '{!s}' "
                       "scheme.".format(
                           version, settings.LOCALSHOP_VERSIONING_TYPE))
            return response
    if not name or not version:
        logger.info("Missing name or version for package")
        return HttpResponseBadRequest('No name or version given')
    try:
        # Search all normalised spellings of the package name.
        condition = Q()
        for search_name in get_search_names(name):
            condition |= Q(name__iexact=search_name)
        package = repository.packages.get(condition)
        # Error out when a localshop user tries to overwrite a mirrored
        # pypi package.
        if not package.is_local:
            return HttpResponseBadRequest(
                '%s is a pypi package!' % package.name)
        try:
            release = package.releases.get(version=version)
        except ObjectDoesNotExist:
            release = None
    except ObjectDoesNotExist:
        package = None
        release = None
    form = forms.ReleaseForm(post_data, instance=release)
    if not form.is_valid():
        # BUG FIX: dict.values() is not indexable on Python 3; use the same
        # six idiom as the package-form error handling below.
        return HttpResponseBadRequest(
            reason=six.next(six.itervalues(form.errors))[0])
    if not package:
        pkg_form = forms.PackageForm(post_data, repository=repository)
        if not pkg_form.is_valid():
            return HttpResponseBadRequest(
                reason=six.next(six.itervalues(pkg_form.errors))[0])
        package = pkg_form.save()
    release = form.save(commit=False)
    release.package = package
    release.save()
    # If this is an upload action then process the uploaded file.
    if files:
        files = {'distribution': files['content']}
        filename = files['distribution']._name
        try:
            release_file = release.files.get(filename=filename)
            if settings.LOCALSHOP_RELEASE_OVERWRITE is False:
                message = 'That it already released, please bump version.'
                return HttpResponseBadRequest(message)
        except ObjectDoesNotExist:
            release_file = models.ReleaseFile(
                release=release, filename=filename)
        form_file = forms.ReleaseFileForm(
            post_data, files, instance=release_file)
        if not form_file.is_valid():
            return HttpResponseBadRequest('ERRORS %s' % form_file.errors)
        release_file = form_file.save(commit=False)
        release_file.save()
    return HttpResponse()
Process a register or upload command issued via distutils.
14,360
def download(self):
    """Fetch this release file from PyPI, via a celery task unless isolated."""
    from .tasks import download_file
    if settings.LOCALSHOP_ISOLATED:
        # No broker in isolated mode: download synchronously.
        download_file(pk=self.pk)
    else:
        download_file.delay(pk=self.pk)
Start a celery task to download the release file from pypi .
14,361
def dispatch_queue(loader):
    """Drain the loader's pending queue and batch-load it.

    When the queue exceeds ``max_batch_size`` it is split into chunks of at
    most that size; otherwise the whole queue is dispatched at once.
    """
    # Take ownership of the queue and reset it for new loads.
    pending, loader._queue = loader._queue, []
    limit = loader.max_batch_size
    if limit and len(pending) > limit:
        for batch in get_chunks(pending, limit):
            dispatch_queue_batch(loader, batch)
    else:
        dispatch_queue_batch(loader, pending)
Given the current state of a Loader instance perform a batch load from its current queue .
14,362
def failed_dispatch(loader, queue, error):
    """Handle a failed batch dispatch.

    Evicts every queued key from the cache (so the failure is not cached)
    and rejects each pending load so callers do not hang.
    """
    for pending in queue:
        loader.clear(pending.key)
        pending.reject(error)
Do not cache individual loads if the entire batch dispatch fails but still reject each request so they do not hang .
14,363
def load(self, key=None):
    """Return a Promise for the value behind ``key``, using the cache.

    :raises TypeError: when called without a key.
    """
    if key is None:
        raise TypeError(
            ("The loader.load() function must be called with a value," +
             "but got: {}.").format(key))
    cache_key = self.get_cache_key(key)
    if self.cache:
        hit = self._promise_cache.get(cache_key)
        if hit:
            return hit
    # Not cached: create a promise that resolves via the batch machinery.
    promise = Promise(partial(self.do_resolve_reject, key))
    if self.cache:
        self._promise_cache[cache_key] = promise
    return promise
Loads a key returning a Promise for the value represented by that key .
14,364
def load_many(self, keys):
    """Load several keys, returning a Promise of the list of values.

    :raises TypeError: when ``keys`` is not iterable.
    """
    if not isinstance(keys, Iterable):
        raise TypeError(
            ("The loader.loadMany() function must be called with Array<key> " +
             "but got: {}.").format(keys))
    promises = [self.load(key) for key in keys]
    return Promise.all(promises)
Loads multiple keys promising an array of values
14,365
def clear(self, key):
    """Evict ``key`` from the promise cache (no-op if absent).

    Returns self for method chaining.
    """
    self._promise_cache.pop(self.get_cache_key(key), None)
    return self
Clears the value at key from the cache if it exists . Returns itself for method chaining .
14,366
def prime(self, key, value):
    """Seed the cache with ``key`` -> ``value`` unless already present.

    Exceptions are stored as rejected promises. Returns self for chaining.
    """
    cache_key = self.get_cache_key(key)
    if cache_key in self._promise_cache:
        return self
    if isinstance(value, Exception):
        self._promise_cache[cache_key] = Promise.reject(value)
    else:
        self._promise_cache[cache_key] = Promise.resolve(value)
    return self
Adds the provided key and value to the cache. If the key already exists, no change is made. Returns itself for method chaining.
14,367
def get_complete_version(version=None):
    """Return the promise version tuple, validating one when supplied.

    With no argument, returns the package's own VERSION; otherwise the
    supplied 5-tuple is sanity-checked and returned unchanged.
    """
    if version is not None:
        assert len(version) == 5
        assert version[3] in ("alpha", "beta", "rc", "final")
        return version
    from promise import VERSION
    return VERSION
Returns a tuple of the promise version . If version argument is non - empty then checks for correctness of the tuple provided .
14,368
def _xcorr_interp ( ccc , dt ) : if ccc . shape [ 0 ] == 1 : cc = ccc [ 0 ] else : cc = ccc cc_curvature = np . concatenate ( ( np . zeros ( 1 ) , np . diff ( cc , 2 ) , np . zeros ( 1 ) ) ) cc_t = np . arange ( 0 , len ( cc ) * dt , dt ) peak_index = cc . argmax ( ) first_sample = peak_index while first_sample > 0 and cc_curvature [ first_sample - 1 ] <= 0 : first_sample -= 1 last_sample = peak_index while last_sample < len ( cc ) - 1 and cc_curvature [ last_sample + 1 ] <= 0 : last_sample += 1 num_samples = last_sample - first_sample + 1 if num_samples < 3 : msg = "Less than 3 samples selected for fit to cross " + "correlation: %s" % num_samples raise IndexError ( msg ) if num_samples < 5 : msg = "Less than 5 samples selected for fit to cross " + "correlation: %s" % num_samples warnings . warn ( msg ) coeffs , residual = scipy . polyfit ( cc_t [ first_sample : last_sample + 1 ] , cc [ first_sample : last_sample + 1 ] , deg = 2 , full = True ) [ : 2 ] if coeffs [ 0 ] >= 0 : msg = "Fitted parabola opens upwards!" warnings . warn ( msg ) if residual > 0.1 : msg = "Residual in quadratic fit to cross correlation maximum " + "larger than 0.1: %s" % residual warnings . warn ( msg ) shift = - coeffs [ 1 ] / 2.0 / coeffs [ 0 ] coeff = ( 4 * coeffs [ 0 ] * coeffs [ 2 ] - coeffs [ 1 ] ** 2 ) / ( 4 * coeffs [ 0 ] ) return shift , coeff
Interpolate around the maximum correlation value for sub-sample precision.
14,369
def _day_loop(detection_streams, template, min_cc, detections,
              horizontal_chans, vertical_chans, interpolate, cores, parallel,
              debug=0):
    """Loop through multiple detections for one template, picking lags.

    Runs :func:`_channel_loop` over every detection stream, either serially
    or in a multiprocessing Pool, and collects the resulting events ordered
    to match ``detection_streams``.

    :param detection_streams: list of obspy Streams, one per detection.
    :param template: Template stream the detections were made with.
    :param min_cc: Minimum cross-correlation for a pick to be kept.
    :param detections: Detection objects matching detection_streams by index.
    :param interpolate: Interpolate picks to sub-sample accuracy.
    :param cores: Number of processes (falsy -> all available).
    :param parallel: Use a multiprocessing Pool when True.
    :return: obspy Catalog of lag-calc events.
    """
    if len(detection_streams) == 0:
        # No work: empty catalog.
        return Catalog()
    if not cores:
        num_cores = cpu_count()
    else:
        num_cores = cores
    if num_cores > len(detection_streams):
        num_cores = len(detection_streams)
    if parallel:
        pool = Pool(processes=num_cores)
        debug_print('Made pool of %i workers' % num_cores, 4, debug)
        results = [pool.apply_async(
            _channel_loop, (detection_streams[i],),
            {'template': template, 'min_cc': min_cc,
             'detection_id': detections[i].id, 'interpolate': interpolate,
             'i': i, 'pre_lag_ccsum': detections[i].detect_val,
             'detect_chans': detections[i].no_chans,
             'horizontal_chans': horizontal_chans,
             'vertical_chans': vertical_chans})
            for i in range(len(detection_streams))]
        pool.close()
        try:
            events_list = [p.get() for p in results]
        except KeyboardInterrupt as e:
            pool.terminate()
            raise e
        pool.join()
        # Restore input ordering (workers may finish out of order).
        events_list.sort(key=lambda tup: tup[0])
    else:
        events_list = []
        for i in range(len(detection_streams)):
            events_list.append(_channel_loop(
                detection=detection_streams[i], template=template,
                min_cc=min_cc, detection_id=detections[i].id,
                interpolate=interpolate, i=i,
                pre_lag_ccsum=detections[i].detect_val,
                detect_chans=detections[i].no_chans,
                horizontal_chans=horizontal_chans,
                vertical_chans=vertical_chans, debug=debug))
    temp_catalog = Catalog()
    temp_catalog.events = [event_tup[1] for event_tup in events_list]
    return temp_catalog
Function to loop through multiple detections for one template .
14,370
def read_trigger_parameters(filename):
    """Read trigger parameters from a file into TriggerParameters objects.

    Lines starting with '#' are echoed as header; every other line must be
    a Python-literal dict (the format written by TriggerParameters.write).

    :param filename: Path to the parameter file.
    :return: list of TriggerParameters.
    """
    parameters = []
    # Context manager guarantees the file is closed even when a line fails
    # to parse (the original left the handle open on error).
    with open(filename, 'r') as f:
        print('Reading parameters with the following header:')
        for line in f:
            if line[0] == '#':
                print(line.rstrip('\n').lstrip('\n'))
            else:
                # Safe evaluation of a literal dict only.
                parameter_dict = ast.literal_eval(line)
                parameters.append(TriggerParameters(parameter_dict))
    return parameters
Read the trigger parameters into trigger_parameter classes .
14,371
def _channel_loop(tr, parameters, max_trigger_length=60, despike=False,
                  debug=0):
    """Run STA/LTA triggering on one trace using per-channel parameters.

    :param tr: obspy Trace (detrended/filtered in place).
    :param parameters: list of parameter dicts; the first entry matching
        the trace's station and channel is used.
    :param max_trigger_length: Maximum trigger duration (s); longer
        triggers are deleted. Falsy disables the limit.
    :param despike: Apply a median filter before triggering.
    :return: list of (on, off, id, cft_peak, cft_std) tuples; empty when
        no parameters are set for this channel.
    """
    for par in parameters:
        if par['station'] == tr.stats.station and \
           par['channel'] == tr.stats.channel:
            parameter = par
            break
    else:
        msg = 'No parameters set for station ' + str(tr.stats.station)
        warnings.warn(msg)
        return []
    triggers = []
    if debug > 0:
        print(tr)
    tr.detrend('simple')
    if despike:
        median_filter(tr)
    if parameter['lowcut'] and parameter['highcut']:
        tr.filter('bandpass', freqmin=parameter['lowcut'],
                  freqmax=parameter['highcut'])
    elif parameter['lowcut']:
        tr.filter('highpass', freq=parameter['lowcut'])
    elif parameter['highcut']:
        tr.filter('lowpass', freq=parameter['highcut'])
    df = tr.stats.sampling_rate
    cft = recursive_sta_lta(tr.data, int(parameter['sta_len'] * df),
                            int(parameter['lta_len'] * df))
    # BUG FIX: trig_args was previously only defined inside the if-branch,
    # raising NameError at trigger_onset whenever max_trigger_length was
    # falsy (e.g. 0 or None).
    trig_args = {}
    if max_trigger_length:
        trig_args['max_len_delete'] = True
        trig_args['max_len'] = int(max_trigger_length * df + 0.5)
    if debug > 3:
        plot_trigger(tr, cft, parameter['thr_on'], parameter['thr_off'])
    tmp_trigs = trigger_onset(cft, float(parameter['thr_on']),
                              float(parameter['thr_off']), **trig_args)
    for on, off in tmp_trigs:
        # NOTE(review): peak/std are taken from the filtered trace data,
        # not from the characteristic function — confirm this is intended.
        cft_peak = tr.data[on:off].max()
        cft_std = tr.data[on:off].std()
        on = tr.stats.starttime + float(on) / tr.stats.sampling_rate
        off = tr.stats.starttime + float(off) / tr.stats.sampling_rate
        triggers.append((on.timestamp, off.timestamp, tr.id,
                         cft_peak, cft_std))
    return triggers
Internal loop for parallel processing.
14,372
def write(self, filename, append=True):
    """Write the parameters to a file as a human-readable dict.

    A commented header (user, date, EQcorrscan version) precedes the
    literal dict of this object's attributes.

    :param filename: Output path.
    :param append: Append to (True) or overwrite (False) the file.
    """
    header = ' '.join(
        ['# User:', getpass.getuser(),
         '\n# Creation date:', str(UTCDateTime()),
         '\n# EQcorrscan version:', str(eqcorrscan.__version__),
         '\n\n\n'])
    mode = 'a' if append else 'w'
    # FIX: with-block closes the file even if a write raises (the original
    # leaked the handle on error).
    with open(filename, mode) as f:
        f.write(header)
        parameters = self.__dict__
        f.write(str(parameters))
        f.write('\n')
    return
Write the parameters to a file as a human - readable series of dicts .
14,373
def _get_lib_name ( lib ) : ext_suffix = sysconfig . get_config_var ( "EXT_SUFFIX" ) if not ext_suffix : try : ext_suffix = sysconfig . get_config_var ( "SO" ) except Exception as e : msg = ( "Empty 'EXT_SUFFIX' encountered while building CDLL " "filename and fallback to 'SO' variable failed " "(%s)." % str ( e ) ) warnings . warn ( msg ) pass if ext_suffix : libname = lib + ext_suffix return libname
Helper function to get an architecture and Python version specific library filename .
14,374
def _load_cdll(name):
    """Load a shared library built during installation with ctypes.

    The bundled FFTW DLLs are loaded first so the utils library can
    resolve its symbols on Windows; elsewhere that load fails harmlessly.

    :param name: Library base name (without suffix), e.g. ``'libutils'``.
    :return: the loaded ctypes.CDLL.
    :raises ImportError: when the main library cannot be loaded.
    """
    libname = _get_lib_name(name)
    libdir = os.path.join(os.path.dirname(__file__), 'lib')
    libpath = os.path.join(libdir, libname)
    static_fftw = os.path.join(libdir, 'libfftw3-3.dll')
    static_fftwf = os.path.join(libdir, 'libfftw3f-3.dll')
    try:
        ctypes.CDLL(str(static_fftw))
        ctypes.CDLL(str(static_fftwf))
    except OSError:
        # FIX: narrowed from a bare except (which also swallowed
        # KeyboardInterrupt). The DLLs only exist on Windows; a failed
        # load is the expected, ignorable case elsewhere.
        pass
    try:
        cdll = ctypes.CDLL(str(libpath))
    except Exception as e:
        msg = 'Could not load shared library "%s".\n\n %s' % (libname,
                                                              str(e))
        raise ImportError(msg)
    return cdll
Helper function to load a shared library built during installation with ctypes .
14,375
def cross_net(stream, env=False, debug=0, master=False):
    """Generate picks using a simple envelope cross-correlation.

    Each trace is correlated against a master trace (the first trace by
    default) and an emergent S pick is placed at the best-fit lag; the
    origin time is set one second before the earliest pick.

    :param stream: obspy Stream to pick.
    :param env: Correlate envelopes rather than raw data.
    :param master: Optional master trace; falsy selects the first trace.
    :return: obspy Event with picks and a rough origin time.
    """
    event = Event()
    event.origins.append(Origin())
    event.creation_info = CreationInfo(author='EQcorrscan',
                                       creation_time=UTCDateTime())
    event.comments.append(Comment(text='cross_net'))
    samp_rate = stream[0].stats.sampling_rate
    if not env:
        if debug > 2:
            print('Using the raw data')
        st = stream.copy()
        st.resample(samp_rate)
    else:
        st = stream.copy()
        if debug > 2:
            print('Computing envelope')
        for tr in st:
            tr.resample(samp_rate)
            tr.data = envelope(tr.data)
    if not master:
        master = st[0]
    else:
        master = master
    # NaN samples break the correlation; replace with zeros.
    master.data = np.nan_to_num(master.data)
    for i, tr in enumerate(st):
        tr.data = np.nan_to_num(tr.data)
        if debug > 2:
            msg = ' '.join(['Comparing', tr.stats.station, tr.stats.channel,
                            'with the master'])
            print(msg)
        # Allow shifts of up to 30% of the trace length.
        shift_len = int(0.3 * len(tr))
        if debug > 2:
            print('Shift length is set to ' + str(shift_len) + ' samples')
        index, cc = xcorr(master, tr, shift_len)
        wav_id = WaveformStreamID(station_code=tr.stats.station,
                                  channel_code=tr.stats.channel,
                                  network_code=tr.stats.network)
        event.picks.append(Pick(
            time=tr.stats.starttime + (index / tr.stats.sampling_rate),
            waveform_id=wav_id, phase_hint='S', onset='emergent'))
        if debug > 2:
            print(event.picks[i])
    event.origins[0].time = min([pick.time for pick in event.picks]) - 1
    del st
    return event
Generate picks using a simple envelope cross - correlation .
14,376
def cross_chan_coherence(st1, st2, allow_shift=False, shift_len=0.2, i=0,
                         xcorr_func='time_domain'):
    """Calculate the average cross-channel coherency between two streams.

    Channels are matched by station and channel code; the mean zero-lag
    (or best-shift) correlation over matched channels is returned.

    :param st1: First obspy Stream.
    :param st2: Second obspy Stream.
    :param allow_shift: Allow traces to shift when correlating.
    :param shift_len: Maximum shift (s) when allow_shift is True.
    :param i: Index passed through unchanged (for parallel bookkeeping).
    :param xcorr_func: Name of the registered correlation backend.
    :return: (coherence rounded to 6 d.p., i); (0, i) with a warning when
        no channels match.
    """
    cccoh = 0.0
    kchan = 0
    array_xcorr = get_array_xcorr(xcorr_func)
    for tr in st1:
        tr2 = st2.select(station=tr.stats.station,
                         channel=tr.stats.channel)
        if len(tr2) > 0 and tr.stats.sampling_rate != \
           tr2[0].stats.sampling_rate:
            warnings.warn('Sampling rates do not match, not using: %s.%s'
                          % (tr.stats.station, tr.stats.channel))
            # BUG FIX: the mismatched channel was previously still
            # correlated despite the warning; skip it as the message says.
            continue
        if len(tr2) > 0 and allow_shift:
            index, corval = xcorr(tr, tr2[0],
                                  int(shift_len * tr.stats.sampling_rate))
            cccoh += corval
            kchan += 1
        elif len(tr2) > 0:
            min_len = min(len(tr.data), len(tr2[0].data))
            cccoh += array_xcorr(
                np.array([tr.data[0:min_len]]), tr2[0].data[0:min_len],
                [0])[0][0][0]
            kchan += 1
    if kchan:
        cccoh /= kchan
        return np.round(cccoh, 6), i
    else:
        warnings.warn('No matching channels')
        return 0, i
Calculate cross - channel coherency .
14,377
def distance_matrix(stream_list, allow_shift=False, shift_len=0, cores=1):
    """Compute a symmetric waveform distance matrix from cross-correlations.

    Distance is 1 - coherence, so identical streams are 0 apart. The upper
    triangle is computed in parallel; the lower triangle is mirrored.

    NOTE(review): a fresh Pool is created for every row — pooling once
    outside the loop would be cheaper; confirm before changing.

    :param stream_list: list of obspy Streams to compare.
    :param allow_shift: Allow traces to shift when correlating.
    :param shift_len: Maximum shift (s) when allow_shift is True.
    :param cores: Number of worker processes per row.
    :return: 2D numpy array of distances.
    """
    dist_mat = np.array([np.array([0.0] * len(stream_list))] *
                        len(stream_list))
    for i, master in enumerate(stream_list):
        pool = Pool(processes=cores)
        results = [pool.apply_async(
            cross_chan_coherence,
            args=(master, stream_list[j], allow_shift, shift_len, j))
            for j in range(len(stream_list))]
        pool.close()
        dist_list = [p.get() for p in results]
        pool.join()
        # Re-order by the index returned alongside each coherence value.
        dist_list.sort(key=lambda tup: tup[1])
        for j in range(i, len(stream_list)):
            if i == j:
                dist_mat[i, j] = 0.0
            else:
                dist_mat[i, j] = 1 - dist_list[j][0]
    # Mirror the upper triangle into the lower.
    for i in range(1, len(stream_list)):
        for j in range(i):
            dist_mat[i, j] = dist_mat.T[i, j]
    return dist_mat
Compute distance matrix for waveforms based on cross - correlations .
14,378
def cluster(template_list, show=True, corr_thresh=0.3, allow_shift=False,
            shift_len=0, save_corrmat=False, cores='all', debug=1):
    """Cluster template waveforms based on average correlations.

    Builds a correlation-based distance matrix, runs hierarchical
    clustering, and cuts the dendrogram at 1 - corr_thresh.

    :param template_list: list of (stream, id) tuples to cluster.
    :param show: Plot the dendrogram.
    :param corr_thresh: Minimum average correlation within a group.
    :param allow_shift: Allow traces to shift when correlating.
    :param shift_len: Maximum shift (s) when allow_shift is True.
    :param save_corrmat: Save the distance matrix to dist_mat.npy.
    :param cores: Number of processes ('all' -> every available core).
    :return: list of groups, each a list of (stream, id) tuples.
    """
    if cores == 'all':
        num_cores = cpu_count()
    else:
        num_cores = cores
    stream_list = [x[0] for x in template_list]
    if debug >= 1:
        print('Computing the distance matrix using %i cores' % num_cores)
    dist_mat = distance_matrix(stream_list, allow_shift, shift_len,
                               cores=num_cores)
    if save_corrmat:
        np.save('dist_mat.npy', dist_mat)
        if debug >= 1:
            print('Saved the distance matrix as dist_mat.npy')
    dist_vec = squareform(dist_mat)
    if debug >= 1:
        print('Computing linkage')
    Z = linkage(dist_vec)
    if show:
        if debug >= 1:
            print('Plotting the dendrogram')
        dendrogram(Z, color_threshold=1 - corr_thresh,
                   distance_sort='ascending')
        plt.show()
    if debug >= 1:
        print('Clustering')
    indices = fcluster(Z, t=1 - corr_thresh, criterion='distance')
    group_ids = list(set(indices))
    if debug >= 1:
        msg = ' '.join(['Found', str(len(group_ids)), 'groups'])
        print(msg)
    # Pair each cluster id with its position, sorted so members of a group
    # are contiguous.
    indices = [(indices[i], i) for i in range(len(indices))]
    indices.sort(key=lambda tup: tup[0])
    groups = []
    if debug >= 1:
        print('Extracting and grouping')
    for group_id in group_ids:
        group = []
        for ind in indices:
            if ind[0] == group_id:
                group.append(template_list[ind[1]])
            elif ind[0] > group_id:
                # Sorted input: passing the id means the group is complete.
                groups.append(group)
                break
    # The final group never triggers the elif above; append it here.
    groups.append(group)
    return groups
Cluster template waveforms based on average correlations .
14,379
def SVD(stream_list, full=False):
    """Deprecated alias for :func:`svd`; retained for backwards compatibility."""
    warnings.warn('Depreciated, use svd instead.')
    return svd(stream_list=stream_list, full=full)
Deprecated. Use svd instead.
14,380
def svd(stream_list, full=False):
    """Compute the singular value decomposition of a set of templates.

    Traces are grouped by (station, channel); for each group the aligned
    trace matrix is decomposed with numpy's SVD.

    :param stream_list: list of obspy Streams with matching channels.
    :param full: Compute full-size U and V matrices.
    :return: (uvectors, svalues, svectors, stachans) lists, one entry per
        (station, channel) pair that yielded a valid matrix.
    """
    # Unique (station, channel) pairs across all streams.
    stachans = list(set([(tr.stats.station, tr.stats.channel)
                         for st in stream_list for tr in st]))
    stachans.sort()
    svalues = []
    svectors = []
    uvectors = []
    for stachan in stachans:
        lengths = []
        for st in stream_list:
            tr = st.select(station=stachan[0], channel=stachan[1])
            if len(tr) > 0:
                tr = tr[0]
            else:
                warnings.warn('Stream does not contain %s'
                              % '.'.join(list(stachan)))
                continue
            lengths.append(len(tr.data))
        min_length = min(lengths)
        for stream in stream_list:
            chan = stream.select(station=stachan[0], channel=stachan[1])
            if chan:
                if len(chan[0].data) > min_length:
                    # >0.1 s mismatch implies misalignment, not jitter.
                    if abs(len(chan[0].data) - min_length) > \
                       0.1 * chan[0].stats.sampling_rate:
                        raise IndexError('More than 0.1 s length '
                                         'difference, align and fix')
                    warnings.warn('Channels are not equal length, trimming')
                    chan[0].data = chan[0].data[0:min_length]
                # NOTE(review): chan_mat is tracked via locals(); when the
                # <2D 'continue' below fires, it is not deleted, so a stale
                # matrix could leak into the next stachan — confirm.
                if 'chan_mat' not in locals():
                    chan_mat = chan[0].data
                else:
                    chan_mat = np.vstack((chan_mat, chan[0].data))
        if not len(chan_mat.shape) > 1:
            warnings.warn('Matrix of traces is less than 2D for %s'
                          % '.'.join(list(stachan)))
            continue
        chan_mat = np.asarray(chan_mat)
        u, s, v = np.linalg.svd(chan_mat.T, full_matrices=full)
        svalues.append(s)
        svectors.append(v)
        uvectors.append(u)
        del (chan_mat)
    return uvectors, svalues, svectors, stachans
Compute the SVD of a number of templates .
14,381
def empirical_SVD(stream_list, linear=True):
    """Deprecated alias for :func:`empirical_svd`; retained for compatibility."""
    warnings.warn('Depreciated, use empirical_svd instead.')
    return empirical_svd(stream_list=stream_list, linear=linear)
Deprecated. Use empirical_svd instead.
14,382
def empirical_svd(stream_list, linear=True):
    """Generate empirical subspace detectors by stacking.

    The first detector is a (linear or phase-weighted) stack of the input
    streams; the second is its first derivative shifted by half a sample.

    :param stream_list: list of obspy Streams with matching channels.
    :param linear: Use a linear stack (True) or phase-weighted stack.
    :return: [first_subspace, second_subspace] obspy Streams.
    """
    # First, trim every channel of a stachan to a common length.
    stachans = list(set([(tr.stats.station, tr.stats.channel)
                         for st in stream_list for tr in st]))
    for stachan in stachans:
        lengths = []
        for st in stream_list:
            lengths.append(len(st.select(
                station=stachan[0], channel=stachan[1])[0]))
        min_length = min(lengths)
        for st in stream_list:
            tr = st.select(station=stachan[0], channel=stachan[1])[0]
            if len(tr.data) > min_length:
                sr = tr.stats.sampling_rate
                # >0.1 s mismatch implies misalignment, not jitter.
                if abs(len(tr.data) - min_length) > (0.1 * sr):
                    msg = 'More than 0.1 s length difference, align and fix'
                    raise IndexError(msg)
                msg = ' is not the same length as others, trimming the end'
                warnings.warn(str(tr) + msg)
                tr.data = tr.data[0:min_length]
    if linear:
        first_subspace = stacking.linstack(stream_list)
    else:
        first_subspace = stacking.PWS_stack(streams=stream_list)
    # Second basis: derivative of the stack, shifted by half a sample.
    second_subspace = first_subspace.copy()
    for i in range(len(second_subspace)):
        second_subspace[i].data = np.diff(second_subspace[i].data)
        delta = second_subspace[i].stats.delta
        second_subspace[i].stats.starttime += 0.5 * delta
    return [first_subspace, second_subspace]
Empirical subspace detector generation function .
14,383
def SVD_2_stream(uvectors, stachans, k, sampling_rate):
    """Deprecated alias for :func:`svd_to_stream`; retained for compatibility."""
    warnings.warn('Depreciated, use svd_to_stream instead.')
    return svd_to_stream(uvectors=uvectors, stachans=stachans, k=k,
                         sampling_rate=sampling_rate)
Deprecated. Use svd_to_stream instead.
14,384
def svd_to_stream(uvectors, stachans, k, sampling_rate):
    """Convert the singular vectors output by SVD into k detector streams.

    :param uvectors: per-stachan 2D arrays of left singular vectors.
    :param stachans: list of (station, channel) tuples matching uvectors.
    :param k: Number of singular vectors (dimensions) to convert.
    :param sampling_rate: Sampling rate for the output traces.
    :return: list of k obspy Streams.
    """
    svstreams = []
    for i in range(k):
        svstream = []
        for j, stachan in enumerate(stachans):
            if len(uvectors[j]) <= k:
                # BUG FIX: str.join takes a single iterable; the original
                # passed two arguments and raised TypeError whenever this
                # warning fired.
                warnings.warn(
                    'Too few traces at %s for a %02d dimensional '
                    'subspace. Detector streams will not include '
                    'this channel.' % ('.'.join([stachan[0], stachan[1]]),
                                       k))
            else:
                svstream.append(Trace(
                    uvectors[j][i],
                    header={'station': stachan[0],
                            'channel': stachan[1],
                            'sampling_rate': sampling_rate}))
        svstreams.append(Stream(svstream))
    return svstreams
Convert the singular vectors output by SVD to streams .
14,385
def corr_cluster(trace_list, thresh=0.9):
    """Group traces based on correlations above threshold with the stack.

    Two passes: a loose 0.6 threshold against the stack of everything,
    then ``thresh`` against the stack of the first-pass survivors.

    :param trace_list: list of obspy Traces of equal length.
    :param thresh: Final correlation threshold for group membership.
    :return: boolean numpy array, True where the trace made the group.
    """
    stack = stacking.linstack([Stream(tr) for tr in trace_list])[0]
    output = np.array([False] * len(trace_list))
    group1 = []
    array_xcorr = get_array_xcorr()
    for i, tr in enumerate(trace_list):
        if array_xcorr(
                np.array([tr.data]), stack.data, [0])[0][0][0] > 0.6:
            output[i] = True
            group1.append(tr)
    if not group1:
        warnings.warn('Nothing made it past the first 0.6 threshold')
        return output
    # Second pass: re-stack the survivors and apply the user threshold.
    stack = stacking.linstack([Stream(tr) for tr in group1])[0]
    group2 = []
    for i, tr in enumerate(trace_list):
        if array_xcorr(
                np.array([tr.data]), stack.data, [0])[0][0][0] > thresh:
            group2.append(tr)
            output[i] = True
        else:
            output[i] = False
    return output
Group traces based on correlations above threshold with the stack .
14,386
def dist_mat_km(catalog):
    """Compute the hypocentral distance matrix (km) for a catalog.

    Uses each event's preferred origin when set, else its last origin.

    NOTE(review): depth is floor-divided by 1000 (metres -> whole km), so
    sub-kilometre depth differences are discarded — confirm intended.

    :param catalog: obspy Catalog of located events.
    :return: symmetric 2D numpy array of distances (km).
    """
    dist_mat = np.array([np.array([0.0] * len(catalog))] * len(catalog))
    for i, master in enumerate(catalog):
        mast_list = []
        if master.preferred_origin():
            master_ori = master.preferred_origin()
        else:
            master_ori = master.origins[-1]
        master_tup = (master_ori.latitude, master_ori.longitude,
                      master_ori.depth // 1000)
        for slave in catalog:
            if slave.preferred_origin():
                slave_ori = slave.preferred_origin()
            else:
                slave_ori = slave.origins[-1]
            slave_tup = (slave_ori.latitude, slave_ori.longitude,
                         slave_ori.depth // 1000)
            mast_list.append(dist_calc(master_tup, slave_tup))
        # Fill only the upper triangle here; mirrored below.
        for j in range(i, len(catalog)):
            dist_mat[i, j] = mast_list[j]
    for i in range(1, len(catalog)):
        for j in range(i):
            dist_mat[i, j] = dist_mat.T[i, j]
    return dist_mat
Compute the distance matrix for all a catalog using epicentral separation .
14,387
def space_cluster(catalog, d_thresh, show=True):
    """Cluster a catalog by inter-event distance only.

    Average-linkage hierarchical clustering on the km distance matrix,
    cut at ``d_thresh``.

    :param catalog: obspy Catalog of located events.
    :param d_thresh: Maximum inter-event distance (km) within a cluster.
    :param show: Plot the dendrogram.
    :return: list of obspy Catalogs, one per cluster.
    """
    dist_mat = dist_mat_km(catalog)
    dist_vec = squareform(dist_mat)
    Z = linkage(dist_vec, method='average')
    indices = fcluster(Z, t=d_thresh, criterion='distance')
    group_ids = list(set(indices))
    # Pair each cluster id with its catalog position.
    indices = [(indices[i], i) for i in range(len(indices))]
    if show:
        dendrogram(Z, color_threshold=d_thresh,
                   distance_sort='ascending')
        plt.show()
    indices.sort(key=lambda tup: tup[0])
    groups = []
    for group_id in group_ids:
        group = Catalog()
        for ind in indices:
            if ind[0] == group_id:
                group.append(catalog[ind[1]])
            elif ind[0] > group_id:
                # Sorted input: passing the id means the group is complete.
                groups.append(group)
                break
    # The final group never triggers the elif above; append it here.
    groups.append(group)
    return groups
Cluster a catalog by distance only .
14,388
def space_time_cluster(catalog, t_thresh, d_thresh):
    """Cluster detections in space and time.

    First clusters by inter-event distance, then splits each spatial group
    by origin-time separation from every other member.

    NOTE(review): events are removed from ``group`` while it is being
    iterated, which can skip elements; the code also assumes every event
    has a preferred_origin — confirm upstream guarantees before relying
    on this.

    :param catalog: obspy Catalog of events to cluster.
    :param t_thresh: Maximum inter-event time (s) within a cluster.
    :param d_thresh: Maximum inter-event distance (km) within a cluster.
    :return: list of obspy Catalogs, one per cluster.
    """
    initial_spatial_groups = space_cluster(catalog=catalog,
                                           d_thresh=d_thresh, show=False)
    # Convert catalogs to plain lists so that events can be removed.
    initial_spatial_lists = []
    for group in initial_spatial_groups:
        initial_spatial_lists.append(list(group))
    groups = []
    for group in initial_spatial_lists:
        for master in group:
            for event in group:
                if abs(event.preferred_origin().time -
                       master.preferred_origin().time) > t_thresh:
                    # Too far in time from this master: split out alone.
                    groups.append([event])
                    group.remove(event)
        groups.append(group)
    return [Catalog(group) for group in groups]
Cluster detections in space and time .
14,389
def re_thresh_csv(path, old_thresh, new_thresh, chan_thresh):
    """Remove detections by applying a new (usually higher) threshold.

    Legacy helper; superseded by Party.rethreshold.

    NOTE(review): ``con1`` is the re-scaled threshold itself (a float), not
    a comparison, so it is truthy whenever the threshold is non-zero —
    the intended test appears to be the one performed by ``con3``;
    confirm before relying on con1.

    :param path: Path to a detection csv readable by read_detections.
    :param old_thresh: Threshold the detections were made at.
    :param new_thresh: New threshold to apply.
    :param chan_thresh: Minimum channel count for a kept detection.
    :return: list of Detection objects passing the new threshold.
    """
    from eqcorrscan.core.match_filter import read_detections
    warnings.warn('Legacy function, please use '
                  'eqcorrscan.core.match_filter.Party.rethreshold.')
    old_detections = read_detections(path)
    old_thresh = float(old_thresh)
    new_thresh = float(new_thresh)
    detections = []
    detections_in = 0
    detections_out = 0
    for detection in old_detections:
        detections_in += 1
        con1 = (new_thresh / old_thresh) * detection.threshold
        con2 = detection.no_chans >= chan_thresh
        requirted_thresh = (new_thresh / old_thresh) * detection.threshold
        con3 = abs(detection.detect_val) >= requirted_thresh
        if all([con1, con2, con3]):
            detections_out += 1
            detections.append(detection)
    print('Read in %i detections' % detections_in)
    print('Left with %i detections' % detections_out)
    return detections
Remove detections by changing the threshold .
14,390
def pool_boy(Pool, traces, **kwargs):
    """Context manager handling setup and cleanup of a pool object.

    Caps the worker count at ``traces`` so no worker sits idle.

    :param Pool: Pool class to instantiate (e.g. multiprocessing.Pool).
    :param traces: Number of work items; upper bound on workers.
    :param kwargs: 'cores' overrides the default cpu_count() worker count.
    """
    n_cores = kwargs.get('cores', cpu_count())
    if n_cores is None:
        n_cores = cpu_count()
    if n_cores > traces:
        n_cores = traces
    pool = Pool(n_cores)
    # FIX: close/join the pool even when the managed block raises — the
    # original leaked the pool on any exception inside the with-body.
    try:
        yield pool
    finally:
        pool.close()
        pool.join()
A context manager for handling the setup and cleanup of a pool object .
14,391
def _general_multithread(func):
    """Return a stream-xcorr runner for ``func`` backed by a thread pool.

    The returned callable correlates templates against a stream using a
    ThreadPool with one worker per stream channel (capped by pool_boy).
    """
    def multithread(templates, stream, *args, **kwargs):
        with pool_boy(ThreadPool, len(stream), **kwargs) as pool:
            return _pool_normxcorr(templates, stream, pool=pool, func=func)
    return multithread
return the general multithreading function using func
14,392
def register_array_xcorr(name, func=None, is_default=False):
    """Decorator for registering correlation functions.

    Registers the function in XCOR_FUNCS under ``name`` and attaches
    stream-level helpers (multithread, multiprocess, concurrent,
    stream_xcorr) plus a ``register`` hook for overriding individual
    methods. Usable as a bare decorator, as a named decorator, or called
    directly with a name and function.

    :param name: Registration name, or (bare-decorator use) the function.
    :param func: The function to register when name is a string.
    :param is_default: Also install the function as 'default'.
    :return: the registered function, or a decorator that registers one.
    """
    valid_methods = set(list(XCOR_ARRAY_METHODS) +
                        list(XCORR_STREAM_METHODS))
    cache = {}

    def register(register_str):
        # Hook attached to registered functions: replaces one named
        # method (e.g. 'stream_xcorr') on the cached function.
        if register_str not in valid_methods:
            msg = 'register_name must be in %s' % valid_methods
            raise ValueError(msg)

        def _register(func):
            cache[register_str] = func
            setattr(cache['func'], register_str, func)
            return func
        return _register

    def wrapper(func, func_name=None):
        # NOTE(review): due to precedence this parses as
        # (func_name or name.__name__) if callable(name) else str(name);
        # confirm that is the intended resolution order.
        fname = func_name or name.__name__ if callable(name) else str(name)
        XCOR_FUNCS[fname] = func
        func.register = register
        cache['func'] = func
        func.multithread = _general_multithread(func)
        func.multiprocess = _general_multiprocess(func)
        func.concurrent = _general_multithread(func)
        func.stream_xcorr = _general_serial(func)
        func.array_xcorr = func
        func.registered = True
        if is_default:
            # Copy so later mutation of func does not change the default.
            XCOR_FUNCS['default'] = copy.deepcopy(func)
        return func

    # Bare-decorator use: name is actually the function itself.
    if callable(name):
        return wrapper(name)
    if callable(func):
        return wrapper(func, func_name=name)
    return wrapper
Decorator for registering correlation functions .
14,393
def _get_registerd_func(name_or_func):
    """Resolve a name or callable to a registered xcorr function."""
    if callable(name_or_func):
        candidate = register_array_xcorr(name_or_func)
    else:
        candidate = XCOR_FUNCS[name_or_func or 'default']
    assert callable(candidate), 'func is not callable'
    # Functions looked up by name may predate registration decoration.
    if hasattr(candidate, 'registered'):
        return candidate
    return register_array_xcorr(candidate)
get a xcorr function from a str or callable .
14,394
def numpy_normxcorr(templates, stream, pads, *args, **kwargs):
    """Compute the normalized cross-correlation using numpy and bottleneck.

    Correlates every template row against the single-channel ``stream`` in
    the frequency domain, normalising with bottleneck moving mean/std.

    NOTE(review): relies on the private scipy.signal.signaltools._centered
    helper, which moved in newer SciPy releases — confirm the supported
    SciPy range before upgrading.

    :param templates: 2D array, one template per row (all-NaN rows mark
        unused channels).
    :param stream: 1D array of continuous data.
    :param pads: per-template sample pads applied to the output.
    :return: (correlations as float32, used_chans boolean array).
    """
    import bottleneck
    from scipy.signal.signaltools import _centered
    # Rows that are all-NaN mark channels absent from the template.
    used_chans = ~np.isnan(templates).any(axis=1)
    stream = stream.astype(np.float64)
    templates = templates.astype(np.float64)
    template_length = templates.shape[1]
    stream_length = len(stream)
    fftshape = next_fast_len(template_length + stream_length - 1)
    # Moving normalisers over every stream window of template length.
    stream_mean_array = bottleneck.move_mean(
        stream, template_length)[template_length - 1:]
    stream_std_array = bottleneck.move_std(
        stream, template_length)[template_length - 1:]
    # NaN-out flat windows to avoid divide-by-zero; zeroed again below.
    stream_std_array[stream_std_array == 0] = np.nan
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) /
            (templates.std(axis=-1, keepdims=True) * template_length))
    norm_sum = norm.sum(axis=-1, keepdims=True)
    stream_fft = np.fft.rfft(stream, fftshape)
    # Flip the templates so multiplication in frequency = correlation.
    template_fft = np.fft.rfft(np.flip(norm, axis=-1), fftshape, axis=-1)
    res = np.fft.irfft(template_fft * stream_fft,
                       fftshape)[:, 0:template_length + stream_length - 1]
    res = ((_centered(res, stream_length - template_length + 1)) -
           norm_sum * stream_mean_array) / stream_std_array
    res[np.isnan(res)] = 0.0
    # Apply pads so each channel lines up in time.
    for i, pad in enumerate(pads):
        res[i] = np.append(res[i], np.zeros(pad))[pad:]
    return res.astype(np.float32), used_chans
Compute the normalized cross - correlation using numpy and bottleneck .
14,395
def time_multi_normxcorr(templates, stream, pads, threaded=False, *args,
                         **kwargs):
    """Compute cross-correlations in the time-domain using the C routine.

    De-means templates and stream, hands flattened contiguous float32
    arrays to the compiled ``multi_normxcorr_time`` (optionally the
    OpenMP-threaded variant), and adds the means back afterwards.

    :param templates: 2D array, one template per row (all-NaN rows mark
        unused channels).
    :param stream: 1D array of continuous data.
    :param pads: per-template sample pads applied to the output.
    :param threaded: Use the threaded C routine, passing a core count.
    :return: (correlation array, used_chans boolean array).
    """
    used_chans = ~np.isnan(templates).any(axis=1)
    utilslib = _load_cdll('libutils')
    # ctypes signature: templates, template_len, n_templates, stream,
    # image_len, output (+ core count for the threaded variant).
    argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int, ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS'))]
    restype = ctypes.c_int
    if threaded:
        func = utilslib.multi_normxcorr_time_threaded
        argtypes.append(ctypes.c_int)
    else:
        func = utilslib.multi_normxcorr_time
    func.argtypes = argtypes
    func.restype = restype
    # De-mean before correlating (the C routine expects zero-mean data).
    templates_means = templates.mean(axis=1).astype(
        np.float32)[:, np.newaxis]
    stream_mean = stream.mean().astype(np.float32)
    templates = templates.astype(np.float32) - templates_means
    stream = stream.astype(np.float32) - stream_mean
    template_len = templates.shape[1]
    n_templates = templates.shape[0]
    image_len = stream.shape[0]
    # Output buffer the C routine writes into.
    ccc = np.ascontiguousarray(
        np.empty((image_len - template_len + 1) * n_templates), np.float32)
    t_array = np.ascontiguousarray(templates.flatten(), np.float32)
    time_args = [t_array, template_len, n_templates,
                 np.ascontiguousarray(stream, np.float32), image_len, ccc]
    if threaded:
        time_args.append(kwargs.get('cores', cpu_count()))
    func(*time_args)
    ccc[np.isnan(ccc)] = 0.0
    ccc = ccc.reshape((n_templates, image_len - template_len + 1))
    # Apply pads so channels align in time.
    for i in range(len(pads)):
        ccc[i] = np.append(ccc[i], np.zeros(pads[i]))[pads[i]:]
    # Add the means back to the local (rebound) arrays.
    templates += templates_means
    stream += stream_mean
    return ccc, used_chans
Compute cross - correlations in the time - domain using C routine .
14,396
def _time_threaded_normxcorr(templates, stream, *args, **kwargs):
    """
    Use the threaded time-domain C routine for concurrency.

    :param templates: List of template streams to correlate.
    :param stream: Continuous data stream to correlate with.

    :return: Tuple of (cccsums, no_chans, chans): the stacked cross-correlation
        sums per template, the number of channels that contributed to each
        stack, and per-template lists of (station, channel) pairs used.
    """
    no_chans = np.zeros(len(templates))
    chans = [[] for _ in range(len(templates))]
    array_dict_tuple = _get_array_dicts(templates, stream)
    stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
    cccsums = np.zeros([
        len(templates), len(stream[0]) - len(templates[0][0]) + 1])
    for seed_id in seed_ids:
        # Correlate every template's trace for this seed id in one C call
        # (threaded=True).
        tr_cc, tr_chans = time_multi_normxcorr(
            template_dict[seed_id], stream_dict[seed_id], pad_dict[seed_id],
            True)
        cccsums = np.sum([cccsums, tr_cc], axis=0)
        # FIX: np.int was deprecated in numpy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        no_chans += tr_chans.astype(int)
        # Record (station, channel) for every template that used this trace.
        for chan, state in zip(chans, tr_chans):
            if state:
                chan.append((seed_id.split('.')[1],
                             seed_id.split('.')[-1].split('_')[0]))
    return cccsums, no_chans, chans
Use the threaded time-domain routine for concurrency.
14,397
def _fftw_stream_xcorr(templates, stream, *args, **kwargs):
    """
    Apply the FFTW normxcorr routine concurrently.

    :param templates: List of template streams to correlate.
    :param stream: Continuous data stream to correlate with.

    Concurrency is controlled by ``kwargs['cores']`` (inner, FFT-level
    threads) and ``kwargs['cores_outer']`` (outer, per-channel processes);
    when neither is given the inner loop uses OMP_NUM_THREADS or all CPUs.

    :return: Tuple of (cccsums, no_chans, chans): stacked cross-correlation
        sums, per-template channel counts, and per-template lists of
        (station, channel) pairs used.
    """
    num_cores_inner = kwargs.get('cores')
    num_cores_outer = kwargs.get('cores_outer')
    if num_cores_inner is None and num_cores_outer is None:
        # Default: give all parallelism to the inner (FFT) loop.
        num_cores_inner = int(os.getenv("OMP_NUM_THREADS", cpu_count()))
        num_cores_outer = 1
    elif num_cores_inner is not None and num_cores_outer is None:
        num_cores_outer = 1
    elif num_cores_outer is not None and num_cores_inner is None:
        num_cores_inner = 1
    chans = [[] for _i in range(len(templates))]
    array_dict_tuple = _get_array_dicts(templates, stream)
    stream_dict, template_dict, pad_dict, seed_ids = array_dict_tuple
    # Sanity check: at least one shared seed id must exist.
    # NOTE(review): assert is stripped under -O; kept as-is so the
    # exception type callers may rely on (AssertionError) is unchanged.
    assert set(seed_ids)
    cccsums, tr_chans = fftw_multi_normxcorr(
        template_array=template_dict, stream_array=stream_dict,
        pad_array=pad_dict, seed_ids=seed_ids, cores_inner=num_cores_inner,
        cores_outer=num_cores_outer)
    # FIX: np.int was deprecated in numpy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    no_chans = np.sum(np.array(tr_chans).astype(int), axis=0)
    # Record (station, channel) for every template that used each trace.
    for seed_id, tr_chan in zip(seed_ids, tr_chans):
        for chan, state in zip(chans, tr_chan):
            if state:
                chan.append((seed_id.split('.')[1],
                             seed_id.split('.')[-1].split('_')[0]))
    return cccsums, no_chans, chans
Apply the FFTW normxcorr routine concurrently.
14,398
def get_stream_xcorr(name_or_func=None, concurrency=None):
    """
    Return a function for performing normalized cross-correlation on lists of
    streams.

    :param name_or_func: Registered correlation name, or a callable to use
        directly; None selects the default registered function.
    :param concurrency: Name of the concurrency strategy attribute to fetch;
        defaults to 'stream_xcorr'.

    :return: The callable bound to the requested concurrency strategy.
    :raises ValueError: If the selected function does not provide the
        requested concurrency strategy.
    """
    registered = _get_registerd_func(name_or_func)
    strategy = concurrency if concurrency else 'stream_xcorr'
    if hasattr(registered, strategy):
        return getattr(registered, strategy)
    raise ValueError(
        '%s does not support concurrency %s' % (registered.__name__, strategy))
Return a function for performing normalized cross-correlation on lists of streams.
14,399
def _get_array_dicts(templates, stream, copy_streams=True):
    """
    Prepare template and stream data as per-channel numpy arrays.

    :param templates: List of template streams; all are assumed to share the
        channel layout of templates[0] (same traces in the same order after
        sorting) -- TODO confirm against callers.
    :param stream: Continuous data stream containing one trace per seed id.
    :param copy_streams: NOTE(review): currently unused -- both ``stream``
        and every template are sorted in place regardless; confirm intent.

    :return: Tuple of (stream_dict, template_dict, pad_dict, seed_ids) keyed
        by '<trace-id>_<position>' strings.
    """
    template_dict = {}
    stream_dict = {}
    pad_dict = {}
    t_starts = []
    # In-place sort so template and stream traces line up by seed id.
    stream.sort(['network', 'station', 'location', 'channel'])
    for template in templates:
        template.sort(['network', 'station', 'location', 'channel'])
        # Earliest trace start per template, used to compute pads below.
        t_starts.append(min([tr.stats.starttime for tr in template]))
    # Positional suffix keeps duplicate trace ids distinct.
    seed_ids = [tr.id + '_' + str(i) for i, tr in enumerate(templates[0])]
    for i, seed_id in enumerate(seed_ids):
        # Stack the i-th trace of every template into one float32 2D array.
        temps_with_seed = [template[i].data for template in templates]
        t_ar = np.array(temps_with_seed).astype(np.float32)
        template_dict.update({seed_id: t_ar})
        stream_dict.update(
            {seed_id: stream.select(
                id=seed_id.split('_')[0])[0].data.astype(np.float32)})
        # Pad = samples between this trace's start and its template's
        # earliest start, per template.
        pad_list = [
            int(round(template[i].stats.sampling_rate *
                      (template[i].stats.starttime - t_starts[j])))
            for j, template in zip(range(len(templates)), templates)]
        pad_dict.update({seed_id: pad_list})
    return stream_dict, template_dict, pad_dict, seed_ids
Prepare template and stream data as dicts of per-channel arrays.