Columns: idx (int64, values 0–63k) · question (string, lengths 61–4.03k) · target (string, lengths 6–1.23k)
4,300
def regex_in_file(regex, filepath, return_match=False):
    file_content = get_file_content(filepath)
    re_method = funcy.re_find if return_match else funcy.re_test
    return re_method(regex, file_content)
Search for a regex in a file.
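A usage sketch (the file name and patterns are hypothetical; assumes the get_file_content helper and funcy are importable as above):
    # funcy.re_test returns a bool; funcy.re_find returns the first match (or None).
    found = regex_in_file(r'version\s*=', 'setup.cfg')
    match = regex_in_file(r'version\s*=\s*(\S+)', 'setup.cfg', return_match=True)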
4,301
def regex_in_package_file(regex, filename, package_name, return_match=False):
    filepath = package_file_path(filename, package_name)
    return regex_in_file(regex, filepath, return_match=return_match)
Search for a regex in a file contained within the package directory.
4,302
def string_is_url(test_str):
    parsed = urlparse.urlparse(test_str)
    return parsed.scheme is not None and parsed.scheme != ''
Test whether a string is a URL, defined in this case as a string for which urlparse returns a scheme component.
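For illustration, how the scheme-based definition behaves on hypothetical inputs (using the Python 3 urllib.parse equivalent of the urlparse module above):
    from urllib.parse import urlparse
    assert urlparse('http://example.com').scheme == 'http'  # counts as a URL
    assert urlparse('example.com/page').scheme == ''        # not a URL by this definition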
4,303
def item_transaction(self, item) -> Transaction:
    items = self.__build_transaction_items(item)
    transaction = Transaction(self, item, items)
    self.__transactions.append(transaction)
    return transaction
Begin transaction state for item.
4,304
def insert_data_item(self, before_index, data_item, auto_display: bool = True) -> None:
    assert data_item is not None
    assert data_item not in self.data_items
    assert before_index <= len(self.data_items) and before_index >= 0
    assert data_item.uuid not in self.__uuid_to_data_item
    data_item.session_id = ...
Insert a new data item into the document model.
4,305
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool = False) -> typing.Optional[typing.Sequence]:
    return self.__cascade_delete(data_item, safe=safe)
Remove the data item from the document model.
4,306
def transaction_context(self):
    class DocumentModelTransaction:
        def __init__(self, document_model):
            self.__document_model = document_model
        def __enter__(self):
            self.__document_model.persistent_object_context.enter_write_delay(self.__document_model)
            return self
        def __exit__(self, type, valu...
Return a context object for a document-wide transaction.
4,307
def data_item_live(self, data_item):
    class LiveContextManager:
        def __init__(self, manager, object):
            self.__manager = manager
            self.__object = object
        def __enter__(self):
            self.__manager.begin_data_item_live(self.__object)
            return self
        def __exit__(self, type, value, traceback):
            self._...
Return a context manager to put the data item in a live state.
4,308
def begin_data_item_live(self, data_item):
    with self.__live_data_items_lock:
        old_live_count = self.__live_data_items.get(data_item.uuid, 0)
        self.__live_data_items[data_item.uuid] = old_live_count + 1
        if old_live_count == 0:
            data_item._enter_live_state()
            for dependent_data_item in self.ge...
Begins a live state for the data item.
4,309
def end_data_item_live(self, data_item):
    with self.__live_data_items_lock:
        live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
        assert live_count >= 0
        self.__live_data_items[data_item.uuid] = live_count
        if live_count == 0:
            data_item._exit_live_state()
            for dependent_data_item in s...
Ends a live state for the data item.
4,310
def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel):
    session_id = self.session_id
    key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id)
    data_item_reference = self.get_...
Construct a data item reference.
4,311
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'):
    if destino:
        if os.path.exists(destino):
            raise IOError((errno.EEXIST, 'File exists', destino,))
        destino = os.path.abspath(destino)
        fd = os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY)
    else:
        fd, de...
Saves the decoded log file.
4,312
def load_data_old(self):
    units = ""
    if len(self.file_objects) == 1 and self.file_objects[0] is not None:
        data = self.file_objects[0].variables[self.variable][self.forecast_hours]
        if hasattr(self.file_objects[0].variables[self.variable], "units"):
            units = self.file_obje...
Loads a time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported.
4,313
def load_data(self):
    units = ""
    if self.file_objects[0] is None:
        raise IOError()
    var_name, z_index = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys()))
    ntimes = 0
    if 'time' in self.file_objects[0].variables[var_name].dimensions:
        ntimes = len...
Load data from a netCDF file object or a list of netCDF file objects. Handles special variable name formats.
4,314
def format_var_name(variable, var_list):
    z_index = None
    if variable in var_list:
        var_name = variable
    elif variable.ljust(6, "_") in var_list:
        var_name = variable.ljust(6, "_")
    elif any([variable in v_sub.split("_") for v_sub in var_list]):
        var_name = var_list[[variable in v_sub.split...
Searches var_list for the variable name and checks other variable name format options.
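A worked example of the padding fallback above, with a hypothetical variable name: some model output pads names to six characters with underscores, so
    assert "ugrd".ljust(6, "_") == "ugrd__"
meaning format_var_name("ugrd", var_list) would match an entry "ugrd__" in var_list.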
4,315
def save_models(self, model_path):
    for group, condition_model_set in self.condition_models.items():
        for model_name, model_obj in condition_model_set.items():
            out_filename = model_path + "{0}_{1}_condition.pkl".format(group, model_name.replace(" ", "-"))
            with open(out_filename, "wb") a...
Save machine learning models to pickle files.
4,316
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
    merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"],
                                on=["Step_ID", "Track_ID", "Ensemble_Member", "Forecast_Hour"])
    all_members = self.data[mode]["combo"]["Ensemble_Member...
Output hail forecast values to CSV files by run date and ensemble member.
4,317
def load_forecasts(self):
    forecast_path = self.forecast_json_path + "/{0}/{1}/".format(self.run_date.strftime("%Y%m%d"), self.ensemble_member)
    forecast_files = sorted(glob(forecast_path + "*.json"))
    for forecast_file in forecast_files:
        file_obj = open(forecast_file)
        json_obj = json.load...
Loads the forecast files and gathers the forecast information into pandas DataFrames.
4,318
def load_obs(self):
    track_total_file = self.track_data_csv_path + "track_total_{0}_{1}_{2}.csv".format(self.ensemble_name, self.ensemble_member, self.run_date.strftime("%Y%m%d"))
    track_step_file = self.track_data_csv_path + "track_step_{0}_{1}_{2}.csv".format(self....
Loads the track total and step files and merges the information into a single data frame.
4,319
def merge_obs(self):
    for model_type in self.model_types:
        self.matched_forecasts[model_type] = {}
        for model_name in self.model_names[model_type]:
            self.matched_forecasts[model_type][model_name] = pd.merge(self.forecasts[model_type][model_name], self.obs, right_on="Step_ID", h...
Match forecasts and observations.
4,320
def roc(self, model_type, model_name, intensity_threshold, prob_thresholds, query=None):
    roc_obj = DistributedROC(prob_thresholds, 0.5)
    if query is not None:
        sub_forecasts = self.matched_forecasts[model_type][model_name].query(query)
        sub_forecasts = sub_forecasts.reset_index(drop=True...
Calculates a ROC curve at a specified intensity threshold.
4,321
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None):
    if query is not None:
        dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
        dist_forecasts = dist_forecasts.reset_index(drop=True)
        condit...
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.
4,322
def get_params(self):
    params = {'key': self.get_app_key(), 'uid': self.user_id, 'widget': self.widget_code}
    products_number = len(self.products)
    if self.get_api_type() == self.API_GOODS:
        if isinstance(self.products, list):
            if products_number == 1:
                product = self.products[0]
                i...
Get signature and params.
4,323
def load_forecasts(self):
    run_date_str = self.run_date.strftime("%Y%m%d")
    for model_name in self.model_names:
        self.raw_forecasts[model_name] = {}
        forecast_file = self.forecast_path + run_date_str + "/" + model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member...
Load the forecast files into memory.
4,324
def get_window_forecasts(self):
    for model_name in self.model_names:
        self.window_forecasts[model_name] = {}
        for size_threshold in self.size_thresholds:
            self.window_forecasts[model_name][size_threshold] = np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0...
Aggregate the forecasts within the specified time windows.
4,325
def dilate_obs(self, dilation_radius):
    for s in self.size_thresholds:
        self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape)
        for t in range(self.dilated_obs[s].shape[0]):
            self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variab...
Use a dilation filter to grow positive observation areas by a specified number of grid points.
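A minimal sketch of the dilation idea, assuming a 2D binary observation field and scipy.ndimage (the original may use a different structuring element or a different binary_dilation implementation):
    import numpy as np
    from scipy.ndimage import binary_dilation

    obs = np.zeros((5, 5), dtype=bool)
    obs[2, 2] = True                            # one positive observation point
    grown = binary_dilation(obs, iterations=2)  # grow the area by 2 grid points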
4,326
def roc_curves(self, prob_thresholds):
    all_roc_curves = {}
    for model_name in self.model_names:
        all_roc_curves[model_name] = {}
        for size_threshold in self.size_thresholds:
            all_roc_curves[model_name][size_threshold] = {}
            for h, hour_window in enumerate(self.hour_windows):
                hour_range = (hour...
Generate ROC curve objects for each machine learning model, size threshold, and time window.
4,327
def reliability_curves(self, prob_thresholds):
    all_rel_curves = {}
    for model_name in self.model_names:
        all_rel_curves[model_name] = {}
        for size_threshold in self.size_thresholds:
            all_rel_curves[model_name][size_threshold] = {}
            for h, hour_window in enumerate(self.hour_windows):
                hour_range...
Output reliability curves for each machine learning model, size threshold, and time window.
4,328
def load_map_coordinates(map_file):
    if map_file[-4:] == ".pkl":
        map_data = pickle.load(open(map_file))
        lon = map_data['lon']
        lat = map_data['lat']
    else:
        map_data = Dataset(map_file)
        if "lon" in map_data.variables.keys():
            lon = map_data.variables['lon'][:]
            lat = map_data.vari...
Loads map coordinates from a netCDF or pickle file created by util.makeMapGrids.
4,329
def load_data(self):
    data = []
    loaded_dates = []
    loaded_indices = []
    for t, timestamp in enumerate(self.all_dates):
        date_str = timestamp.date().strftime("%Y%m%d")
        full_path = self.path_start + date_str + "/"
        if self.variable in os.listdir(full_path):
            full_path += self.variable + "/"
            d...
Loads data from MRMS GRIB2 files and handles decompression if the files are compressed.
4,330
def interpolate_grid(self, in_lon, in_lat):
    out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
    for d in range(self.data.shape[0]):
        print("Loading ", d, self.variable, self.start_date)
        if self.data[d].max() > -999:
            step = self.dat...
Interpolates MRMS data to a different grid using cubic bivariate splines.
4,331
def max_neighbor(self, in_lon, in_lat, radius=0.05):
    out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
    in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)
    out_indices = np.indices(out_data.shape[1:])
    out_rows = out...
Finds the largest value within a given radius of a point on the interpolated grid.
4,332
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00", interp_type="spline"):
    if interp_type == "spline":
        out_data = self.interpolate_grid(in_lon, in_lat)
    else:
        out_data = self.max_neighbor(in_lon, in_lat)
    if not os.access(out_path + self.v...
Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available.
4,333
def get_data_generator_by_id(hardware_source_id, sync=True):
    hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
    def get_last_data():
        return hardware_source.get_next_xdatas_to_finish()[0].data.copy()
    yield get_last_data
Return a generator for data.
4,334
def parse_hardware_aliases_config_file(config_path):
    if os.path.exists(config_path):
        logging.info("Parsing alias file {:s}".format(config_path))
        try:
            config = configparser.ConfigParser()
            config.read(config_path)
            for section in config.sections():
                device = config.get(section, "de...
Parse the config file for aliases and automatically register them.
4,335
def make_instrument_alias(self, instrument_id, alias_instrument_id, display_name):
    self.__aliases[alias_instrument_id] = (instrument_id, display_name)
    for f in self.aliases_updated:
        f()
Configure an alias.
4,336
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None:
    self.__state = state
    self.__sub_area = sub_area
    hardware_source_id = self.__hardware_source.hardware_source_id
    channel_index = self.index
    channel_id = self.channel_id
    channel_name = self...
Called from the hardware source when new data arrives.
4,337
def start(self):
    old_start_count = self.__start_count
    self.__start_count += 1
    if old_start_count == 0:
        self.data_channel_start_event.fire()
Called from the hardware source when data starts streaming.
4,338
def connect_data_item_reference(self, data_item_reference):
    display_item = data_item_reference.display_item
    data_item = display_item.data_item if display_item else None
    if data_item and display_item:
        self.__connect_display(display_item)
    else:
        def data_item_reference_changed():
            self.__data_item_refe...
Connect to the data item reference, creating a crop graphic if necessary.
4,339
def grab_earliest(self, timeout: float = None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    timeout = timeout if timeout is not None else 10.0
    with self.__buffer_lock:
        if len(self.__buffer) == 0:
            done_event = threading.Event()
            self.__done_events.append(done_event)
            self.__buffer_lo...
Grab the earliest data from the buffer, blocking until one is available.
4,340
def grab_next(self, timeout: float = None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    with self.__buffer_lock:
        self.__buffer = list()
    return self.grab_latest(timeout)
Grab the next data to finish from the buffer, blocking until one is available.
4,341
def grab_following(self, timeout: float = None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    self.grab_next(timeout)
    return self.grab_next(timeout)
Grab the next data to start from the buffer, blocking until one is available.
4,342
def pause(self) -> None:
    with self.__state_lock:
        if self.__state == DataChannelBuffer.State.started:
            self.__state = DataChannelBuffer.State.paused
Pause recording.
4,343
def resume(self) -> None:
    with self.__state_lock:
        if self.__state == DataChannelBuffer.State.paused:
            self.__state = DataChannelBuffer.State.started
Resume recording after a pause.
4,344
def nlargest(n, mapping):
    try:
        it = mapping.iteritems()
    except AttributeError:
        it = iter(mapping.items())
    pq = minpq()
    try:
        for i in range(n):
            pq.additem(*next(it))
    except StopIteration:
        pass
    try:
        while it:
            pq.pushpopitem(*next(it))
    except StopIteration:
        pass
    out = list(...
Takes a mapping and returns the n keys associated with the largest values, in descending order. If the mapping has fewer than n items, all its keys are returned.
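A usage sketch with hypothetical data (each pushpopitem call evicts the smallest value from the min-priority queue, so only the n largest survive):
    scores = {'a': 3, 'b': 10, 'c': 7, 'd': 1}
    assert nlargest(2, scores) == ['b', 'c']  # keys of the two largest values, descending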
4,345
def fromkeys(cls, iterable, value, **kwargs):
    return cls(((k, value) for k in iterable), **kwargs)
Return a new pqdict mapping keys from an iterable to the same value.
4,346
def copy(self):
    return self.__class__(self, key=self._keyfn, precedes=self._precedes)
Return a shallow copy of a pqdict.
4,347
def pop(self, key=__marker, default=__marker):
    heap = self._heap
    position = self._position
    if key is self.__marker:
        if not heap:
            raise KeyError('pqdict is empty')
        key = heap[0].key
        del self[key]
        return key
    try:
        pos = position.pop(key)
    except KeyError:
        if default is self.__marker:
            ...
If key is in the pqdict, remove it and return its priority value, else return default. If default is not provided and key is not in the pqdict, raise a KeyError.
4,348
def popitem(self):
    heap = self._heap
    position = self._position
    try:
        end = heap.pop(-1)
    except IndexError:
        raise KeyError('pqdict is empty')
    if heap:
        node = heap[0]
        heap[0] = end
        position[end.key] = 0
        self._sink(0)
    else:
        node = end
    del position[node.key]
    return node.key, node...
Remove and return the item with highest priority. Raises KeyError if the pqdict is empty.
4,349
def topitem(self):
    try:
        node = self._heap[0]
    except IndexError:
        raise KeyError('pqdict is empty')
    return node.key, node.value
Return the item with highest priority. Raises KeyError if the pqdict is empty.
4,350
def additem(self, key, value):
    if key in self._position:
        raise KeyError('%s is already in the queue' % repr(key))
    self[key] = value
Add a new item. Raises KeyError if the key is already in the pqdict.
4,351
def pushpopitem(self, key, value, node_factory=_Node):
    heap = self._heap
    position = self._position
    precedes = self._precedes
    prio = self._keyfn(value) if self._keyfn else value
    node = node_factory(key, value, prio)
    if key in self:
        raise KeyError('%s is already in the queue' % repr(key)...
Equivalent to inserting a new item followed by removing the top-priority item, but faster. Raises KeyError if the new key is already in the pqdict.
4,352
def updateitem(self, key, new_val):
    if key not in self._position:
        raise KeyError(key)
    self[key] = new_val
Update the priority value of an existing item. Raises KeyError if the key is not in the pqdict.
4,353
def replace_key(self, key, new_key):
    heap = self._heap
    position = self._position
    if new_key in self:
        raise KeyError('%s is already in the queue' % repr(new_key))
    pos = position.pop(key)
    position[new_key] = pos
    heap[pos].key = new_key
Replace the key of an existing heap node in place. Raises KeyError if the key to replace does not exist or if the new key is already in the pqdict.
4,354
def swap_priority(self, key1, key2):
    heap = self._heap
    position = self._position
    if key1 not in self or key2 not in self:
        raise KeyError
    pos1, pos2 = position[key1], position[key2]
    heap[pos1].key, heap[pos2].key = key2, key1
    position[key1], position[key2] = pos2, pos1
Fast way to swap the priority level of two items in the pqdict. Raises KeyError if either key does not exist.
4,355
def heapify(self, key=__marker):
    if key is self.__marker:
        n = len(self._heap)
        for pos in reversed(range(n // 2)):
            self._sink(pos)
    else:
        try:
            pos = self._position[key]
        except KeyError:
            raise KeyError(key)
        self._reheapify(pos)
Repair a broken heap. If an item's priority value changes, you can re-sort just that item by providing its key.
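A minimal sketch tying the pqdict methods above together, assuming the pqdict package with its default min-priority ordering (the task names are hypothetical):
    from pqdict import pqdict

    pq = pqdict({'task1': 5, 'task2': 2})
    pq.additem('task3', 9)                 # would raise KeyError if 'task3' existed
    assert pq.topitem() == ('task2', 2)    # lowest priority value wins
    pq.updateitem('task1', 1)              # re-sorts just this item
    assert pq.popitem() == ('task1', 1)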
4,356
def package_has_version_file(package_name):
    version_file_path = helpers.package_file_path('_version.py', package_name)
    return os.path.isfile(version_file_path)
Check to make sure _version.py is contained in the package.
4,357
def get_project_name():
    setup_py_content = helpers.get_file_content('setup.py')
    ret = helpers.value_of_named_argument_in_function('name', 'setup', setup_py_content, resolve_varname=True)
    if ret and ret[0] == ret[-1] in ('"', "'"):
        ret = ret[1:-1]
    return ret
Grab the project name out of setup.py.
4,358
def get_version(package_name, ignore_cache=False):
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py', package_name, return_match=True)
    else:
        found = helpers.regex_in_package_file(VERSION_SET_REGEX, '_version.py',...
Get the version which is currently configured by the package.
4,359
def set_version(package_name, version_str):
    current_version = get_version(package_name)
    version_file_path = helpers.package_file_path('_version.py', package_name)
    version_file_content = helpers.get_file_content(version_file_path)
    version_file_content = version_file_content.replace(current_version...
Set the version in _version.py to version_str.
4,360
def version_is_valid(version_str):
    try:
        packaging.version.Version(version_str)
    except packaging.version.InvalidVersion:
        return False
    return True
Check whether the specified version is valid as far as pkg_resources is concerned.
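A sketch of the PEP 440 check the code above performs (note it relies on the packaging library rather than pkg_resources directly; the example strings are hypothetical):
    import packaging.version

    packaging.version.Version('1.2.3')          # parses, so the version is valid
    try:
        packaging.version.Version('not-a-version')
    except packaging.version.InvalidVersion:
        pass                                    # fails to parse, so it is invalid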
4,361
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
    url = '/'.join((index_url, project_name, 'json'))
    response = requests.get(url, verify=requests_verify)
    if response.status_code == 200:
        return response.json()['releases'].keys()
    return None
Query the PyPI index at index_url using the warehouse API to find all of the releases.
4,362
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
    api_url = index_url
    for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
        if api_url.endswith(suffix):
            api_url = api_url[:len(suffix) * -1] + '/api/package'
            break
    url = '/'.join((api_url, project_n...
Query the PyPI index at index_url using the pypicloud API to find all versions.
4,363
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
    all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
    return version_str in all_versions
Check whether the specified version has already been uploaded to the configured index.
4,364
def convert_readme_to_rst():
    project_files = os.listdir('.')
    for filename in project_files:
        if filename.lower() == 'readme':
            raise ProjectError('found {} in project directory...'.format(filename) +
                               'not sure what to do with it, refusing to convert')
        elif filename.lower() == 'readme.rst':
            rais...
Attempt to convert a README.md file into README.rst.
4,365
def get_packaged_files(package_name):
    if not os.path.isdir('dist'):
        return []
    return [os.path.join('dist', filename) for filename in os.listdir('dist')]
Collect relative paths to all files which have already been packaged.
4,366
def multiple_packaged_versions(package_name):
    dist_files = os.listdir('dist')
    versions = set()
    for filename in dist_files:
        version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
        if version:
            versions.add(version)
    return len(versions) > 1
Look through the built package directory and see if there are multiple versions there.
4,367
def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):
    neighbor_x = self.x[::stride, ::stride]
    neighbor_y = self.y[::stride, ::stride]
    neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
    neig...
Calculate the neighborhood probability over the full period of the forecast.
4,368
def load_map_info(self, map_file):
    if self.ensemble_name.upper() == "SSEF":
        proj_dict, grid_dict = read_arps_map_file(map_file)
        self.dx = int(grid_dict["dx"])
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        for m, v in mapping_data.items():
            setattr(self, m, v)
        self.i, self...
Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection.
4,369
def read_geojson(filename):
    json_file = open(filename)
    data = json.load(json_file)
    json_file.close()
    times = data["properties"]["times"]
    main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])
    attribute_data = dict()
    for feature in data["features"]:
        for mai...
Reads a GeoJSON file containing an STObject and initializes a new STObject from the information in the file.
4,370
def center_of_mass(self, time):
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        valid = np.flatnonzero(self.masks[diff] != 0)
        if valid.size > 0:
            com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()...
Calculate the center of mass at a given timestep.
4,371
def trajectory(self):
    traj = np.zeros((2, self.times.size))
    for t, time in enumerate(self.times):
        traj[:, t] = self.center_of_mass(time)
    return traj
Calculates the center of mass for each time step and outputs an array.
4,372
def get_corner(self, time):
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        return self.i[diff][0, 0], self.j[diff][0, 0]
    else:
        return -1, -1
Gets the corner array indices of the STObject at a given time, corresponding to the upper-left corner of the bounding box for the STObject.
4,373
def size(self, time):
    if self.start_time <= time <= self.end_time:
        return self.masks[time - self.start_time].sum()
    else:
        return 0
Gets the size of the object at a given time.
4,374
def max_intensity(self, time):
    ti = np.where(time == self.times)[0][0]
    return self.timesteps[ti].max()
Calculate the maximum intensity found at a timestep.
4,375
def boundary_polygon(self, time):
    ti = np.where(time == self.times)[0][0]
    com_x, com_y = self.center_of_mass(time)
    padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)
    chull = convex_hull_image(padded_mask)
    boundary_image = find_boundaries(chull, mode='in...
Get coordinates of the object boundary in counter-clockwise order.
4,376
def estimate_motion(self, time, intensity_grid, max_u, max_v):
    ti = np.where(time == self.times)[0][0]
    mask_vals = np.where(self.masks[ti].ravel() == 1)
    i_vals = self.i[ti].ravel()[mask_vals]
    j_vals = self.j[ti].ravel()[mask_vals]
    obj_vals = self.timesteps[...
Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.
4,377
def count_overlap(self, time, other_object, other_time):
    ti = np.where(time == self.times)[0][0]
    ma = np.where(self.masks[ti].ravel() == 1)
    oti = np.where(other_time == other_object.times)[0]
    obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ...
Counts the number of points that overlap between this STObject and another STObject. Used for tracking.
4,378
def extract_attribute_array(self, data_array, var_name):
    if var_name not in self.attributes.keys():
        self.attributes[var_name] = []
    for t in range(self.times.size):
        self.attributes[var_name].append(data_array[self.i[t], self.j[t]])
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object.
4,379
def extract_tendency_grid(self, model_grid):
    var_name = model_grid.variable + "-tendency"
    self.attributes[var_name] = []
    timesteps = np.arange(self.start_time, self.end_time + 1)
    for ti, t in enumerate(timesteps):
        t_index = t - model_grid.start_hour
        self.attributes[var_name].append...
Extracts the difference in model outputs.
4,380
def calc_timestep_statistic(self, statistic, time):
    ti = np.where(self.times == time)[0][0]
    ma = np.where(self.masks[ti].ravel() == 1)
    if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
        stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
    elif stat...
Calculate statistics from the primary attribute of the STObject.
4,381
def calc_shape_step(self, stat_names, time):
    ti = np.where(self.times == time)[0][0]
    props = regionprops(self.masks[ti], self.timesteps[ti])[0]
    shape_stats = []
    for stat_name in stat_names:
        if "moments_hu" in stat_name:
            hu_index = int(stat_name.split("_")[-1])
            hu_name...
Calculate shape statistics for a single time step.
4,382
def to_geojson(self, filename, proj, metadata=None):
    if metadata is None:
        metadata = {}
    json_obj = {"type": "FeatureCollection", "features": [], "properties": {}}
    json_obj['properties']['times'] = self.times.tolist()
    json_obj['properties']['dx'] = self.dx
    json_obj['properties'...
Output the data in the STObject to a GeoJSON file.
4,383
def model(self, v=None):
    "Returns the model of node v"
    if v is None:
        v = self.estopping
    hist = self.hist
    trace = self.trace(v)
    ins = None
    if self._base._probability_calibration is not None:
        node = hist[-1]
        node.normalize()
        X = np.array([x.full_array() for x in node.hy]).T
        y...
Returns the model of node v.
4,384
def trace(self, n):
    "Restore the position in the history of individual v's nodes"
    trace_map = {}
    self._trace(n, trace_map)
    s = list(trace_map.keys())
    s.sort()
    return s
Restore the position in the history of individual v's nodes.
4,385
def tournament(self, negative=False):
    if self.generation <= self._random_generations and not negative:
        return self.random_selection()
    if not self._negative_selection and negative:
        return self.random_selection(negative=negative)
    vars = self.random()
    fit = [(k, self.population[x].f...
Tournament selection; when negative is True, it performs negative tournament selection.
4,386
def create_population(self):
    "Create the initial population"
    base = self._base
    if base._share_inputs:
        used_inputs_var = SelectNumbers([x for x in range(base.nvar)])
        used_inputs_naive = used_inputs_var
        if base._pr_variable == 0:
            used_inputs_var = SelectNumbers([])
            used_inputs_naive = SelectNumb...
Create the initial population.
4,387
def add(self, v):
    "Add an individual to the population"
    self.population.append(v)
    self._current_popsize += 1
    v.position = len(self._hist)
    self._hist.append(v)
    self.bsf = v
    self.estopping = v
    self._density += self.get_density(v)
Add an individual to the population.
4,388
def replace(self, v):
    if self.popsize < self._popsize:
        return self.add(v)
    k = self.tournament(negative=True)
    self.clean(self.population[k])
    self.population[k] = v
    v.position = len(self._hist)
    self._hist.append(v)
    self.bsf = v
    self.estopping = v
    self._inds_replace...
Replace an individual selected by negative tournament selection with individual v.
4,389
def make_directory_if_needed(directory_path):
    if os.path.exists(directory_path):
        if not os.path.isdir(directory_path):
            raise OSError("Path is not a directory:", directory_path)
    else:
        os.makedirs(directory_path)
Make the directory path if needed.
4,390
def hatchery():
    args = docopt.docopt(__doc__)
    task_list = args['<task>']
    if not task_list or 'help' in task_list or args['--help']:
        print(__doc__.format(version=_version.__version__, config_files=config.CONFIG_LOCATIONS))
        return 0
    level_str = args['--log-level']
    try:
        level_const = ge...
Main entry point for the hatchery program.
4,391
def call(cmd_args, suppress_output=False):
    if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
        cmd_args = shlex.split(cmd_args)
    logger.info('executing `{}`'.format(' '.join(cmd_args)))
    call_request = CallRequest(cmd_args, suppress_output=suppress_output)
    call_res...
Call an arbitrary command and return the exit value, stdout, and stderr as a tuple.
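A sketch of the string-to-argv normalization performed above (the command is hypothetical; CallRequest itself is not shown in this entry):
    import shlex

    assert shlex.split('echo "hello world"') == ['echo', 'hello world']
    # call('echo "hello world"') would therefore execute ['echo', 'hello world'].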
4,392
def setup(cmd_args, suppress_output=False):
    if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
        cmd_args = shlex.split(cmd_args)
    cmd_args = [sys.executable, 'setup.py'] + [x for x in cmd_args]
    return call(cmd_args, suppress_output=suppress_output)
Call a setup.py command or list of commands.
4,393
def load_data(self):
    data = []
    valid_dates = []
    mrms_files = np.array(sorted(os.listdir(self.path + self.variable + "/")))
    mrms_file_dates = np.array([m_file.split("_")[-2].split("-")[0] for m_file in mrms_files])
    old_mrms_file = None
    file_obj = None
    for t in range(self...
Loads data files and stores the output in the data attribute.
4,394
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):
    return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min
Rescale your input data so that it ranges over integer values, which will perform better in the watershed.
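A worked example of the linear rescale above: mapping 5.0 from [0, 10] onto the default [0, 100] output range gives (100 - 0) / (10 - 0) * (5.0 - 0) + 0 = 50.0, i.e.
    assert rescale_data(5.0, 0.0, 10.0) == 50.0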
4,395
def label(self, input_grid):
    marked = self.find_local_maxima(input_grid)
    marked = np.where(marked >= 0, 1, 0)
    markers = splabel(marked)[0]
    return markers
Labels the input grid using the enhanced watershed algorithm.
4,396
def find_local_maxima(self, input_grid):
    pixels, q_data = self.quantize(input_grid)
    centers = OrderedDict()
    for p in pixels.keys():
        centers[p] = []
    marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED
    MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size)))
    MAX_IN...
Finds the local maxima in input_grid and performs region growing to identify objects.
4,397
def set_maximum(self, q_data, marked, center, bin_lower, foothills):
    as_bin = []
    as_glob = []
    marked_so_far = []
    will_be_considered_again = False
    as_bin.append(center)
    center_data = q_data[center]
    while len(as_bin) > 0:
        p = as_bin.pop(-1)
        if marked[p] != self.UNMARKED:
            continue
        marke...
Grow a region at a certain bin level and check if the region has reached the maximum size.
4,398
def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills):
    hills = []
    for foot in foothills:
        center = foot[0]
        hills[:] = foot[1][:]
        while len(hills) > 0:
            pt = hills.pop(-1)
            marked[pt] = self.GLOBBED
            for s_index, val in np.ndenumerate(marked[pt[0]...
Mark points determined to be foothills as globbed so that they are not included in future searches. Also searches points neighboring the foothill points to determine if they should also be considered foothills.
4,399
def quantize(self, input_grid):
    pixels = {}
    for i in range(self.max_bin + 1):
        pixels[i] = []
    data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment
    data[data < 0] = -1
    data[data > self.max_bin] = self.max_bin
    good_points = np.where(data >= 0)
    for g...
Quantize a grid into discrete steps based on input parameters.