Columns: idx (int64, 0 to 63k); question (string, length 61 to 4.03k); target (string, length 6 to 1.23k)
4,300
def regex_in_file ( regex , filepath , return_match = False ) : file_content = get_file_content ( filepath ) re_method = funcy . re_find if return_match else funcy . re_test return re_method ( regex , file_content )
Search for a regex in a file
4,301
def regex_in_package_file ( regex , filename , package_name , return_match = False ) : filepath = package_file_path ( filename , package_name ) return regex_in_file ( regex , filepath , return_match = return_match )
Search for a regex in a file contained within the package directory
4,302
def string_is_url ( test_str ) : parsed = urlparse . urlparse ( test_str ) return parsed . scheme is not None and parsed . scheme != ''
Test whether a string is a URL, defined in this case as a string for which urlparse returns a scheme component
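A minimal usage sketch of the scheme-based check above, written against Python 3's urllib.parse (the example strings are made up for illustration):

from urllib.parse import urlparse

def string_is_url(test_str):
    # A string counts as a URL only when urlparse finds a non-empty scheme (e.g. "https").
    parsed = urlparse(test_str)
    return parsed.scheme is not None and parsed.scheme != ''

print(string_is_url("https://example.com/path"))  # True
print(string_is_url("no scheme here"))            # False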
4,303
def item_transaction ( self , item ) -> Transaction : items = self . __build_transaction_items ( item ) transaction = Transaction ( self , item , items ) self . __transactions . append ( transaction ) return transaction
Begin transaction state for item.
4,304
def insert_data_item ( self , before_index , data_item , auto_display : bool = True ) -> None : assert data_item is not None assert data_item not in self . data_items assert before_index <= len ( self . data_items ) and before_index >= 0 assert data_item . uuid not in self . __uuid_to_data_item data_item . session_id = self . session_id self . __insert_data_item ( before_index , data_item , do_write = True ) if auto_display : display_item = DisplayItem . DisplayItem ( data_item = data_item ) self . append_display_item ( display_item )
Insert a new data item into the document model.
4,305
def remove_data_item ( self , data_item : DataItem . DataItem , * , safe : bool = False ) -> typing . Optional [ typing . Sequence ] : return self . __cascade_delete ( data_item , safe = safe )
Remove a data item from the document model.
4,306
def transaction_context ( self ) : class DocumentModelTransaction : def __init__ ( self , document_model ) : self . __document_model = document_model def __enter__ ( self ) : self . __document_model . persistent_object_context . enter_write_delay ( self . __document_model ) return self def __exit__ ( self , type , value , traceback ) : self . __document_model . persistent_object_context . exit_write_delay ( self . __document_model ) self . __document_model . persistent_object_context . rewrite_item ( self . __document_model ) return DocumentModelTransaction ( self )
Return a context object for a document-wide transaction.
4,307
def data_item_live ( self , data_item ) : class LiveContextManager : def __init__ ( self , manager , object ) : self . __manager = manager self . __object = object def __enter__ ( self ) : self . __manager . begin_data_item_live ( self . __object ) return self def __exit__ ( self , type , value , traceback ) : self . __manager . end_data_item_live ( self . __object ) return LiveContextManager ( self , data_item )
Return a context manager to put the data item in a live state.
4,308
def begin_data_item_live ( self , data_item ) : with self . __live_data_items_lock : old_live_count = self . __live_data_items . get ( data_item . uuid , 0 ) self . __live_data_items [ data_item . uuid ] = old_live_count + 1 if old_live_count == 0 : data_item . _enter_live_state ( ) for dependent_data_item in self . get_dependent_data_items ( data_item ) : self . begin_data_item_live ( dependent_data_item )
Begins a live state for the data item.
4,309
def end_data_item_live ( self , data_item ) : with self . __live_data_items_lock : live_count = self . __live_data_items . get ( data_item . uuid , 0 ) - 1 assert live_count >= 0 self . __live_data_items [ data_item . uuid ] = live_count if live_count == 0 : data_item . _exit_live_state ( ) for dependent_data_item in self . get_dependent_data_items ( data_item ) : self . end_data_item_live ( dependent_data_item )
Ends a live state for the data item.
4,310
def __construct_data_item_reference ( self , hardware_source : HardwareSource . HardwareSource , data_channel : HardwareSource . DataChannel ) : session_id = self . session_id key = self . make_data_item_reference_key ( hardware_source . hardware_source_id , data_channel . channel_id ) data_item_reference = self . get_data_item_reference ( key ) with data_item_reference . mutex : data_item = data_item_reference . data_item if data_item is None : data_item = DataItem . DataItem ( ) data_item . ensure_data_source ( ) data_item . title = "%s (%s)" % ( hardware_source . display_name , data_channel . name ) if data_channel . name else hardware_source . display_name data_item . category = "temporary" data_item_reference . data_item = data_item def append_data_item ( ) : self . append_data_item ( data_item ) self . _update_data_item_reference ( key , data_item ) self . __call_soon ( append_data_item ) def update_session ( ) : if data_item . session_id != session_id : data_item . session_id = session_id session_metadata = ApplicationData . get_session_metadata_dict ( ) if data_item . session_metadata != session_metadata : data_item . session_metadata = session_metadata if data_channel . processor : src_data_channel = hardware_source . data_channels [ data_channel . src_channel_index ] src_data_item_reference = self . get_data_item_reference ( self . make_data_item_reference_key ( hardware_source . hardware_source_id , src_data_channel . channel_id ) ) data_channel . processor . connect_data_item_reference ( src_data_item_reference ) self . __call_soon ( update_session ) return data_item_reference
Construct a data item reference.
4,311
def salvar ( self , destino = None , prefix = 'tmp' , suffix = '-sat.log' ) : if destino : if os . path . exists ( destino ) : raise IOError ( ( errno . EEXIST , 'File exists' , destino , ) ) destino = os . path . abspath ( destino ) fd = os . open ( destino , os . O_EXCL | os . O_CREAT | os . O_WRONLY ) else : fd , destino = tempfile . mkstemp ( prefix = prefix , suffix = suffix ) os . write ( fd , self . conteudo ( ) ) os . fsync ( fd ) os . close ( fd ) return os . path . abspath ( destino )
Save the decoded log file.
4,312
def load_data_old ( self ) : units = "" if len ( self . file_objects ) == 1 and self . file_objects [ 0 ] is not None : data = self . file_objects [ 0 ] . variables [ self . variable ] [ self . forecast_hours ] if hasattr ( self . file_objects [ 0 ] . variables [ self . variable ] , "units" ) : units = self . file_objects [ 0 ] . variables [ self . variable ] . units elif len ( self . file_objects ) > 1 : grid_shape = [ len ( self . file_objects ) , 1 , 1 ] for file_object in self . file_objects : if file_object is not None : if self . variable in file_object . variables . keys ( ) : grid_shape = file_object . variables [ self . variable ] . shape elif self . variable . ljust ( 6 , "_" ) in file_object . variables . keys ( ) : grid_shape = file_object . variables [ self . variable . ljust ( 6 , "_" ) ] . shape else : print ( "{0} not found" . format ( self . variable ) ) raise KeyError break data = np . zeros ( ( len ( self . file_objects ) , grid_shape [ 1 ] , grid_shape [ 2 ] ) ) for f , file_object in enumerate ( self . file_objects ) : if file_object is not None : if self . variable in file_object . variables . keys ( ) : var_name = self . variable elif self . variable . ljust ( 6 , "_" ) in file_object . variables . keys ( ) : var_name = self . variable . ljust ( 6 , "_" ) else : print ( "{0} not found" . format ( self . variable ) ) raise KeyError data [ f ] = file_object . variables [ var_name ] [ 0 ] if units == "" and hasattr ( file_object . variables [ var_name ] , "units" ) : units = file_object . variables [ var_name ] . units else : data = None return data , units
Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported.
4,313
def load_data ( self ) : units = "" if self . file_objects [ 0 ] is None : raise IOError ( ) var_name , z_index = self . format_var_name ( self . variable , list ( self . file_objects [ 0 ] . variables . keys ( ) ) ) ntimes = 0 if 'time' in self . file_objects [ 0 ] . variables [ var_name ] . dimensions : ntimes = len ( self . file_objects [ 0 ] . dimensions [ 'time' ] ) if ntimes > 1 : if z_index is None : data = self . file_objects [ 0 ] . variables [ var_name ] [ self . forecast_hours ] . astype ( np . float32 ) else : data = self . file_objects [ 0 ] . variables [ var_name ] [ self . forecast_hours , z_index ] . astype ( np . float32 ) else : y_dim , x_dim = self . file_objects [ 0 ] . variables [ var_name ] . shape [ - 2 : ] data = np . zeros ( ( len ( self . valid_dates ) , y_dim , x_dim ) , dtype = np . float32 ) for f , file_object in enumerate ( self . file_objects ) : if file_object is not None : if z_index is None : data [ f ] = file_object . variables [ var_name ] [ 0 ] else : data [ f ] = file_object . variables [ var_name ] [ 0 , z_index ] if hasattr ( self . file_objects [ 0 ] . variables [ var_name ] , "units" ) : units = self . file_objects [ 0 ] . variables [ var_name ] . units return data , units
Load data from a netCDF file object or a list of netCDF file objects. Handles special variable name formats.
4,314
def format_var_name ( variable , var_list ) : z_index = None if variable in var_list : var_name = variable elif variable . ljust ( 6 , "_" ) in var_list : var_name = variable . ljust ( 6 , "_" ) elif any ( [ variable in v_sub . split ( "_" ) for v_sub in var_list ] ) : var_name = var_list [ [ variable in v_sub . split ( "_" ) for v_sub in var_list ] . index ( True ) ] z_index = var_name . split ( "_" ) . index ( variable ) else : raise KeyError ( "{0} not found in {1}" . format ( variable , var_list ) ) return var_name , z_index
Searches the variable list for the variable name and checks other variable name format options.
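A small sketch of the lookup logic above, using hypothetical netCDF variable names, showing how a composite underscore-joined name yields both the stored name and a z_index:

def format_var_name(variable, var_list):
    # Exact match, underscore-padded match, or membership in a composite "_"-joined name.
    z_index = None
    if variable in var_list:
        var_name = variable
    elif variable.ljust(6, "_") in var_list:
        var_name = variable.ljust(6, "_")
    elif any(variable in v.split("_") for v in var_list):
        var_name = next(v for v in var_list if variable in v.split("_"))
        z_index = var_name.split("_").index(variable)
    else:
        raise KeyError("{0} not found in {1}".format(variable, var_list))
    return var_name, z_index

# Hypothetical variable list: "refl" is the second component of a composite name.
print(format_var_name("refl", ["temp__", "cape_refl_mesh"]))  # ('cape_refl_mesh', 1)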
4,315
def save_models ( self , model_path ) : for group , condition_model_set in self . condition_models . items ( ) : for model_name , model_obj in condition_model_set . items ( ) : out_filename = model_path + "{0}_{1}_condition.pkl" . format ( group , model_name . replace ( " " , "-" ) ) with open ( out_filename , "wb" ) as pickle_file : pickle . dump ( model_obj , pickle_file , pickle . HIGHEST_PROTOCOL ) for group , size_model_set in self . size_models . items ( ) : for model_name , model_obj in size_model_set . items ( ) : out_filename = model_path + "{0}_{1}_size.pkl" . format ( group , model_name . replace ( " " , "-" ) ) with open ( out_filename , "wb" ) as pickle_file : pickle . dump ( model_obj , pickle_file , pickle . HIGHEST_PROTOCOL ) for group , dist_model_set in self . size_distribution_models . items ( ) : for model_type , model_objs in dist_model_set . items ( ) : for model_name , model_obj in model_objs . items ( ) : out_filename = model_path + "{0}_{1}_{2}_sizedist.pkl" . format ( group , model_name . replace ( " " , "-" ) , model_type ) with open ( out_filename , "wb" ) as pickle_file : pickle . dump ( model_obj , pickle_file , pickle . HIGHEST_PROTOCOL ) for model_type , track_type_models in self . track_models . items ( ) : for group , track_model_set in track_type_models . items ( ) : for model_name , model_obj in track_model_set . items ( ) : out_filename = model_path + "{0}_{1}_{2}_track.pkl" . format ( group , model_name . replace ( " " , "-" ) , model_type ) with open ( out_filename , "wb" ) as pickle_file : pickle . dump ( model_obj , pickle_file , pickle . HIGHEST_PROTOCOL ) return
Save machine learning models to pickle files.
4,316
def output_forecasts_csv ( self , forecasts , mode , csv_path , run_date_format = "%Y%m%d-%H%M" ) : merged_forecasts = pd . merge ( forecasts [ "condition" ] , forecasts [ "dist" ] , on = [ "Step_ID" , "Track_ID" , "Ensemble_Member" , "Forecast_Hour" ] ) all_members = self . data [ mode ] [ "combo" ] [ "Ensemble_Member" ] members = np . unique ( all_members ) all_run_dates = pd . DatetimeIndex ( self . data [ mode ] [ "combo" ] [ "Run_Date" ] ) run_dates = pd . DatetimeIndex ( np . unique ( all_run_dates ) ) print ( run_dates ) for member in members : for run_date in run_dates : mem_run_index = ( all_run_dates == run_date ) & ( all_members == member ) member_forecast = merged_forecasts . loc [ mem_run_index ] member_forecast . to_csv ( join ( csv_path , "hail_forecasts_{0}_{1}_{2}.csv" . format ( self . ensemble_name , member , run_date . strftime ( run_date_format ) ) ) ) return
Output hail forecast values to CSV files by run date and ensemble member.
4,317
def load_forecasts ( self ) : forecast_path = self . forecast_json_path + "/{0}/{1}/" . format ( self . run_date . strftime ( "%Y%m%d" ) , self . ensemble_member ) forecast_files = sorted ( glob ( forecast_path + "*.json" ) ) for forecast_file in forecast_files : file_obj = open ( forecast_file ) json_obj = json . load ( file_obj ) file_obj . close ( ) track_id = json_obj [ 'properties' ] [ "id" ] obs_track_id = json_obj [ 'properties' ] [ "obs_track_id" ] forecast_hours = json_obj [ 'properties' ] [ 'times' ] duration = json_obj [ 'properties' ] [ 'duration' ] for f , feature in enumerate ( json_obj [ 'features' ] ) : area = np . sum ( feature [ "properties" ] [ "masks" ] ) step_id = track_id + "_{0:02d}" . format ( f ) for model_type in self . model_types : for model_name in self . model_names [ model_type ] : prediction = feature [ 'properties' ] [ model_type + "_" + model_name . replace ( " " , "-" ) ] if model_type == "condition" : prediction = [ prediction ] row = [ track_id , obs_track_id , self . ensemble_name , self . ensemble_member , forecast_hours [ f ] , f + 1 , duration , area ] + prediction self . forecasts [ model_type ] [ model_name ] . loc [ step_id ] = row
Loads the forecast files and gathers the forecast information into pandas DataFrames.
4,318
def load_obs ( self ) : track_total_file = self . track_data_csv_path + "track_total_{0}_{1}_{2}.csv" . format ( self . ensemble_name , self . ensemble_member , self . run_date . strftime ( "%Y%m%d" ) ) track_step_file = self . track_data_csv_path + "track_step_{0}_{1}_{2}.csv" . format ( self . ensemble_name , self . ensemble_member , self . run_date . strftime ( "%Y%m%d" ) ) track_total_cols = [ "Track_ID" , "Translation_Error_X" , "Translation_Error_Y" , "Start_Time_Error" ] track_step_cols = [ "Step_ID" , "Track_ID" , "Hail_Size" , "Shape" , "Location" , "Scale" ] track_total_data = pd . read_csv ( track_total_file , usecols = track_total_cols ) track_step_data = pd . read_csv ( track_step_file , usecols = track_step_cols ) obs_data = pd . merge ( track_step_data , track_total_data , on = "Track_ID" , how = "left" ) self . obs = obs_data
Loads the track total and step files and merges the information into a single data frame.
4,319
def merge_obs ( self ) : for model_type in self . model_types : self . matched_forecasts [ model_type ] = { } for model_name in self . model_names [ model_type ] : self . matched_forecasts [ model_type ] [ model_name ] = pd . merge ( self . forecasts [ model_type ] [ model_name ] , self . obs , right_on = "Step_ID" , how = "left" , left_index = True )
Match forecasts and observations.
4,320
def roc ( self , model_type , model_name , intensity_threshold , prob_thresholds , query = None ) : roc_obj = DistributedROC ( prob_thresholds , 0.5 ) if query is not None : sub_forecasts = self . matched_forecasts [ model_type ] [ model_name ] . query ( query ) sub_forecasts = sub_forecasts . reset_index ( drop = True ) else : sub_forecasts = self . matched_forecasts [ model_type ] [ model_name ] obs_values = np . zeros ( sub_forecasts . shape [ 0 ] ) if sub_forecasts . shape [ 0 ] > 0 : if model_type == "dist" : forecast_values = np . array ( [ gamma_sf ( intensity_threshold , * params ) for params in sub_forecasts [ self . forecast_bins [ model_type ] ] . values ] ) obs_probs = np . array ( [ gamma_sf ( intensity_threshold , * params ) for params in sub_forecasts [ self . type_cols [ model_type ] ] . values ] ) obs_values [ obs_probs >= 0.01 ] = 1 elif len ( self . forecast_bins [ model_type ] ) > 1 : fbin = np . argmin ( np . abs ( self . forecast_bins [ model_type ] - intensity_threshold ) ) forecast_values = 1 - sub_forecasts [ self . forecast_bins [ model_type ] . astype ( str ) ] . values . cumsum ( axis = 1 ) [ : , fbin ] obs_values [ sub_forecasts [ self . type_cols [ model_type ] ] . values >= intensity_threshold ] = 1 else : forecast_values = sub_forecasts [ self . forecast_bins [ model_type ] . astype ( str ) [ 0 ] ] . values obs_values [ sub_forecasts [ self . type_cols [ model_type ] ] . values >= intensity_threshold ] = 1 roc_obj . update ( forecast_values , obs_values ) return roc_obj
Calculates a ROC curve at a specified intensity threshold.
4,321
def sample_forecast_max_hail ( self , dist_model_name , condition_model_name , num_samples , condition_threshold = 0.5 , query = None ) : if query is not None : dist_forecasts = self . matched_forecasts [ "dist" ] [ dist_model_name ] . query ( query ) dist_forecasts = dist_forecasts . reset_index ( drop = True ) condition_forecasts = self . matched_forecasts [ "condition" ] [ condition_model_name ] . query ( query ) condition_forecasts = condition_forecasts . reset_index ( drop = True ) else : dist_forecasts = self . matched_forecasts [ "dist" ] [ dist_model_name ] condition_forecasts = self . matched_forecasts [ "condition" ] [ condition_model_name ] max_hail_samples = np . zeros ( ( dist_forecasts . shape [ 0 ] , num_samples ) ) areas = dist_forecasts [ "Area" ] . values for f in np . arange ( dist_forecasts . shape [ 0 ] ) : condition_prob = condition_forecasts . loc [ f , self . forecast_bins [ "condition" ] [ 0 ] ] if condition_prob >= condition_threshold : max_hail_samples [ f ] = np . sort ( gamma . rvs ( * dist_forecasts . loc [ f , self . forecast_bins [ "dist" ] ] . values , size = ( num_samples , areas [ f ] ) ) . max ( axis = 1 ) ) return max_hail_samples
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.
4,322
def get_params ( self ) : params = { 'key' : self . get_app_key ( ) , 'uid' : self . user_id , 'widget' : self . widget_code } products_number = len ( self . products ) if self . get_api_type ( ) == self . API_GOODS : if isinstance ( self . products , list ) : if products_number == 1 : product = self . products [ 0 ] if isinstance ( product , Product ) : post_trial_product = None if isinstance ( product . get_trial_product ( ) , Product ) : post_trial_product = product product = product . get_trial_product ( ) params [ 'amount' ] = product . get_amount ( ) params [ 'currencyCode' ] = product . get_currency_code ( ) params [ 'ag_name' ] = product . get_name ( ) params [ 'ag_external_id' ] = product . get_id ( ) params [ 'ag_type' ] = product . get_type ( ) if product . get_type ( ) == Product . TYPE_SUBSCRIPTION : params [ 'ag_period_length' ] = product . get_period_length ( ) params [ 'ag_period_type' ] = product . get_period_type ( ) if product . is_recurring ( ) : params [ 'ag_recurring' ] = 1 if product . is_recurring ( ) else 0 if post_trial_product : params [ 'ag_trial' ] = 1 params [ 'ag_post_trial_external_id' ] = post_trial_product . get_id ( ) params [ 'ag_post_trial_period_length' ] = post_trial_product . get_period_length ( ) params [ 'ag_post_trial_period_type' ] = post_trial_product . get_period_type ( ) params [ 'ag_post_trial_name' ] = post_trial_product . get_name ( ) params [ 'post_trial_amount' ] = post_trial_product . get_amount ( ) params [ 'post_trial_currencyCode' ] = post_trial_product . get_currency_code ( ) else : self . append_to_errors ( 'Not a Product instance' ) else : self . append_to_errors ( 'Only 1 product is allowed' ) elif self . get_api_type ( ) == self . API_CART : index = 0 for product in self . products : params [ 'external_ids[' + str ( index ) + ']' ] = product . get_id ( ) if product . get_amount ( ) > 0 : params [ 'prices[' + str ( index ) + ']' ] = product . get_amount ( ) if product . get_currency_code ( ) != '' and product . get_currency_code ( ) is not None : params [ 'currencies[' + str ( index ) + ']' ] = product . get_currency_code ( ) index += 1 params [ 'sign_version' ] = signature_version = str ( self . get_default_widget_signature ( ) ) if not self . is_empty ( self . extra_params , 'sign_version' ) : signature_version = params [ 'sign_version' ] = str ( self . extra_params [ 'sign_version' ] ) params = self . array_merge ( params , self . extra_params ) params [ 'sign' ] = self . calculate_signature ( params , self . get_secret_key ( ) , int ( signature_version ) ) return params
Get signature and params
4,323
def load_forecasts ( self ) : run_date_str = self . run_date . strftime ( "%Y%m%d" ) for model_name in self . model_names : self . raw_forecasts [ model_name ] = { } forecast_file = self . forecast_path + run_date_str + "/" + model_name . replace ( " " , "-" ) + "_hailprobs_{0}_{1}.nc" . format ( self . ensemble_member , run_date_str ) forecast_obj = Dataset ( forecast_file ) forecast_hours = forecast_obj . variables [ "forecast_hour" ] [ : ] valid_hour_indices = np . where ( ( self . start_hour <= forecast_hours ) & ( forecast_hours <= self . end_hour ) ) [ 0 ] for size_threshold in self . size_thresholds : self . raw_forecasts [ model_name ] [ size_threshold ] = forecast_obj . variables [ "prob_hail_{0:02d}_mm" . format ( size_threshold ) ] [ valid_hour_indices ] forecast_obj . close ( )
Load the forecast files into memory.
4,324
def get_window_forecasts ( self ) : for model_name in self . model_names : self . window_forecasts [ model_name ] = { } for size_threshold in self . size_thresholds : self . window_forecasts [ model_name ] [ size_threshold ] = np . array ( [ self . raw_forecasts [ model_name ] [ size_threshold ] [ sl ] . sum ( axis = 0 ) for sl in self . hour_windows ] )
Aggregate the forecasts within the specified time windows.
4,325
def dilate_obs ( self , dilation_radius ) : for s in self . size_thresholds : self . dilated_obs [ s ] = np . zeros ( self . window_obs [ self . mrms_variable ] . shape ) for t in range ( self . dilated_obs [ s ] . shape [ 0 ] ) : self . dilated_obs [ s ] [ t ] [ binary_dilation ( self . window_obs [ self . mrms_variable ] [ t ] >= s , iterations = dilation_radius ) ] = 1
Use a dilation filter to grow positive observation areas by a specified number of grid points
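To illustrate the dilation step with made-up numbers, here is a minimal sketch assuming scipy.ndimage's binary_dilation (which matches the iterations-based call above); a single grid point exceeding the threshold grows into its neighbors:

import numpy as np
from scipy.ndimage import binary_dilation

obs = np.zeros((5, 5))
obs[2, 2] = 32.0                                   # one observed value above a 25 mm threshold
dilated = np.zeros(obs.shape)
dilated[binary_dilation(obs >= 25, iterations=1)] = 1
print(dilated)                                     # plus-shaped patch of ones around (2, 2)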
4,326
def roc_curves ( self , prob_thresholds ) : all_roc_curves = { } for model_name in self . model_names : all_roc_curves [ model_name ] = { } for size_threshold in self . size_thresholds : all_roc_curves [ model_name ] [ size_threshold ] = { } for h , hour_window in enumerate ( self . hour_windows ) : hour_range = ( hour_window . start , hour_window . stop ) all_roc_curves [ model_name ] [ size_threshold ] [ hour_range ] = DistributedROC ( prob_thresholds , 1 ) if self . obs_mask : all_roc_curves [ model_name ] [ size_threshold ] [ hour_range ] . update ( self . window_forecasts [ model_name ] [ size_threshold ] [ h ] [ self . window_obs [ self . mask_variable ] [ h ] > 0 ] , self . dilated_obs [ size_threshold ] [ h ] [ self . window_obs [ self . mask_variable ] [ h ] > 0 ] ) else : all_roc_curves [ model_name ] [ size_threshold ] [ hour_range ] . update ( self . window_forecasts [ model_name ] [ size_threshold ] [ h ] , self . dilated_obs [ size_threshold ] [ h ] ) return all_roc_curves
Generate ROC curve objects for each machine learning model, size threshold, and time window.
4,327
def reliability_curves ( self , prob_thresholds ) : all_rel_curves = { } for model_name in self . model_names : all_rel_curves [ model_name ] = { } for size_threshold in self . size_thresholds : all_rel_curves [ model_name ] [ size_threshold ] = { } for h , hour_window in enumerate ( self . hour_windows ) : hour_range = ( hour_window . start , hour_window . stop ) all_rel_curves [ model_name ] [ size_threshold ] [ hour_range ] = DistributedReliability ( prob_thresholds , 1 ) if self . obs_mask : all_rel_curves [ model_name ] [ size_threshold ] [ hour_range ] . update ( self . window_forecasts [ model_name ] [ size_threshold ] [ h ] [ self . window_obs [ self . mask_variable ] [ h ] > 0 ] , self . dilated_obs [ size_threshold ] [ h ] [ self . window_obs [ self . mask_variable ] [ h ] > 0 ] ) else : all_rel_curves [ model_name ] [ size_threshold ] [ hour_range ] . update ( self . window_forecasts [ model_name ] [ size_threshold ] [ h ] , self . dilated_obs [ size_threshold ] [ h ] ) return all_rel_curves
Output reliability curves for each machine learning model, size threshold, and time window.
4,328
def load_map_coordinates ( map_file ) : if map_file [ - 4 : ] == ".pkl" : map_data = pickle . load ( open ( map_file ) ) lon = map_data [ 'lon' ] lat = map_data [ 'lat' ] else : map_data = Dataset ( map_file ) if "lon" in map_data . variables . keys ( ) : lon = map_data . variables [ 'lon' ] [ : ] lat = map_data . variables [ 'lat' ] [ : ] else : lon = map_data . variables [ "XLONG" ] [ 0 ] lat = map_data . variables [ "XLAT" ] [ 0 ] return lon , lat
Loads map coordinates from a netCDF or pickle file created by util.makeMapGrids.
4,329
def load_data ( self ) : data = [ ] loaded_dates = [ ] loaded_indices = [ ] for t , timestamp in enumerate ( self . all_dates ) : date_str = timestamp . date ( ) . strftime ( "%Y%m%d" ) full_path = self . path_start + date_str + "/" if self . variable in os . listdir ( full_path ) : full_path += self . variable + "/" data_files = sorted ( os . listdir ( full_path ) ) file_dates = pd . to_datetime ( [ d . split ( "_" ) [ - 1 ] [ 0 : 13 ] for d in data_files ] ) if timestamp in file_dates : data_file = data_files [ np . where ( timestamp == file_dates ) [ 0 ] [ 0 ] ] print ( full_path + data_file ) if data_file [ - 2 : ] == "gz" : subprocess . call ( [ "gunzip" , full_path + data_file ] ) file_obj = Nio . open_file ( full_path + data_file [ : - 3 ] ) else : file_obj = Nio . open_file ( full_path + data_file ) var_name = sorted ( file_obj . variables . keys ( ) ) [ 0 ] data . append ( file_obj . variables [ var_name ] [ : ] ) if self . lon is None : self . lon = file_obj . variables [ "lon_0" ] [ : ] if np . count_nonzero ( self . lon > 180 ) > 0 : self . lon -= 360 self . lat = file_obj . variables [ "lat_0" ] [ : ] file_obj . close ( ) if data_file [ - 2 : ] == "gz" : subprocess . call ( [ "gzip" , full_path + data_file [ : - 3 ] ] ) else : subprocess . call ( [ "gzip" , full_path + data_file ] ) loaded_dates . append ( timestamp ) loaded_indices . append ( t ) if len ( loaded_dates ) > 0 : self . loaded_dates = pd . DatetimeIndex ( loaded_dates ) self . data = np . ones ( ( self . all_dates . shape [ 0 ] , data [ 0 ] . shape [ 0 ] , data [ 0 ] . shape [ 1 ] ) ) * - 9999 self . data [ loaded_indices ] = np . array ( data )
Loads data from MRMS GRIB2 files and handles compression duties if files are compressed.
4,330
def interpolate_grid ( self , in_lon , in_lat ) : out_data = np . zeros ( ( self . data . shape [ 0 ] , in_lon . shape [ 0 ] , in_lon . shape [ 1 ] ) ) for d in range ( self . data . shape [ 0 ] ) : print ( "Loading " , d , self . variable , self . start_date ) if self . data [ d ] . max ( ) > - 999 : step = self . data [ d ] step [ step < 0 ] = 0 if self . lat [ - 1 ] < self . lat [ 0 ] : spline = RectBivariateSpline ( self . lat [ : : - 1 ] , self . lon , step [ : : - 1 ] , kx = 3 , ky = 3 ) else : spline = RectBivariateSpline ( self . lat , self . lon , step , kx = 3 , ky = 3 ) print ( "Evaluating" , d , self . variable , self . start_date ) flat_data = spline . ev ( in_lat . ravel ( ) , in_lon . ravel ( ) ) out_data [ d ] = flat_data . reshape ( in_lon . shape ) del spline else : print ( d , " is missing" ) out_data [ d ] = - 9999 return out_data
Interpolates MRMS data to a different grid using cubic bivariate splines
4,331
def max_neighbor ( self , in_lon , in_lat , radius = 0.05 ) : out_data = np . zeros ( ( self . data . shape [ 0 ] , in_lon . shape [ 0 ] , in_lon . shape [ 1 ] ) ) in_tree = cKDTree ( np . vstack ( ( in_lat . ravel ( ) , in_lon . ravel ( ) ) ) . T ) out_indices = np . indices ( out_data . shape [ 1 : ] ) out_rows = out_indices [ 0 ] . ravel ( ) out_cols = out_indices [ 1 ] . ravel ( ) for d in range ( self . data . shape [ 0 ] ) : nz_points = np . where ( self . data [ d ] > 0 ) if len ( nz_points [ 0 ] ) > 0 : nz_vals = self . data [ d ] [ nz_points ] nz_rank = np . argsort ( nz_vals ) original_points = cKDTree ( np . vstack ( ( self . lat [ nz_points [ 0 ] [ nz_rank ] ] , self . lon [ nz_points [ 1 ] [ nz_rank ] ] ) ) . T ) all_neighbors = original_points . query_ball_tree ( in_tree , radius , p = 2 , eps = 0 ) for n , neighbors in enumerate ( all_neighbors ) : if len ( neighbors ) > 0 : out_data [ d , out_rows [ neighbors ] , out_cols [ neighbors ] ] = nz_vals [ nz_rank ] [ n ] return out_data
Finds the largest value within a given radius of a point on the interpolated grid.
4,332
def interpolate_to_netcdf ( self , in_lon , in_lat , out_path , date_unit = "seconds since 1970-01-01T00:00" , interp_type = "spline" ) : if interp_type == "spline" : out_data = self . interpolate_grid ( in_lon , in_lat ) else : out_data = self . max_neighbor ( in_lon , in_lat ) if not os . access ( out_path + self . variable , os . R_OK ) : try : os . mkdir ( out_path + self . variable ) except OSError : print ( out_path + self . variable + " already created" ) out_file = out_path + self . variable + "/" + "{0}_{1}_{2}.nc" . format ( self . variable , self . start_date . strftime ( "%Y%m%d-%H:%M" ) , self . end_date . strftime ( "%Y%m%d-%H:%M" ) ) out_obj = Dataset ( out_file , "w" ) out_obj . createDimension ( "time" , out_data . shape [ 0 ] ) out_obj . createDimension ( "y" , out_data . shape [ 1 ] ) out_obj . createDimension ( "x" , out_data . shape [ 2 ] ) data_var = out_obj . createVariable ( self . variable , "f4" , ( "time" , "y" , "x" ) , zlib = True , fill_value = - 9999.0 , least_significant_digit = 3 ) data_var [ : ] = out_data data_var . long_name = self . variable data_var . coordinates = "latitude longitude" if "MESH" in self . variable or "QPE" in self . variable : data_var . units = "mm" elif "Reflectivity" in self . variable : data_var . units = "dBZ" elif "Rotation" in self . variable : data_var . units = "s-1" else : data_var . units = "" out_lon = out_obj . createVariable ( "longitude" , "f4" , ( "y" , "x" ) , zlib = True ) out_lon [ : ] = in_lon out_lon . units = "degrees_east" out_lat = out_obj . createVariable ( "latitude" , "f4" , ( "y" , "x" ) , zlib = True ) out_lat [ : ] = in_lat out_lat . units = "degrees_north" dates = out_obj . createVariable ( "time" , "i8" , ( "time" , ) , zlib = True ) dates [ : ] = np . round ( date2num ( self . all_dates . to_pydatetime ( ) , date_unit ) ) . astype ( np . int64 ) dates . long_name = "Valid date" dates . units = date_unit out_obj . Conventions = "CF-1.6" out_obj . close ( ) return
Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available.
4,333
def get_data_generator_by_id ( hardware_source_id , sync = True ) : hardware_source = HardwareSourceManager ( ) . get_hardware_source_for_hardware_source_id ( hardware_source_id ) def get_last_data ( ) : return hardware_source . get_next_xdatas_to_finish ( ) [ 0 ] . data . copy ( ) yield get_last_data
Return a generator for data.
4,334
def parse_hardware_aliases_config_file ( config_path ) : if os . path . exists ( config_path ) : logging . info ( "Parsing alias file {:s}" . format ( config_path ) ) try : config = configparser . ConfigParser ( ) config . read ( config_path ) for section in config . sections ( ) : device = config . get ( section , "device" ) hardware_alias = config . get ( section , "hardware_alias" ) display_name = config . get ( section , "display_name" ) try : logging . info ( "Adding alias {:s} for device {:s}, display name: {:s} " . format ( hardware_alias , device , display_name ) ) HardwareSourceManager ( ) . make_instrument_alias ( device , hardware_alias , _ ( display_name ) ) except Exception as e : logging . info ( "Error creating hardware alias {:s} for device {:s} " . format ( hardware_alias , device ) ) logging . info ( traceback . format_exc ( ) ) except Exception as e : logging . info ( "Error reading alias file from: " + config_path ) logging . info ( traceback . format_exc ( ) ) return True return False
Parse config file for aliases and automatically register them.
4,335
def make_instrument_alias ( self , instrument_id , alias_instrument_id , display_name ) : self . __aliases [ alias_instrument_id ] = ( instrument_id , display_name ) for f in self . aliases_updated : f ( )
Configure an alias.
4,336
def update ( self , data_and_metadata : DataAndMetadata . DataAndMetadata , state : str , sub_area , view_id ) -> None : self . __state = state self . __sub_area = sub_area hardware_source_id = self . __hardware_source . hardware_source_id channel_index = self . index channel_id = self . channel_id channel_name = self . name metadata = copy . deepcopy ( data_and_metadata . metadata ) hardware_source_metadata = dict ( ) hardware_source_metadata [ "hardware_source_id" ] = hardware_source_id hardware_source_metadata [ "channel_index" ] = channel_index if channel_id is not None : hardware_source_metadata [ "reference_key" ] = "_" . join ( [ hardware_source_id , channel_id ] ) hardware_source_metadata [ "channel_id" ] = channel_id else : hardware_source_metadata [ "reference_key" ] = hardware_source_id if channel_name is not None : hardware_source_metadata [ "channel_name" ] = channel_name if view_id : hardware_source_metadata [ "view_id" ] = view_id metadata . setdefault ( "hardware_source" , dict ( ) ) . update ( hardware_source_metadata ) data = data_and_metadata . data master_data = self . __data_and_metadata . data if self . __data_and_metadata else None data_matches = master_data is not None and data . shape == master_data . shape and data . dtype == master_data . dtype if data_matches and sub_area is not None : top = sub_area [ 0 ] [ 0 ] bottom = sub_area [ 0 ] [ 0 ] + sub_area [ 1 ] [ 0 ] left = sub_area [ 0 ] [ 1 ] right = sub_area [ 0 ] [ 1 ] + sub_area [ 1 ] [ 1 ] if top > 0 or left > 0 or bottom < data . shape [ 0 ] or right < data . shape [ 1 ] : master_data = numpy . copy ( master_data ) master_data [ top : bottom , left : right ] = data [ top : bottom , left : right ] else : master_data = numpy . copy ( data ) else : master_data = data data_descriptor = data_and_metadata . data_descriptor intensity_calibration = data_and_metadata . intensity_calibration if data_and_metadata else None dimensional_calibrations = data_and_metadata . dimensional_calibrations if data_and_metadata else None timestamp = data_and_metadata . timestamp new_extended_data = DataAndMetadata . new_data_and_metadata ( master_data , intensity_calibration = intensity_calibration , dimensional_calibrations = dimensional_calibrations , metadata = metadata , timestamp = timestamp , data_descriptor = data_descriptor ) self . __data_and_metadata = new_extended_data self . data_channel_updated_event . fire ( new_extended_data ) self . is_dirty = True
Called from hardware source when new data arrives.
4,337
def start ( self ) : old_start_count = self . __start_count self . __start_count += 1 if old_start_count == 0 : self . data_channel_start_event . fire ( )
Called from hardware source when data starts streaming.
4,338
def connect_data_item_reference ( self , data_item_reference ) : display_item = data_item_reference . display_item data_item = display_item . data_item if display_item else None if data_item and display_item : self . __connect_display ( display_item ) else : def data_item_reference_changed ( ) : self . __data_item_reference_changed_event_listener . close ( ) self . connect_data_item_reference ( data_item_reference ) self . __data_item_reference_changed_event_listener = data_item_reference . data_item_reference_changed_event . listen ( data_item_reference_changed )
Connect to the data item reference, creating a crop graphic if necessary.
4,339
def grab_earliest ( self , timeout : float = None ) -> typing . List [ DataAndMetadata . DataAndMetadata ] : timeout = timeout if timeout is not None else 10.0 with self . __buffer_lock : if len ( self . __buffer ) == 0 : done_event = threading . Event ( ) self . __done_events . append ( done_event ) self . __buffer_lock . release ( ) done = done_event . wait ( timeout ) self . __buffer_lock . acquire ( ) if not done : raise Exception ( "Could not grab latest." ) return self . __buffer . pop ( 0 )
Grab the earliest data from the buffer, blocking until one is available.
4,340
def grab_next ( self , timeout : float = None ) -> typing . List [ DataAndMetadata . DataAndMetadata ] : with self . __buffer_lock : self . __buffer = list ( ) return self . grab_latest ( timeout )
Grab the next data to finish from the buffer, blocking until one is available.
4,341
def grab_following ( self , timeout : float = None ) -> typing . List [ DataAndMetadata . DataAndMetadata ] : self . grab_next ( timeout ) return self . grab_next ( timeout )
Grab the next data to start from the buffer, blocking until one is available.
4,342
def pause ( self ) -> None : with self . __state_lock : if self . __state == DataChannelBuffer . State . started : self . __state = DataChannelBuffer . State . paused
Pause recording.
4,343
def resume ( self ) -> None : with self . __state_lock : if self . __state == DataChannelBuffer . State . paused : self . __state = DataChannelBuffer . State . started
Resume recording after pause.
4,344
def nlargest ( n , mapping ) : try : it = mapping . iteritems ( ) except AttributeError : it = iter ( mapping . items ( ) ) pq = minpq ( ) try : for i in range ( n ) : pq . additem ( * next ( it ) ) except StopIteration : pass try : while it : pq . pushpopitem ( * next ( it ) ) except StopIteration : pass out = list ( pq . popkeys ( ) ) out . reverse ( ) return out
Takes a mapping and returns the n keys associated with the largest values, in descending order. If the mapping has fewer than n items, all its keys are returned.
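The intended semantics can be checked against a plain-Python equivalent (the mapping below is made up); nlargest(3, mapping) should return the keys of the three largest values, largest first:

mapping = {"a": 5, "b": 9, "c": 2, "d": 7}   # hypothetical input
expected = [k for k, _ in sorted(mapping.items(), key=lambda kv: kv[1], reverse=True)[:3]]
print(expected)  # ['b', 'd', 'a'] -- what nlargest(3, mapping) is documented to return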
4,345
def fromkeys ( cls , iterable , value , ** kwargs ) : return cls ( ( ( k , value ) for k in iterable ) , ** kwargs )
Return a new pqdict mapping keys from an iterable to the same value.
4,346
def copy ( self ) : return self . __class__ ( self , key = self . _keyfn , precedes = self . _precedes )
Return a shallow copy of a pqdict.
4,347
def pop ( self , key = __marker , default = __marker ) : heap = self . _heap position = self . _position if key is self . __marker : if not heap : raise KeyError ( 'pqdict is empty' ) key = heap [ 0 ] . key del self [ key ] return key try : pos = position . pop ( key ) except KeyError : if default is self . __marker : raise return default else : node_to_delete = heap [ pos ] end = heap . pop ( ) if end is not node_to_delete : heap [ pos ] = end position [ end . key ] = pos self . _reheapify ( pos ) value = node_to_delete . value del node_to_delete return value
If key is in the pqdict, remove it and return its priority value, else return default. If default is not provided and key is not in the pqdict, raise a KeyError.
4,348
def popitem ( self ) : heap = self . _heap position = self . _position try : end = heap . pop ( - 1 ) except IndexError : raise KeyError ( 'pqdict is empty' ) if heap : node = heap [ 0 ] heap [ 0 ] = end position [ end . key ] = 0 self . _sink ( 0 ) else : node = end del position [ node . key ] return node . key , node . value
Remove and return the item with the highest priority. Raises KeyError if the pqdict is empty.
4,349
def topitem ( self ) : try : node = self . _heap [ 0 ] except IndexError : raise KeyError ( 'pqdict is empty' ) return node . key , node . value
Return the item with the highest priority. Raises KeyError if the pqdict is empty.
4,350
def additem ( self , key , value ) : if key in self . _position : raise KeyError ( '%s is already in the queue' % repr ( key ) ) self [ key ] = value
Add a new item. Raises KeyError if key is already in the pqdict.
4,351
def pushpopitem ( self , key , value , node_factory = _Node ) : heap = self . _heap position = self . _position precedes = self . _precedes prio = self . _keyfn ( value ) if self . _keyfn else value node = node_factory ( key , value , prio ) if key in self : raise KeyError ( '%s is already in the queue' % repr ( key ) ) if heap and precedes ( heap [ 0 ] . prio , node . prio ) : node , heap [ 0 ] = heap [ 0 ] , node position [ key ] = 0 del position [ node . key ] self . _sink ( 0 ) return node . key , node . value
Equivalent to inserting a new item followed by removing the top-priority item, but faster. Raises KeyError if the new key is already in the pqdict.
4,352
def updateitem ( self , key , new_val ) : if key not in self . _position : raise KeyError ( key ) self [ key ] = new_val
Update the priority value of an existing item. Raises KeyError if key is not in the pqdict.
4,353
def replace_key ( self , key , new_key ) : heap = self . _heap position = self . _position if new_key in self : raise KeyError ( '%s is already in the queue' % repr ( new_key ) ) pos = position . pop ( key ) position [ new_key ] = pos heap [ pos ] . key = new_key
Replace the key of an existing heap node in place. Raises KeyError if the key to replace does not exist or if the new key is already in the pqdict.
4,354
def swap_priority ( self , key1 , key2 ) : heap = self . _heap position = self . _position if key1 not in self or key2 not in self : raise KeyError pos1 , pos2 = position [ key1 ] , position [ key2 ] heap [ pos1 ] . key , heap [ pos2 ] . key = key2 , key1 position [ key1 ] , position [ key2 ] = pos2 , pos1
Fast way to swap the priority level of two items in the pqdict. Raises KeyError if either key does not exist.
4,355
def heapify ( self , key = __marker ) : if key is self . __marker : n = len ( self . _heap ) for pos in reversed ( range ( n // 2 ) ) : self . _sink ( pos ) else : try : pos = self . _position [ key ] except KeyError : raise KeyError ( key ) self . _reheapify ( pos )
Repair a broken heap. If the state of an item's priority value changes, you can re-sort only the relevant item by providing its key.
4,356
def package_has_version_file ( package_name ) : version_file_path = helpers . package_file_path ( '_version.py' , package_name ) return os . path . isfile ( version_file_path )
Check to make sure _version.py is contained in the package
4,357
def get_project_name ( ) : setup_py_content = helpers . get_file_content ( 'setup.py' ) ret = helpers . value_of_named_argument_in_function ( 'name' , 'setup' , setup_py_content , resolve_varname = True ) if ret and ret [ 0 ] == ret [ - 1 ] in ( '"' , "'" ) : ret = ret [ 1 : - 1 ] return ret
Grab the project name out of setup.py
4,358
def get_version ( package_name , ignore_cache = False ) : if ignore_cache : with microcache . temporarily_disabled ( ) : found = helpers . regex_in_package_file ( VERSION_SET_REGEX , '_version.py' , package_name , return_match = True ) else : found = helpers . regex_in_package_file ( VERSION_SET_REGEX , '_version.py' , package_name , return_match = True ) if found is None : raise ProjectError ( 'found {}, but __version__ is not defined' ) current_version = found [ 'version' ] return current_version
Get the version which is currently configured by the package
4,359
def set_version ( package_name , version_str ) : current_version = get_version ( package_name ) version_file_path = helpers . package_file_path ( '_version.py' , package_name ) version_file_content = helpers . get_file_content ( version_file_path ) version_file_content = version_file_content . replace ( current_version , version_str ) with open ( version_file_path , 'w' ) as version_file : version_file . write ( version_file_content )
Set the version in _version.py to version_str
4,360
def version_is_valid ( version_str ) : try : packaging . version . Version ( version_str ) except packaging . version . InvalidVersion : return False return True
Check to see if the version specified is valid as far as pkg_resources is concerned
4,361
def _get_uploaded_versions_warehouse ( project_name , index_url , requests_verify = True ) : url = '/' . join ( ( index_url , project_name , 'json' ) ) response = requests . get ( url , verify = requests_verify ) if response . status_code == 200 : return response . json ( ) [ 'releases' ] . keys ( ) return None
Query the pypi index at index_url using the warehouse API to find all of the releases
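As a usage sketch only (the project name is a placeholder and the call needs network access), pointing the helper above at the public Warehouse JSON API would look roughly like this:

versions = _get_uploaded_versions_warehouse("requests", "https://pypi.org/pypi")
if versions is not None:
    # versions is the set of release strings reported under the "releases" key
    print(sorted(versions)[:3])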
4,362
def _get_uploaded_versions_pypicloud ( project_name , index_url , requests_verify = True ) : api_url = index_url for suffix in ( '/pypi' , '/pypi/' , '/simple' , '/simple/' ) : if api_url . endswith ( suffix ) : api_url = api_url [ : len ( suffix ) * - 1 ] + '/api/package' break url = '/' . join ( ( api_url , project_name ) ) response = requests . get ( url , verify = requests_verify ) if response . status_code == 200 : return [ p [ 'version' ] for p in response . json ( ) [ 'packages' ] ] return None
Query the pypi index at index_url using the pypicloud API to find all versions
4,363
def version_already_uploaded ( project_name , version_str , index_url , requests_verify = True ) : all_versions = _get_uploaded_versions ( project_name , index_url , requests_verify ) return version_str in all_versions
Check to see if the version specified has already been uploaded to the configured index
4,364
def convert_readme_to_rst ( ) : project_files = os . listdir ( '.' ) for filename in project_files : if filename . lower ( ) == 'readme' : raise ProjectError ( 'found {} in project directory...' . format ( filename ) + 'not sure what to do with it, refusing to convert' ) elif filename . lower ( ) == 'readme.rst' : raise ProjectError ( 'found {} in project directory...' . format ( filename ) + 'refusing to overwrite' ) for filename in project_files : if filename . lower ( ) == 'readme.md' : rst_filename = 'README.rst' logger . info ( 'converting {} to {}' . format ( filename , rst_filename ) ) try : rst_content = pypandoc . convert ( filename , 'rst' ) with open ( 'README.rst' , 'w' ) as rst_file : rst_file . write ( rst_content ) return except OSError as e : raise ProjectError ( 'could not convert readme to rst due to pypandoc error:' + os . linesep + str ( e ) ) raise ProjectError ( 'could not find any README.md file to convert' )
Attempt to convert a README.md file into README.rst
4,365
def get_packaged_files ( package_name ) : if not os . path . isdir ( 'dist' ) : return [ ] return [ os . path . join ( 'dist' , filename ) for filename in os . listdir ( 'dist' ) ]
Collect relative paths to all files which have already been packaged
4,366
def multiple_packaged_versions ( package_name ) : dist_files = os . listdir ( 'dist' ) versions = set ( ) for filename in dist_files : version = funcy . re_find ( r'{}-(.+).tar.gz' . format ( package_name ) , filename ) if version : versions . add ( version ) return len ( versions ) > 1
Look through the built package directory and see if there are multiple versions there
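A rough illustration of the version-extraction regex on hypothetical dist/ filenames, using the standard re module rather than funcy:

import re

filenames = ["mypkg-1.0.0.tar.gz", "mypkg-1.0.1.tar.gz", "mypkg-1.0.1-py3-none-any.whl"]
versions = set()
for filename in filenames:
    match = re.search(r"mypkg-(.+).tar.gz", filename)
    if match:                      # only the sdist names match the pattern
        versions.add(match.group(1))
print(sorted(versions), len(versions) > 1)   # ['1.0.0', '1.0.1'] True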
4,367
def period_neighborhood_probability ( self , radius , smoothing , threshold , stride , start_time , end_time ) : neighbor_x = self . x [ : : stride , : : stride ] neighbor_y = self . y [ : : stride , : : stride ] neighbor_kd_tree = cKDTree ( np . vstack ( ( neighbor_x . ravel ( ) , neighbor_y . ravel ( ) ) ) . T ) neighbor_prob = np . zeros ( ( self . data . shape [ 0 ] , neighbor_x . shape [ 0 ] , neighbor_x . shape [ 1 ] ) ) print ( 'Forecast Hours: {0}-{1}' . format ( start_time , end_time ) ) for m in range ( len ( self . members ) ) : period_max = self . data [ m , start_time : end_time , : , : ] . max ( axis = 0 ) valid_i , valid_j = np . where ( period_max >= threshold ) print ( self . members [ m ] , len ( valid_i ) ) if len ( valid_i ) > 0 : var_kd_tree = cKDTree ( np . vstack ( ( self . x [ valid_i , valid_j ] , self . y [ valid_i , valid_j ] ) ) . T ) exceed_points = np . unique ( np . concatenate ( var_kd_tree . query_ball_tree ( neighbor_kd_tree , radius ) ) ) . astype ( int ) exceed_i , exceed_j = np . unravel_index ( exceed_points , neighbor_x . shape ) neighbor_prob [ m ] [ exceed_i , exceed_j ] = 1 if smoothing > 0 : neighbor_prob [ m ] = gaussian_filter ( neighbor_prob [ m ] , smoothing , mode = 'constant' ) return neighbor_prob
Calculate the neighborhood probability over the full period of the forecast
4,368
def load_map_info ( self , map_file ) : if self . ensemble_name . upper ( ) == "SSEF" : proj_dict , grid_dict = read_arps_map_file ( map_file ) self . dx = int ( grid_dict [ "dx" ] ) mapping_data = make_proj_grids ( proj_dict , grid_dict ) for m , v in mapping_data . items ( ) : setattr ( self , m , v ) self . i , self . j = np . indices ( self . lon . shape ) self . proj = get_proj_obj ( proj_dict ) elif self . ensemble_name . upper ( ) in [ "NCAR" , "NCARSTORM" , "HRRR" , "VSE" , "HREFV2" ] : proj_dict , grid_dict = read_ncar_map_file ( map_file ) if self . member_name [ 0 : 7 ] == "1km_pbl" : grid_dict [ "dx" ] = 1000 grid_dict [ "dy" ] = 1000 grid_dict [ "sw_lon" ] = 258.697 grid_dict [ "sw_lat" ] = 23.999 grid_dict [ "ne_lon" ] = 282.868269206236 grid_dict [ "ne_lat" ] = 36.4822338520542 self . dx = int ( grid_dict [ "dx" ] ) mapping_data = make_proj_grids ( proj_dict , grid_dict ) for m , v in mapping_data . items ( ) : setattr ( self , m , v ) self . i , self . j = np . indices ( self . lon . shape ) self . proj = get_proj_obj ( proj_dict )
Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection.
4,369
def read_geojson ( filename ) : json_file = open ( filename ) data = json . load ( json_file ) json_file . close ( ) times = data [ "properties" ] [ "times" ] main_data = dict ( timesteps = [ ] , masks = [ ] , x = [ ] , y = [ ] , i = [ ] , j = [ ] ) attribute_data = dict ( ) for feature in data [ "features" ] : for main_name in main_data . keys ( ) : main_data [ main_name ] . append ( np . array ( feature [ "properties" ] [ main_name ] ) ) for k , v in feature [ "properties" ] [ "attributes" ] . items ( ) : if k not in attribute_data . keys ( ) : attribute_data [ k ] = [ np . array ( v ) ] else : attribute_data [ k ] . append ( np . array ( v ) ) kwargs = { } for kw in [ "dx" , "step" , "u" , "v" ] : if kw in data [ "properties" ] . keys ( ) : kwargs [ kw ] = data [ "properties" ] [ kw ] sto = STObject ( main_data [ "timesteps" ] , main_data [ "masks" ] , main_data [ "x" ] , main_data [ "y" ] , main_data [ "i" ] , main_data [ "j" ] , times [ 0 ] , times [ - 1 ] , ** kwargs ) for k , v in attribute_data . items ( ) : sto . attributes [ k ] = v return sto
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file.
4,370
def center_of_mass ( self , time ) : if self . start_time <= time <= self . end_time : diff = time - self . start_time valid = np . flatnonzero ( self . masks [ diff ] != 0 ) if valid . size > 0 : com_x = 1.0 / self . timesteps [ diff ] . ravel ( ) [ valid ] . sum ( ) * np . sum ( self . timesteps [ diff ] . ravel ( ) [ valid ] * self . x [ diff ] . ravel ( ) [ valid ] ) com_y = 1.0 / self . timesteps [ diff ] . ravel ( ) [ valid ] . sum ( ) * np . sum ( self . timesteps [ diff ] . ravel ( ) [ valid ] * self . y [ diff ] . ravel ( ) [ valid ] ) else : com_x = np . mean ( self . x [ diff ] ) com_y = np . mean ( self . y [ diff ] ) else : com_x = None com_y = None return com_x , com_y
Calculate the center of mass at a given timestep.
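A tiny worked example of the intensity-weighted centroid computed above, with made-up values (weights 1 and 3 at x = 10 and 20):

import numpy as np

vals = np.array([1.0, 3.0])             # intensities inside the object mask
x = np.array([10.0, 20.0])
y = np.array([5.0, 5.0])
com_x = (vals * x).sum() / vals.sum()   # (1*10 + 3*20) / 4 = 17.5
com_y = (vals * y).sum() / vals.sum()   # (1*5  + 3*5)  / 4 = 5.0
print(com_x, com_y)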
4,371
def trajectory ( self ) : traj = np . zeros ( ( 2 , self . times . size ) ) for t , time in enumerate ( self . times ) : traj [ : , t ] = self . center_of_mass ( time ) return traj
Calculates the center of mass for each time step and outputs an array
4,372
def get_corner ( self , time ) : if self . start_time <= time <= self . end_time : diff = time - self . start_time return self . i [ diff ] [ 0 , 0 ] , self . j [ diff ] [ 0 , 0 ] else : return - 1 , - 1
Gets the corner array indices of the STObject at a given time, corresponding to the upper-left corner of the bounding box for the STObject.
4,373
def size ( self , time ) : if self . start_time <= time <= self . end_time : return self . masks [ time - self . start_time ] . sum ( ) else : return 0
Gets the size of the object at a given time.
4,374
def max_intensity ( self , time ) : ti = np . where ( time == self . times ) [ 0 ] [ 0 ] return self . timesteps [ ti ] . max ( )
Calculate the maximum intensity found at a timestep.
4,375
def boundary_polygon ( self , time ) : ti = np . where ( time == self . times ) [ 0 ] [ 0 ] com_x , com_y = self . center_of_mass ( time ) padded_mask = np . pad ( self . masks [ ti ] , 1 , 'constant' , constant_values = 0 ) chull = convex_hull_image ( padded_mask ) boundary_image = find_boundaries ( chull , mode = 'inner' , background = 0 ) boundary_image = boundary_image [ 1 : - 1 , 1 : - 1 ] boundary_x = self . x [ ti ] . ravel ( ) [ boundary_image . ravel ( ) ] boundary_y = self . y [ ti ] . ravel ( ) [ boundary_image . ravel ( ) ] r = np . sqrt ( ( boundary_x - com_x ) ** 2 + ( boundary_y - com_y ) ** 2 ) theta = np . arctan2 ( ( boundary_y - com_y ) , ( boundary_x - com_x ) ) * 180.0 / np . pi + 360 polar_coords = np . array ( [ ( r [ x ] , theta [ x ] ) for x in range ( r . size ) ] , dtype = [ ( 'r' , 'f4' ) , ( 'theta' , 'f4' ) ] ) coord_order = np . argsort ( polar_coords , order = [ 'theta' , 'r' ] ) ordered_coords = np . vstack ( [ boundary_x [ coord_order ] , boundary_y [ coord_order ] ] ) return ordered_coords
Get coordinates of object boundary in counter-clockwise order
4,376
def estimate_motion ( self , time , intensity_grid , max_u , max_v ) : ti = np . where ( time == self . times ) [ 0 ] [ 0 ] mask_vals = np . where ( self . masks [ ti ] . ravel ( ) == 1 ) i_vals = self . i [ ti ] . ravel ( ) [ mask_vals ] j_vals = self . j [ ti ] . ravel ( ) [ mask_vals ] obj_vals = self . timesteps [ ti ] . ravel ( ) [ mask_vals ] u_shifts = np . arange ( - max_u , max_u + 1 ) v_shifts = np . arange ( - max_v , max_v + 1 ) min_error = 99999999999.0 best_u = 0 best_v = 0 for u in u_shifts : j_shift = j_vals - u for v in v_shifts : i_shift = i_vals - v if np . all ( ( 0 <= i_shift ) & ( i_shift < intensity_grid . shape [ 0 ] ) & ( 0 <= j_shift ) & ( j_shift < intensity_grid . shape [ 1 ] ) ) : shift_vals = intensity_grid [ i_shift , j_shift ] else : shift_vals = np . zeros ( i_shift . shape ) error = np . abs ( shift_vals - obj_vals ) . mean ( ) if error < min_error : min_error = error best_u = u * self . dx best_v = v * self . dx self . u [ ti ] = best_u self . v [ ti ] = best_v return best_u , best_v , min_error
Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.
4,377
def count_overlap ( self , time , other_object , other_time ) : ti = np . where ( time == self . times ) [ 0 ] [ 0 ] ma = np . where ( self . masks [ ti ] . ravel ( ) == 1 ) oti = np . where ( other_time == other_object . times ) [ 0 ] obj_coords = np . zeros ( self . masks [ ti ] . sum ( ) , dtype = [ ( 'x' , int ) , ( 'y' , int ) ] ) other_obj_coords = np . zeros ( other_object . masks [ oti ] . sum ( ) , dtype = [ ( 'x' , int ) , ( 'y' , int ) ] ) obj_coords [ 'x' ] = self . i [ ti ] . ravel ( ) [ ma ] obj_coords [ 'y' ] = self . j [ ti ] . ravel ( ) [ ma ] other_obj_coords [ 'x' ] = other_object . i [ oti ] [ other_object . masks [ oti ] == 1 ] other_obj_coords [ 'y' ] = other_object . j [ oti ] [ other_object . masks [ oti ] == 1 ] return float ( np . intersect1d ( obj_coords , other_obj_coords ) . size ) / np . maximum ( self . masks [ ti ] . sum ( ) , other_object . masks [ oti ] . sum ( ) )
Counts the number of points that overlap between this STObject and another STObject. Used for tracking.
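A small numeric sketch of the overlap fraction described above (intersection size divided by the larger object's size), using hypothetical pixel coordinate sets:

a = {(3, 4), (3, 5), (4, 4), (4, 5)}        # pixels of this object
b = {(4, 5), (4, 6), (5, 5)}                # pixels of the other object
overlap = len(a & b) / max(len(a), len(b))  # 1 shared pixel / 4 = 0.25
print(overlap)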
4,378
def extract_attribute_array ( self , data_array , var_name ) : if var_name not in self . attributes . keys ( ) : self . attributes [ var_name ] = [ ] for t in range ( self . times . size ) : self . attributes [ var_name ] . append ( data_array [ self . i [ t ] , self . j [ t ] ] )
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object.
4,379
def extract_tendency_grid ( self , model_grid ) : var_name = model_grid . variable + "-tendency" self . attributes [ var_name ] = [ ] timesteps = np . arange ( self . start_time , self . end_time + 1 ) for ti , t in enumerate ( timesteps ) : t_index = t - model_grid . start_hour self . attributes [ var_name ] . append ( model_grid . data [ t_index , self . i [ ti ] , self . j [ ti ] ] - model_grid . data [ t_index - 1 , self . i [ ti ] , self . j [ ti ] ] )
Extracts the difference in model output between consecutive time steps (the tendency) over the object region.
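A worked sketch of a one-step tendency (field at time t minus time t-1, sampled at the object pixels); the model array and indices are hypothetical:

import numpy as np

# Hypothetical model output with shape (time, y, x) and object pixel indices.
model_data = np.arange(2 * 4 * 4, dtype=float).reshape(2, 4, 4)
i_idx = np.array([1, 2])
j_idx = np.array([1, 1])

# Tendency = field at time t minus field at time t - 1, sampled at the object.
tendency = model_data[1, i_idx, j_idx] - model_data[0, i_idx, j_idx]
print(tendency)  # [16. 16.] because this synthetic field increases by 16 per step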
4,380
def calc_timestep_statistic ( self , statistic , time ) : ti = np . where ( self . times == time ) [ 0 ] [ 0 ] ma = np . where ( self . masks [ ti ] . ravel ( ) == 1 ) if statistic in [ 'mean' , 'max' , 'min' , 'std' , 'ptp' ] : stat_val = getattr ( self . timesteps [ ti ] . ravel ( ) [ ma ] , statistic ) ( ) elif statistic == 'median' : stat_val = np . median ( self . timesteps [ ti ] . ravel ( ) [ ma ] ) elif 'percentile' in statistic : per = int ( statistic . split ( "_" ) [ 1 ] ) stat_val = np . percentile ( self . timesteps [ ti ] . ravel ( ) [ ma ] , per ) elif 'dt' in statistic : stat_name = statistic [ : - 3 ] if ti == 0 : stat_val = 0 else : stat_val = self . calc_timestep_statistic ( stat_name , time ) - self . calc_timestep_statistic ( stat_name , time - 1 ) else : stat_val = np . nan return stat_val
Calculate statistics from the primary attribute of the STObject.
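A simplified sketch of the statistic dispatch (getattr for simple reductions, explicit handling for median and percentiles); the values are made up and the time-difference branch is omitted:

import numpy as np

values = np.array([1.0, 5.0, 2.0, 8.0])   # hypothetical masked object values

def timestep_statistic(values, statistic):
    # Simple reductions are looked up by name on the ndarray itself.
    if statistic in ("mean", "max", "min", "std", "ptp"):
        return getattr(values, statistic)()
    if statistic == "median":
        return np.median(values)
    if statistic.startswith("percentile_"):
        return np.percentile(values, int(statistic.split("_")[1]))
    return np.nan

print(timestep_statistic(values, "max"))            # 8.0
print(timestep_statistic(values, "percentile_50"))  # 3.5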
4,381
def calc_shape_step ( self , stat_names , time ) : ti = np . where ( self . times == time ) [ 0 ] [ 0 ] props = regionprops ( self . masks [ ti ] , self . timesteps [ ti ] ) [ 0 ] shape_stats = [ ] for stat_name in stat_names : if "moments_hu" in stat_name : hu_index = int ( stat_name . split ( "_" ) [ - 1 ] ) hu_name = "_" . join ( stat_name . split ( "_" ) [ : - 1 ] ) hu_val = np . log ( props [ hu_name ] [ hu_index ] ) if np . isnan ( hu_val ) : shape_stats . append ( 0 ) else : shape_stats . append ( hu_val ) else : shape_stats . append ( props [ stat_name ] ) return shape_stats
Calculate shape statistics for a single time step
4,382
def to_geojson ( self , filename , proj , metadata = None ) : if metadata is None : metadata = { } json_obj = { "type" : "FeatureCollection" , "features" : [ ] , "properties" : { } } json_obj [ 'properties' ] [ 'times' ] = self . times . tolist ( ) json_obj [ 'properties' ] [ 'dx' ] = self . dx json_obj [ 'properties' ] [ 'step' ] = self . step json_obj [ 'properties' ] [ 'u' ] = self . u . tolist ( ) json_obj [ 'properties' ] [ 'v' ] = self . v . tolist ( ) for k , v in metadata . items ( ) : json_obj [ 'properties' ] [ k ] = v for t , time in enumerate ( self . times ) : feature = { "type" : "Feature" , "geometry" : { "type" : "Polygon" } , "properties" : { } } boundary_coords = self . boundary_polygon ( time ) lonlat = np . vstack ( proj ( boundary_coords [ 0 ] , boundary_coords [ 1 ] , inverse = True ) ) lonlat_list = lonlat . T . tolist ( ) if len ( lonlat_list ) > 0 : lonlat_list . append ( lonlat_list [ 0 ] ) feature [ "geometry" ] [ "coordinates" ] = [ lonlat_list ] for attr in [ "timesteps" , "masks" , "x" , "y" , "i" , "j" ] : feature [ "properties" ] [ attr ] = getattr ( self , attr ) [ t ] . tolist ( ) feature [ "properties" ] [ "attributes" ] = { } for attr_name , steps in self . attributes . items ( ) : feature [ "properties" ] [ "attributes" ] [ attr_name ] = steps [ t ] . tolist ( ) json_obj [ 'features' ] . append ( feature ) file_obj = open ( filename , "w" ) json . dump ( json_obj , file_obj , indent = 1 , sort_keys = True ) file_obj . close ( ) return
Output the data in the STObject to a GeoJSON file.
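For context, a minimal sketch of assembling and writing a GeoJSON FeatureCollection with the standard json module; the coordinates and properties are placeholders:

import json

# Closed polygon ring in lon/lat (placeholder values), first point repeated at the end.
ring = [[-97.0, 35.0], [-96.9, 35.0], [-96.9, 35.1], [-97.0, 35.0]]
feature = {
    "type": "Feature",
    "geometry": {"type": "Polygon", "coordinates": [ring]},
    "properties": {"time": "2016-05-01T00:00:00"},
}
collection = {"type": "FeatureCollection", "features": [feature], "properties": {"dx": 3000}}

with open("example_object.json", "w") as file_obj:
    json.dump(collection, file_obj, indent=1, sort_keys=True)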
4,383
def model ( self , v = None ) : "Returns the model of node v" if v is None : v = self . estopping hist = self . hist trace = self . trace ( v ) ins = None if self . _base . _probability_calibration is not None : node = hist [ - 1 ] node . normalize ( ) X = np . array ( [ x . full_array ( ) for x in node . hy ] ) . T y = np . array ( self . _base . _y_klass . full_array ( ) ) mask = np . ones ( X . shape [ 0 ] , dtype = np . bool ) mask [ np . array ( self . _base . _mask_ts . index ) ] = False ins = self . _base . _probability_calibration ( ) . fit ( X [ mask ] , y [ mask ] ) if self . _classifier : nclasses = self . _labels . shape [ 0 ] else : nclasses = None m = Model ( trace , hist , nvar = self . _base . _nvar , classifier = self . _classifier , labels = self . _labels , probability_calibration = ins , nclasses = nclasses ) return m
Returns the model of node v
4,384
def trace ( self , n ) : "Restore the position in the history of individual v's nodes" trace_map = { } self . _trace ( n , trace_map ) s = list ( trace_map . keys ( ) ) s . sort ( ) return s
Restore the position in the history of individual v's nodes
4,385
def tournament ( self , negative = False ) : if self . generation <= self . _random_generations and not negative : return self . random_selection ( ) if not self . _negative_selection and negative : return self . random_selection ( negative = negative ) vars = self . random ( ) fit = [ ( k , self . population [ x ] . fitness ) for k , x in enumerate ( vars ) ] if negative : fit = min ( fit , key = lambda x : x [ 1 ] ) else : fit = max ( fit , key = lambda x : x [ 1 ] ) index = fit [ 0 ] return vars [ index ]
Tournament selection; when negative is True it performs negative tournament selection.
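A minimal, self-contained sketch of tournament selection over a fitness array (not the project's population machinery); names and values are illustrative:

import numpy as np

rng = np.random.default_rng(0)
fitness = np.array([0.2, 0.9, 0.5, 0.7, 0.1])   # hypothetical population fitness

def tournament(fitness, rng, k=2, negative=False):
    # Sample k distinct candidates and keep the fittest (or the weakest, for negative selection).
    candidates = rng.choice(fitness.size, size=k, replace=False)
    pick = min if negative else max
    return pick(candidates, key=lambda idx: fitness[idx])

print(tournament(fitness, rng))                  # index of the fitter of two random candidates
print(tournament(fitness, rng, negative=True))   # index of the weaker of two random candidates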
4,386
def create_population ( self ) : "Create the initial population" base = self . _base if base . _share_inputs : used_inputs_var = SelectNumbers ( [ x for x in range ( base . nvar ) ] ) used_inputs_naive = used_inputs_var if base . _pr_variable == 0 : used_inputs_var = SelectNumbers ( [ ] ) used_inputs_naive = SelectNumbers ( [ x for x in range ( base . nvar ) ] ) elif base . _pr_variable == 1 : used_inputs_var = SelectNumbers ( [ x for x in range ( base . nvar ) ] ) used_inputs_naive = SelectNumbers ( [ ] ) else : used_inputs_var = SelectNumbers ( [ x for x in range ( base . nvar ) ] ) used_inputs_naive = SelectNumbers ( [ x for x in range ( base . nvar ) ] ) nb_input = Inputs ( base , used_inputs_naive , functions = base . _input_functions ) while ( ( base . _all_inputs and not base . stopping_criteria_tl ( ) ) or ( self . popsize < base . popsize and not base . stopping_criteria ( ) ) ) : if base . _all_inputs and used_inputs_var . empty ( ) and used_inputs_naive . empty ( ) : base . _init_popsize = self . popsize break if nb_input . use_all_variables ( ) : v = nb_input . all_variables ( ) if v is None : continue elif not used_inputs_var . empty ( ) and np . random . random ( ) < base . _pr_variable : v = self . variable_input ( used_inputs_var ) if v is None : used_inputs_var . pos = used_inputs_var . size continue elif not used_inputs_naive . empty ( ) : v = nb_input . input ( ) if not used_inputs_var . empty ( ) and used_inputs_naive . empty ( ) : base . _pr_variable = 1 if v is None : used_inputs_naive . pos = used_inputs_naive . size if not used_inputs_var . empty ( ) : base . _pr_variable = 1 continue else : gen = self . generation self . generation = 0 v = base . random_offspring ( ) self . generation = gen self . add ( v )
Create the initial population
4,387
def add ( self , v ) : "Add an individual to the population" self . population . append ( v ) self . _current_popsize += 1 v . position = len ( self . _hist ) self . _hist . append ( v ) self . bsf = v self . estopping = v self . _density += self . get_density ( v )
Add an individual to the population
4,388
def replace ( self , v ) : if self . popsize < self . _popsize : return self . add ( v ) k = self . tournament ( negative = True ) self . clean ( self . population [ k ] ) self . population [ k ] = v v . position = len ( self . _hist ) self . _hist . append ( v ) self . bsf = v self . estopping = v self . _inds_replace += 1 self . _density += self . get_density ( v ) if self . _inds_replace == self . _popsize : self . _inds_replace = 0 self . generation += 1 gc . collect ( )
Replace an individual selected by negative tournament selection with individual v
4,389
def make_directory_if_needed ( directory_path ) : if os . path . exists ( directory_path ) : if not os . path . isdir ( directory_path ) : raise OSError ( "Path is not a directory:" , directory_path ) else : os . makedirs ( directory_path )
Make the directory path if needed.
4,390
def hatchery ( ) : args = docopt . docopt ( __doc__ ) task_list = args [ '<task>' ] if not task_list or 'help' in task_list or args [ '--help' ] : print ( __doc__ . format ( version = _version . __version__ , config_files = config . CONFIG_LOCATIONS ) ) return 0 level_str = args [ '--log-level' ] try : level_const = getattr ( logging , level_str . upper ( ) ) logging . basicConfig ( level = level_const ) if level_const == logging . DEBUG : workdir . options . debug = True except LookupError : logging . basicConfig ( ) logger . error ( 'received invalid log level: ' + level_str ) return 1 for task in task_list : if task not in ORDERED_TASKS : logger . info ( 'starting task: check' ) logger . error ( 'received invalid task: ' + task ) return 1 for task in CHECK_TASKS : if task in task_list : task_check ( args ) break if 'package' in task_list and not args [ '--release-version' ] : logger . error ( '--release-version is required for the package task' ) return 1 config_dict = _get_config_or_die ( calling_task = 'hatchery' , required_params = [ 'auto_push_tag' ] ) if config_dict [ 'auto_push_tag' ] and 'upload' in task_list : logger . info ( 'adding task: tag (auto_push_tag==True)' ) task_list . append ( 'tag' ) for task in ORDERED_TASKS : if task in task_list and task != 'check' : logger . info ( 'starting task: ' + task ) globals ( ) [ 'task_' + task ] ( args ) logger . info ( "all's well that ends well...hatchery out" ) return 0
Main entry point for the hatchery program
4,391
def call ( cmd_args , suppress_output = False ) : if not funcy . is_list ( cmd_args ) and not funcy . is_tuple ( cmd_args ) : cmd_args = shlex . split ( cmd_args ) logger . info ( 'executing `{}`' . format ( ' ' . join ( cmd_args ) ) ) call_request = CallRequest ( cmd_args , suppress_output = suppress_output ) call_result = call_request . run ( ) if call_result . exitval : logger . error ( '`{}` returned error code {}' . format ( ' ' . join ( cmd_args ) , call_result . exitval ) ) return call_result
Call an arbitrary command and return the exit value, stdout, and stderr as a tuple
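A rough standard-library analogue using subprocess.run; the project's CallRequest wrapper is not shown in this row, so this sketch substitutes assumed behavior:

import shlex
import subprocess

def call_sketch(cmd_args, suppress_output=False):
    # Accept either a string or a pre-split argument list, like the function above.
    if isinstance(cmd_args, str):
        cmd_args = shlex.split(cmd_args)
    result = subprocess.run(
        cmd_args,
        capture_output=suppress_output,  # capture (and so hide) stdout/stderr when suppressing
        text=True,
    )
    if result.returncode:
        print("`{}` returned error code {}".format(" ".join(cmd_args), result.returncode))
    return result

# e.g. call_sketch("python --version")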
4,392
def setup ( cmd_args , suppress_output = False ) : if not funcy . is_list ( cmd_args ) and not funcy . is_tuple ( cmd_args ) : cmd_args = shlex . split ( cmd_args ) cmd_args = [ sys . executable , 'setup.py' ] + [ x for x in cmd_args ] return call ( cmd_args , suppress_output = suppress_output )
Call a setup.py command or list of commands
4,393
def load_data ( self ) : data = [ ] valid_dates = [ ] mrms_files = np . array ( sorted ( os . listdir ( self . path + self . variable + "/" ) ) ) mrms_file_dates = np . array ( [ m_file . split ( "_" ) [ - 2 ] . split ( "-" ) [ 0 ] for m_file in mrms_files ] ) old_mrms_file = None file_obj = None for t in range ( self . all_dates . shape [ 0 ] ) : file_index = np . where ( mrms_file_dates == self . all_dates [ t ] . strftime ( "%Y%m%d" ) ) [ 0 ] if len ( file_index ) > 0 : mrms_file = mrms_files [ file_index ] [ 0 ] if mrms_file is not None : if file_obj is not None : file_obj . close ( ) file_obj = Dataset ( self . path + self . variable + "/" + mrms_file ) if "time" in file_obj . variables . keys ( ) : time_var = "time" else : time_var = "date" file_valid_dates = pd . DatetimeIndex ( num2date ( file_obj . variables [ time_var ] [ : ] , file_obj . variables [ time_var ] . units ) ) else : file_valid_dates = pd . DatetimeIndex ( [ ] ) time_index = np . where ( file_valid_dates . values == self . all_dates . values [ t ] ) [ 0 ] if len ( time_index ) > 0 : data . append ( file_obj . variables [ self . variable ] [ time_index [ 0 ] ] ) valid_dates . append ( self . all_dates [ t ] ) if file_obj is not None : file_obj . close ( ) self . data = np . array ( data ) self . data [ self . data < 0 ] = 0 self . data [ self . data > 150 ] = 150 self . valid_dates = pd . DatetimeIndex ( valid_dates )
Loads data files and stores the output in the data attribute.
4,394
def rescale_data ( data , data_min , data_max , out_min = 0.0 , out_max = 100.0 ) : return ( out_max - out_min ) / ( data_max - data_min ) * ( data - data_min ) + out_min
Rescale your input data so that it ranges over integer values, which will perform better in the watershed.
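A worked example of the linear rescaling formula above; the input range is illustrative:

# Map values in a hypothetical [0, 75] range onto [0, 100].
def rescale(data, data_min, data_max, out_min=0.0, out_max=100.0):
    return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min

print(rescale(37.5, 0.0, 75.0))   # 50.0: the input midpoint maps to the output midpoint
print(rescale(75.0, 0.0, 75.0))   # 100.0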
4,395
def label ( self , input_grid ) : marked = self . find_local_maxima ( input_grid ) marked = np . where ( marked >= 0 , 1 , 0 ) markers = splabel ( marked ) [ 0 ] return markers
Labels the input grid using the enhanced watershed algorithm.
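Assuming splabel refers to scipy.ndimage.label, a minimal sketch of labeling the thresholded marker field; the marker values are made up:

import numpy as np
from scipy.ndimage import label

# Hypothetical marker field from find_local_maxima: >= 0 where a region grew.
marked = np.array([[-1,  2,  2, -1],
                   [-1,  2, -1, -1],
                   [-1, -1, -1,  3],
                   [-1, -1,  3,  3]])
binary = np.where(marked >= 0, 1, 0)
markers, n_objects = label(binary)
print(n_objects)   # 2 connected regions
print(markers)     # integer labels 1 and 2 over the two regions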
4,396
def find_local_maxima ( self , input_grid ) : pixels , q_data = self . quantize ( input_grid ) centers = OrderedDict ( ) for p in pixels . keys ( ) : centers [ p ] = [ ] marked = np . ones ( q_data . shape , dtype = int ) * self . UNMARKED MIN_INFL = int ( np . round ( 1 + 0.5 * np . sqrt ( self . max_size ) ) ) MAX_INFL = 2 * MIN_INFL marked_so_far = [ ] for b in sorted ( pixels . keys ( ) , reverse = True ) : infl_dist = MIN_INFL + int ( np . round ( float ( b ) / self . max_bin * ( MAX_INFL - MIN_INFL ) ) ) for p in pixels [ b ] : if marked [ p ] == self . UNMARKED : ok = False del marked_so_far [ : ] for ( i , j ) , v in np . ndenumerate ( marked [ p [ 0 ] - infl_dist : p [ 0 ] + infl_dist + 1 , p [ 1 ] - infl_dist : p [ 1 ] + infl_dist + 1 ] ) : if v == self . UNMARKED : ok = True marked [ i - infl_dist + p [ 0 ] , j - infl_dist + p [ 1 ] ] = b marked_so_far . append ( ( i - infl_dist + p [ 0 ] , j - infl_dist + p [ 1 ] ) ) else : ok = False break if ok : centers [ b ] . append ( p ) else : for m in marked_so_far : marked [ m ] = self . UNMARKED marked [ : , : ] = self . UNMARKED deferred_from_last = [ ] deferred_to_next = [ ] for delta in range ( 0 , self . delta + 1 ) : for b in sorted ( centers . keys ( ) , reverse = True ) : bin_lower = b - delta deferred_from_last [ : ] = deferred_to_next [ : ] del deferred_to_next [ : ] foothills = [ ] n_centers = len ( centers [ b ] ) tot_centers = n_centers + len ( deferred_from_last ) for i in range ( tot_centers ) : if i < n_centers : center = centers [ b ] [ i ] else : center = deferred_from_last [ i - n_centers ] if bin_lower < 0 : bin_lower = 0 if marked [ center ] == self . UNMARKED : captured = self . set_maximum ( q_data , marked , center , bin_lower , foothills ) if not captured : deferred_to_next . append ( center ) else : pass self . remove_foothills ( q_data , marked , b , bin_lower , centers , foothills ) del deferred_from_last [ : ] del deferred_to_next [ : ] return marked
Finds the local maxima in the input grid and performs region growing to identify objects.
4,397
def set_maximum ( self , q_data , marked , center , bin_lower , foothills ) : as_bin = [ ] as_glob = [ ] marked_so_far = [ ] will_be_considered_again = False as_bin . append ( center ) center_data = q_data [ center ] while len ( as_bin ) > 0 : p = as_bin . pop ( - 1 ) if marked [ p ] != self . UNMARKED : continue marked [ p ] = q_data [ center ] marked_so_far . append ( p ) for index , val in np . ndenumerate ( marked [ p [ 0 ] - 1 : p [ 0 ] + 2 , p [ 1 ] - 1 : p [ 1 ] + 2 ] ) : if val == self . UNMARKED : pixel = ( index [ 0 ] - 1 + p [ 0 ] , index [ 1 ] - 1 + p [ 1 ] ) p_data = q_data [ pixel ] if ( not will_be_considered_again ) and ( p_data >= 0 ) and ( p_data < center_data ) : will_be_considered_again = True if p_data >= bin_lower and ( np . abs ( center_data - p_data ) <= self . delta ) : as_bin . append ( pixel ) elif p_data >= 0 : as_glob . append ( pixel ) if bin_lower == 0 : will_be_considered_again = False big_enough = len ( marked_so_far ) >= self . max_size if big_enough : foothills . append ( ( center , as_glob ) ) elif will_be_considered_again : for m in marked_so_far : marked [ m ] = self . UNMARKED del as_bin [ : ] del as_glob [ : ] del marked_so_far [ : ] return big_enough or ( not will_be_considered_again )
Grow a region at a certain bin level and check if the region has reached the maximum size.
4,398
def remove_foothills ( self , q_data , marked , bin_num , bin_lower , centers , foothills ) : hills = [ ] for foot in foothills : center = foot [ 0 ] hills [ : ] = foot [ 1 ] [ : ] while len ( hills ) > 0 : pt = hills . pop ( - 1 ) marked [ pt ] = self . GLOBBED for s_index , val in np . ndenumerate ( marked [ pt [ 0 ] - 1 : pt [ 0 ] + 2 , pt [ 1 ] - 1 : pt [ 1 ] + 2 ] ) : index = ( s_index [ 0 ] - 1 + pt [ 0 ] , s_index [ 1 ] - 1 + pt [ 1 ] ) if val == self . UNMARKED : if ( q_data [ index ] >= 0 ) and ( q_data [ index ] < bin_lower ) and ( ( q_data [ index ] <= q_data [ pt ] ) or self . is_closest ( index , center , centers , bin_num ) ) : hills . append ( index ) del foothills [ : ]
Mark points determined to be foothills as globbed so that they are not included in future searches. Also search the neighbors of foothill points to determine whether they should also be considered foothills.
4,399
def quantize ( self , input_grid ) : pixels = { } for i in range ( self . max_bin + 1 ) : pixels [ i ] = [ ] data = ( np . array ( input_grid , dtype = int ) - self . min_thresh ) / self . data_increment data [ data < 0 ] = - 1 data [ data > self . max_bin ] = self . max_bin good_points = np . where ( data >= 0 ) for g in np . arange ( good_points [ 0 ] . shape [ 0 ] ) : pixels [ data [ ( good_points [ 0 ] [ g ] , good_points [ 1 ] [ g ] ) ] ] . append ( ( good_points [ 0 ] [ g ] , good_points [ 1 ] [ g ] ) ) return pixels , data
Quantize a grid into discrete steps based on input parameters.
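An illustrative quantization following the same rule (subtract the threshold, divide by the increment, clip); the grid and parameters are made up:

import numpy as np

grid = np.array([[3, 12, 27], [45, 60, 80]])
min_thresh, data_increment, max_bin = 10, 10, 5

# Values below the threshold become -1; the rest fall into integer bins up to max_bin.
bins = (grid.astype(int) - min_thresh) // data_increment
bins[grid < min_thresh] = -1
bins[bins > max_bin] = max_bin
print(bins)
# [[-1  0  1]
#  [ 3  5  5]]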