| text_prompt | code_prompt |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def string_is_url(test_str):
""" Test to see if a string is a URL or not, defined in this case as a string for which urlparse returns a scheme component False True """ |
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != '' |
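A couple of hedged usage examples, assuming `string_is_url` is in scope and `urlparse` refers to Python's `urllib.parse` module:

```python
# Hedged examples; any string with a scheme (e.g. "https") counts as a URL.
print(string_is_url("https://example.com"))  # True
print(string_is_url("not a url"))            # False
```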
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def item_transaction(self, item) -> Transaction: """Begin transaction state for item. A transaction state exists to prevent writing out to disk, mainly for performance reasons. All changes to the object are delayed until the transaction state exits. This method is thread safe. """ |
items = self.__build_transaction_items(item)
transaction = Transaction(self, item, items)
self.__transactions.append(transaction)
return transaction |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert_data_item(self, before_index, data_item, auto_display: bool = True) -> None: """Insert a new data item into document model. This method is NOT threadsafe. """ |
assert data_item is not None
assert data_item not in self.data_items
assert 0 <= before_index <= len(self.data_items)
assert data_item.uuid not in self.__uuid_to_data_item
# update the session
data_item.session_id = self.session_id
# insert in internal list
self.__insert_data_item(before_index, data_item, do_write=True)
# automatically add a display
if auto_display:
display_item = DisplayItem.DisplayItem(data_item=data_item)
self.append_display_item(display_item) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool=False) -> typing.Optional[typing.Sequence]: """Remove data item from document model. This method is NOT threadsafe. """ |
# remove data item from any computations
return self.__cascade_delete(data_item, safe=safe) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transaction_context(self):
"""Return a context object for a document-wide transaction.""" |
class DocumentModelTransaction:
def __init__(self, document_model):
self.__document_model = document_model
def __enter__(self):
self.__document_model.persistent_object_context.enter_write_delay(self.__document_model)
return self
def __exit__(self, type, value, traceback):
self.__document_model.persistent_object_context.exit_write_delay(self.__document_model)
self.__document_model.persistent_object_context.rewrite_item(self.__document_model)
return DocumentModelTransaction(self) |
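The same write-delay scoping can be expressed more compactly with `contextlib.contextmanager`; a minimal sketch, assuming the `persistent_object_context` methods behave exactly as used above (the `data_item_live` wrapper that follows uses the same inner-class pattern):

```python
import contextlib

# Hedged alternative to the inner DocumentModelTransaction class above.
# The finally block reproduces __exit__, which runs unconditionally.
@contextlib.contextmanager
def transaction_context(self):
    self.persistent_object_context.enter_write_delay(self)
    try:
        yield self
    finally:
        self.persistent_object_context.exit_write_delay(self)
        self.persistent_object_context.rewrite_item(self)
```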
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_item_live(self, data_item):
""" Return a context manager to put the data item in a 'live state'. """ |
class LiveContextManager:
def __init__(self, manager, object):
self.__manager = manager
self.__object = object
def __enter__(self):
self.__manager.begin_data_item_live(self.__object)
return self
def __exit__(self, type, value, traceback):
self.__manager.end_data_item_live(self.__object)
return LiveContextManager(self, data_item) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def begin_data_item_live(self, data_item):
"""Begins a live state for the data item. The live state is propagated to dependent data items. This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive. """ |
with self.__live_data_items_lock:
old_live_count = self.__live_data_items.get(data_item.uuid, 0)
self.__live_data_items[data_item.uuid] = old_live_count + 1
if old_live_count == 0:
data_item._enter_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.begin_data_item_live(dependent_data_item) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def end_data_item_live(self, data_item):
"""Ends a live state for the data item. The live-ness property is propagated to dependent data items, similar to the transactions. This method is thread safe. """ |
with self.__live_data_items_lock:
live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
assert live_count >= 0
self.__live_data_items[data_item.uuid] = live_count
if live_count == 0:
data_item._exit_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.end_data_item_live(dependent_data_item) |
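`begin_data_item_live` and `end_data_item_live` form a reference count: only the 0-to-1 and 1-to-0 transitions toggle the live state, and both propagate to dependents. A minimal standalone sketch of just the counting logic (hypothetical names, dependency propagation omitted):

```python
import threading

class LiveCounter:
    """Hedged sketch of the reference-counted live state above."""
    def __init__(self):
        self._lock = threading.Lock()
        self._counts = {}  # uuid -> live count

    def begin(self, item):
        with self._lock:
            old = self._counts.get(item.uuid, 0)
            self._counts[item.uuid] = old + 1
            if old == 0:
                item._enter_live_state()  # only on the 0 -> 1 transition

    def end(self, item):
        with self._lock:
            count = self._counts.get(item.uuid, 0) - 1
            assert count >= 0
            self._counts[item.uuid] = count
            if count == 0:
                item._exit_live_state()  # only on the 1 -> 0 transition
```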
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel):
"""Construct a data item reference. Construct a data item reference and assign a data item to it. Update data item session id and session metadata. Also connect the data channel processor. This method is thread safe. """ |
session_id = self.session_id
key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id)
data_item_reference = self.get_data_item_reference(key)
with data_item_reference.mutex:
data_item = data_item_reference.data_item
# if we still don't have a data item, create it.
if data_item is None:
data_item = DataItem.DataItem()
data_item.ensure_data_source()
data_item.title = "%s (%s)" % (hardware_source.display_name, data_channel.name) if data_channel.name else hardware_source.display_name
data_item.category = "temporary"
data_item_reference.data_item = data_item
def append_data_item():
self.append_data_item(data_item)
self._update_data_item_reference(key, data_item)
self.__call_soon(append_data_item)
def update_session():
# update the session, but only if necessary (this is an optimization to prevent unnecessary display updates)
if data_item.session_id != session_id:
data_item.session_id = session_id
session_metadata = ApplicationData.get_session_metadata_dict()
if data_item.session_metadata != session_metadata:
data_item.session_metadata = session_metadata
if data_channel.processor:
src_data_channel = hardware_source.data_channels[data_channel.src_channel_index]
src_data_item_reference = self.get_data_item_reference(self.make_data_item_reference_key(hardware_source.hardware_source_id, src_data_channel.channel_id))
data_channel.processor.connect_data_item_reference(src_data_item_reference)
self.__call_soon(update_session)
return data_item_reference |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_data_old(self):
""" Loads time series of 2D data grids from each opened file. The code handles loading a full time series from one file or individual time steps from multiple files. Missing files are supported. """ |
units = ""
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], "units"):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, "_") in file_object.variables.keys():
grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
else:
raise KeyError("{0} not found".format(self.variable))
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, "_") in file_object.variables.keys():
var_name = self.variable.ljust(6, "_")
else:
raise KeyError("{0} not found".format(self.variable))
data[f] = file_object.variables[var_name][0]
if units == "" and hasattr(file_object.variables[var_name], "units"):
units = file_object.variables[var_name].units
else:
data = None
return data, units |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_data(self):
""" Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats. Returns: Array of data loaded from files in (time, y, x) dimensions, Units """ |
units = ""
if self.file_objects[0] is None:
raise IOError("first file object is not available")
var_name, z_index = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys()))
ntimes = 0
if 'time' in self.file_objects[0].variables[var_name].dimensions:
ntimes = len(self.file_objects[0].dimensions['time'])
if ntimes > 1:
if z_index is None:
data = self.file_objects[0].variables[var_name][self.forecast_hours].astype(np.float32)
else:
data = self.file_objects[0].variables[var_name][self.forecast_hours, z_index].astype(np.float32)
else:
y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:]
data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32)
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if z_index is None:
data[f] = file_object.variables[var_name][0]
else:
data[f] = file_object.variables[var_name][0, z_index]
if hasattr(self.file_objects[0].variables[var_name], "units"):
units = self.file_objects[0].variables[var_name].units
return data, units |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_var_name(variable, var_list):
""" Searches var list for variable name, checks other variable name format options. Args: variable (str):
Variable being loaded var_list (list):
List of variables in file. Returns: Name of variable in file containing relevant data, and index of variable z-level if multiple variables contained in same array in file. """ |
z_index = None
if variable in var_list:
var_name = variable
elif variable.ljust(6, "_") in var_list:
var_name = variable.ljust(6, "_")
elif any([variable in v_sub.split("_") for v_sub in var_list]):
var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)]
z_index = var_name.split("_").index(variable)
else:
raise KeyError("{0} not found in {1}".format(variable, var_list))
return var_name, z_index |
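A few hedged examples of how the three matching branches resolve (assuming `format_var_name` is callable as a plain function):

```python
# Branch 1: exact match.
assert format_var_name("uh_max", ["uh_max", "refl"]) == ("uh_max", None)
# Branch 2: names padded to six characters with underscores.
assert format_var_name("cape", ["cape__", "refl"]) == ("cape__", None)
# Branch 3: variable embedded in a combined name; z_index is its position
# within the underscore-split name.
assert format_var_name("500", ["temp_500_700"]) == ("temp_500_700", 1)
```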
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_models(self, model_path):
""" Save machine learning models to pickle files. """ |
for group, condition_model_set in self.condition_models.items():
for model_name, model_obj in condition_model_set.items():
out_filename = model_path + \
"{0}_{1}_condition.pkl".format(group,
model_name.replace(" ", "-"))
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for group, size_model_set in self.size_models.items():
for model_name, model_obj in size_model_set.items():
out_filename = model_path + \
"{0}_{1}_size.pkl".format(group,
model_name.replace(" ", "-"))
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for group, dist_model_set in self.size_distribution_models.items():
for model_type, model_objs in dist_model_set.items():
for model_name, model_obj in model_objs.items():
out_filename = model_path + \
"{0}_{1}_{2}_sizedist.pkl".format(group,
model_name.replace(" ", "-"),
model_type)
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for model_type, track_type_models in self.track_models.items():
for group, track_model_set in track_type_models.items():
for model_name, model_obj in track_model_set.items():
out_filename = model_path + \
"{0}_{1}_{2}_track.pkl".format(group,
model_name.replace(" ", "-"),
model_type)
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
""" Output hail forecast values to csv files by run date and ensemble member. Args: forecasts: mode: csv_path: Returns: """ |
merged_forecasts = pd.merge(forecasts["condition"],
forecasts["dist"],
on=["Step_ID","Track_ID","Ensemble_Member","Forecast_Hour"])
all_members = self.data[mode]["combo"]["Ensemble_Member"]
members = np.unique(all_members)
all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"])
run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
print(run_dates)
for member in members:
for run_date in run_dates:
mem_run_index = (all_run_dates == run_date) & (all_members == member)
member_forecast = merged_forecasts.loc[mem_run_index]
out_file = join(csv_path, "hail_forecasts_{0}_{1}_{2}.csv".format(
self.ensemble_name, member, run_date.strftime(run_date_format)))
member_forecast.to_csv(out_file)
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_forecasts(self):
""" Loads the forecast files and gathers the forecast information into pandas DataFrames. """ |
forecast_path = self.forecast_json_path + "/{0}/{1}/".format(self.run_date.strftime("%Y%m%d"),
self.ensemble_member)
forecast_files = sorted(glob(forecast_path + "*.json"))
for forecast_file in forecast_files:
file_obj = open(forecast_file)
json_obj = json.load(file_obj)
file_obj.close()
track_id = json_obj['properties']["id"]
obs_track_id = json_obj['properties']["obs_track_id"]
forecast_hours = json_obj['properties']['times']
duration = json_obj['properties']['duration']
for f, feature in enumerate(json_obj['features']):
area = np.sum(feature["properties"]["masks"])
step_id = track_id + "_{0:02d}".format(f)
for model_type in self.model_types:
for model_name in self.model_names[model_type]:
prediction = feature['properties'][model_type + "_" + model_name.replace(" ", "-")]
if model_type == "condition":
prediction = [prediction]
row = [track_id, obs_track_id, self.ensemble_name, self.ensemble_member, forecast_hours[f],
f + 1, duration, area] + prediction
self.forecasts[model_type][model_name].loc[step_id] = row |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_obs(self):
""" Loads the track total and step files and merges the information into a single data frame. """ |
track_total_file = self.track_data_csv_path + \
"track_total_{0}_{1}_{2}.csv".format(self.ensemble_name,
self.ensemble_member,
self.run_date.strftime("%Y%m%d"))
track_step_file = self.track_data_csv_path + \
"track_step_{0}_{1}_{2}.csv".format(self.ensemble_name,
self.ensemble_member,
self.run_date.strftime("%Y%m%d"))
track_total_cols = ["Track_ID", "Translation_Error_X", "Translation_Error_Y", "Start_Time_Error"]
track_step_cols = ["Step_ID", "Track_ID", "Hail_Size", "Shape", "Location", "Scale"]
track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols)
track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols)
obs_data = pd.merge(track_step_data, track_total_data, on="Track_ID", how="left")
self.obs = obs_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_obs(self):
""" Match forecasts and observations. """ |
for model_type in self.model_types:
self.matched_forecasts[model_type] = {}
for model_name in self.model_names[model_type]:
self.matched_forecasts[model_type][model_name] = pd.merge(self.forecasts[model_type][model_name],
self.obs, right_on="Step_ID", how="left",
left_index=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def roc(self, model_type, model_name, intensity_threshold, prob_thresholds, query=None):
""" Calculates a ROC curve at a specified intensity threshold. Args: model_type: type of model being evaluated (e.g. size). model_name: machine learning model being evaluated intensity_threshold: forecast bin used as the split point for evaluation prob_thresholds: Array of probability thresholds being evaluated. query: str to filter forecasts based on values of forecasts, obs, and metadata. Returns: A DistributedROC object """ |
roc_obj = DistributedROC(prob_thresholds, 0.5)
if query is not None:
sub_forecasts = self.matched_forecasts[model_type][model_name].query(query)
sub_forecasts = sub_forecasts.reset_index(drop=True)
else:
sub_forecasts = self.matched_forecasts[model_type][model_name]
obs_values = np.zeros(sub_forecasts.shape[0])
if sub_forecasts.shape[0] > 0:
if model_type == "dist":
forecast_values = np.array([gamma_sf(intensity_threshold, *params)
for params in sub_forecasts[self.forecast_bins[model_type]].values])
obs_probs = np.array([gamma_sf(intensity_threshold, *params)
for params in sub_forecasts[self.type_cols[model_type]].values])
obs_values[obs_probs >= 0.01] = 1
elif len(self.forecast_bins[model_type]) > 1:
fbin = np.argmin(np.abs(self.forecast_bins[model_type] - intensity_threshold))
forecast_values = 1 - sub_forecasts[self.forecast_bins[model_type].astype(str)].values.cumsum(axis=1)[:, fbin]
obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1
else:
forecast_values = sub_forecasts[self.forecast_bins[model_type].astype(str)[0]].values
obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1
roc_obj.update(forecast_values, obs_values)
return roc_obj |
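A hedged sketch of what `roc()` does at its core, using only the `DistributedROC` calls seen above (the constructor and `update()`); it assumes the `DistributedROC` class is importable in scope, and the sample values are made up:

```python
import numpy as np

# A DistributedROC accumulates forecast/observation pairs over the
# given probability thresholds; here 0.5 is the observation threshold.
prob_thresholds = np.arange(0, 1.05, 0.05)
roc_obj = DistributedROC(prob_thresholds, 0.5)
forecast_values = np.array([0.1, 0.7, 0.9, 0.3])  # hypothetical probabilities
obs_values = np.array([0.0, 1.0, 1.0, 0.0])       # 1 where obs exceeded threshold
roc_obj.update(forecast_values, obs_values)
```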
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None):
""" Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object. """ |
if query is not None:
dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
dist_forecasts = dist_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
dist_forecasts = self.matched_forecasts["dist"][dist_model_name]
condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
areas = dist_forecasts["Area"].values
for f in np.arange(dist_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
if condition_prob >= condition_threshold:
max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values,
size=(num_samples, areas[f])).max(axis=1))
return max_hail_samples |
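The per-object sampling step can be illustrated standalone; a hedged sketch with made-up gamma parameters (the shape/location/scale ordering mirrors the fitted values stored in the forecast bins above):

```python
import numpy as np
from scipy.stats import gamma

shape, loc, scale = 2.0, 5.0, 4.0  # hypothetical fitted gamma parameters (mm)
num_samples, area = 1000, 50       # realizations, object area in grid cells
# Draw `area` values per realization and keep each realization's maximum,
# giving an empirical distribution of maximum hail size for one object.
samples = gamma.rvs(shape, loc, scale, size=(num_samples, area))
max_hail = np.sort(samples.max(axis=1))
print("median max hail: {0:.1f} mm".format(np.median(max_hail)))
```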
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_params(self):
"""Get signature and params """ |
params = {
'key': self.get_app_key(),
'uid': self.user_id,
'widget': self.widget_code
}
products_number = len(self.products)
if self.get_api_type() == self.API_GOODS:
if isinstance(self.products, list):
if products_number == 1:
product = self.products[0]
if isinstance(product, Product):
post_trial_product = None
if isinstance(product.get_trial_product(), Product):
post_trial_product = product
product = product.get_trial_product()
params['amount'] = product.get_amount()
params['currencyCode'] = product.get_currency_code()
params['ag_name'] = product.get_name()
params['ag_external_id'] = product.get_id()
params['ag_type'] = product.get_type()
if product.get_type() == Product.TYPE_SUBSCRIPTION:
params['ag_period_length'] = product.get_period_length()
params['ag_period_type'] = product.get_period_type()
if product.is_recurring():
params['ag_recurring'] = 1 if product.is_recurring() else 0
if post_trial_product:
params['ag_trial'] = 1
params['ag_post_trial_external_id'] = post_trial_product.get_id()
params['ag_post_trial_period_length'] = post_trial_product.get_period_length()
params['ag_post_trial_period_type'] = post_trial_product.get_period_type()
params['ag_post_trial_name'] = post_trial_product.get_name()
params['post_trial_amount'] = post_trial_product.get_amount()
params['post_trial_currencyCode'] = post_trial_product.get_currency_code()
else:
self.append_to_errors('Not a Product instance')
else:
self.append_to_errors('Only 1 product is allowed')
elif self.get_api_type() == self.API_CART:
index = 0
for product in self.products:
params['external_ids[' + str(index) + ']'] = product.get_id()
if product.get_amount() > 0:
params['prices[' + str(index) + ']'] = product.get_amount()
if product.get_currency_code() != '' and product.get_currency_code() is not None:
params['currencies[' + str(index) + ']'] = product.get_currency_code()
index += 1
params['sign_version'] = signature_version = str(self.get_default_widget_signature())
if not self.is_empty(self.extra_params, 'sign_version'):
signature_version = params['sign_version'] = str(self.extra_params['sign_version'])
params = self.array_merge(params, self.extra_params)
params['sign'] = self.calculate_signature(params, self.get_secret_key(), int(signature_version))
return params |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_forecasts(self):
""" Load the forecast files into memory. """ |
run_date_str = self.run_date.strftime("%Y%m%d")
for model_name in self.model_names:
self.raw_forecasts[model_name] = {}
forecast_file = self.forecast_path + run_date_str + "/" + \
model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member, run_date_str)
forecast_obj = Dataset(forecast_file)
forecast_hours = forecast_obj.variables["forecast_hour"][:]
valid_hour_indices = np.where((self.start_hour <= forecast_hours) & (forecast_hours <= self.end_hour))[0]
for size_threshold in self.size_thresholds:
self.raw_forecasts[model_name][size_threshold] = \
forecast_obj.variables["prob_hail_{0:02d}_mm".format(size_threshold)][valid_hour_indices]
forecast_obj.close() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_window_forecasts(self):
""" Aggregate the forecasts within the specified time windows. """ |
for model_name in self.model_names:
self.window_forecasts[model_name] = {}
for size_threshold in self.size_thresholds:
self.window_forecasts[model_name][size_threshold] = \
np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0)
for sl in self.hour_windows]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dilate_obs(self, dilation_radius):
""" Use a dilation filter to grow positive observation areas by a specified number of grid points :param dilation_radius: Number of times to dilate the grid. :return: """ |
for s in self.size_thresholds:
self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape)
for t in range(self.dilated_obs[s].shape[0]):
self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s, iterations=dilation_radius)] = 1 |
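A hedged mini-example of the dilation step: a single 30 mm observation grown by two iterations of `binary_dilation`'s default cross-shaped structuring element:

```python
import numpy as np
from scipy.ndimage import binary_dilation

obs = np.zeros((7, 7))
obs[3, 3] = 30.0                      # one observation exceeding 25 mm
dilated = np.zeros_like(obs)
dilated[binary_dilation(obs >= 25, iterations=2)] = 1
print(int(dilated.sum()))             # 13 points marked positive
```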
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def roc_curves(self, prob_thresholds):
""" Generate ROC Curve objects for each machine learning model, size threshold, and time window. :param prob_thresholds: Probability thresholds for the ROC Curve :param dilation_radius: Number of times to dilate the observation grid. :return: a dictionary of DistributedROC objects. """ |
all_roc_curves = {}
for model_name in self.model_names:
all_roc_curves[model_name] = {}
for size_threshold in self.size_thresholds:
all_roc_curves[model_name][size_threshold] = {}
for h, hour_window in enumerate(self.hour_windows):
hour_range = (hour_window.start, hour_window.stop)
all_roc_curves[model_name][size_threshold][hour_range] = \
DistributedROC(prob_thresholds, 1)
if self.obs_mask:
all_roc_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h][
self.window_obs[self.mask_variable][h] > 0],
self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
)
else:
all_roc_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h],
self.dilated_obs[size_threshold][h]
)
return all_roc_curves |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reliability_curves(self, prob_thresholds):
""" Output reliability curves for each machine learning model, size threshold, and time window. :param prob_thresholds: :param dilation_radius: :return: """ |
all_rel_curves = {}
for model_name in self.model_names:
all_rel_curves[model_name] = {}
for size_threshold in self.size_thresholds:
all_rel_curves[model_name][size_threshold] = {}
for h, hour_window in enumerate(self.hour_windows):
hour_range = (hour_window.start, hour_window.stop)
all_rel_curves[model_name][size_threshold][hour_range] = \
DistributedReliability(prob_thresholds, 1)
if self.obs_mask:
all_rel_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h][
self.window_obs[self.mask_variable][h] > 0],
self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
)
else:
all_rel_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h],
self.dilated_obs[size_threshold][h]
)
return all_rel_curves |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_map_coordinates(map_file):
""" Loads map coordinates from netCDF or pickle file created by util.makeMapGrids. Args: map_file: Filename for the file containing coordinate information. Returns: Latitude and longitude grids as numpy arrays. """ |
if map_file[-4:] == ".pkl":
with open(map_file, "rb") as map_file_obj:
map_data = pickle.load(map_file_obj)
lon = map_data['lon']
lat = map_data['lat']
else:
map_data = Dataset(map_file)
if "lon" in map_data.variables.keys():
lon = map_data.variables['lon'][:]
lat = map_data.variables['lat'][:]
else:
lon = map_data.variables["XLONG"][0]
lat = map_data.variables["XLAT"][0]
return lon, lat |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_data(self):
""" Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. """ |
data = []
loaded_dates = []
loaded_indices = []
for t, timestamp in enumerate(self.all_dates):
date_str = timestamp.date().strftime("%Y%m%d")
full_path = self.path_start + date_str + "/"
if self.variable in os.listdir(full_path):
full_path += self.variable + "/"
data_files = sorted(os.listdir(full_path))
file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files])
if timestamp in file_dates:
data_file = data_files[np.where(timestamp == file_dates)[0][0]]
print(full_path + data_file)
if data_file[-2:] == "gz":
subprocess.call(["gunzip", full_path + data_file])
file_obj = Nio.open_file(full_path + data_file[:-3])
else:
file_obj = Nio.open_file(full_path + data_file)
var_name = sorted(file_obj.variables.keys())[0]
data.append(file_obj.variables[var_name][:])
if self.lon is None:
self.lon = file_obj.variables["lon_0"][:]
# Translates longitude values from 0:360 to -180:180
if np.count_nonzero(self.lon > 180) > 0:
self.lon -= 360
self.lat = file_obj.variables["lat_0"][:]
file_obj.close()
if data_file[-2:] == "gz":
subprocess.call(["gzip", full_path + data_file[:-3]])
else:
subprocess.call(["gzip", full_path + data_file])
loaded_dates.append(timestamp)
loaded_indices.append(t)
if len(loaded_dates) > 0:
self.loaded_dates = pd.DatetimeIndex(loaded_dates)
self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999
self.data[loaded_indices] = np.array(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def interpolate_grid(self, in_lon, in_lat):
""" Interpolates MRMS data to a different grid using cubic bivariate splines """ |
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
for d in range(self.data.shape[0]):
print("Loading ", d, self.variable, self.start_date)
if self.data[d].max() > -999:
step = self.data[d]
step[step < 0] = 0
if self.lat[-1] < self.lat[0]:
spline = RectBivariateSpline(self.lat[::-1], self.lon, step[::-1], kx=3, ky=3)
else:
spline = RectBivariateSpline(self.lat, self.lon, step, kx=3, ky=3)
print("Evaluating", d, self.variable, self.start_date)
flat_data = spline.ev(in_lat.ravel(), in_lon.ravel())
out_data[d] = flat_data.reshape(in_lon.shape)
del spline
else:
print(d, " is missing")
out_data[d] = -9999
return out_data |
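A hedged mini-example of the spline step: `RectBivariateSpline` requires strictly increasing axes, which is why the code above reverses a descending latitude array before fitting.

```python
import numpy as np
from scipy.interpolate import RectBivariateSpline

lat = np.linspace(30, 40, 11)          # strictly increasing 1D axes
lon = np.linspace(-100, -90, 11)
field = np.add.outer(lat, lon)         # toy data on the coarse grid
spline = RectBivariateSpline(lat, lon, field, kx=3, ky=3)
in_lat = np.array([[31.5, 32.5], [33.5, 34.5]])     # 2D target coordinates
in_lon = np.array([[-99.5, -98.5], [-97.5, -96.5]])
out = spline.ev(in_lat.ravel(), in_lon.ravel()).reshape(in_lon.shape)
```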
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def max_neighbor(self, in_lon, in_lat, radius=0.05):
""" Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data """ |
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)
out_indices = np.indices(out_data.shape[1:])
out_rows = out_indices[0].ravel()
out_cols = out_indices[1].ravel()
for d in range(self.data.shape[0]):
nz_points = np.where(self.data[d] > 0)
if len(nz_points[0]) > 0:
nz_vals = self.data[d][nz_points]
nz_rank = np.argsort(nz_vals)
original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]], self.lon[nz_points[1][nz_rank]])).T)
all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0)
for n, neighbors in enumerate(all_neighbors):
if len(neighbors) > 0:
out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n]
return out_data |
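The key trick above is writing source values in ascending order, so the largest value within the radius wins. A hedged mini-example with two source points and one contested target:

```python
import numpy as np
from scipy.spatial import cKDTree

src_tree = cKDTree(np.array([[0.0, 0.0], [0.1, 0.1]]))   # sources, ascending value
target_tree = cKDTree(np.array([[0.0, 0.05], [0.5, 0.5]]))
values = np.array([10.0, 20.0])
out = np.zeros(2)
# Each target within radius 0.15 of a source receives that source's value;
# later (larger) sources overwrite earlier ones.
for n, neighbors in enumerate(src_tree.query_ball_tree(target_tree, 0.15, p=2)):
    out[neighbors] = values[n]
print(out)  # [20.  0.] -- the contested target keeps the larger value
```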
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00", interp_type="spline"):
""" Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available. """ |
if interp_type == "spline":
out_data = self.interpolate_grid(in_lon, in_lat)
else:
out_data = self.max_neighbor(in_lon, in_lat)
if not os.access(out_path + self.variable, os.R_OK):
try:
os.mkdir(out_path + self.variable)
except OSError:
print(out_path + self.variable + " already created")
out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable,
self.start_date.strftime("%Y%m%d-%H:%M"),
self.end_date.strftime("%Y%m%d-%H:%M"))
out_obj = Dataset(out_file, "w")
out_obj.createDimension("time", out_data.shape[0])
out_obj.createDimension("y", out_data.shape[1])
out_obj.createDimension("x", out_data.shape[2])
data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True,
fill_value=-9999.0,
least_significant_digit=3)
data_var[:] = out_data
data_var.long_name = self.variable
data_var.coordinates = "latitude longitude"
if "MESH" in self.variable or "QPE" in self.variable:
data_var.units = "mm"
elif "Reflectivity" in self.variable:
data_var.units = "dBZ"
elif "Rotation" in self.variable:
data_var.units = "s-1"
else:
data_var.units = ""
out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True)
out_lon[:] = in_lon
out_lon.units = "degrees_east"
out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True)
out_lat[:] = in_lat
out_lat.units = "degrees_north"
dates = out_obj.createVariable("time", "i8", ("time",), zlib=True)
dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64)
dates.long_name = "Valid date"
dates.units = date_unit
out_obj.Conventions="CF-1.6"
out_obj.close()
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_generator_by_id(hardware_source_id, sync=True):
""" Return a generator for data. :param bool sync: whether to wait for current frame to finish then collect next frame NOTE: a new ndarray is created for each call. """ |
hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
def get_last_data():
return hardware_source.get_next_xdatas_to_finish()[0].data.copy()
yield get_last_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_hardware_aliases_config_file(config_path):
""" Parse config file for aliases and automatically register them. Returns True if alias file was found and parsed (successfully or unsuccessfully). Returns False if alias file was not found. Config file is a standard .ini file with a section """ |
if os.path.exists(config_path):
logging.info("Parsing alias file {:s}".format(config_path))
try:
config = configparser.ConfigParser()
config.read(config_path)
for section in config.sections():
device = config.get(section, "device")
hardware_alias = config.get(section, "hardware_alias")
display_name = config.get(section, "display_name")
try:
logging.info("Adding alias {:s} for device {:s}, display name: {:s} ".format(hardware_alias, device, display_name))
HardwareSourceManager().make_instrument_alias(device, hardware_alias, _(display_name))
except Exception as e:
logging.info("Error creating hardware alias {:s} for device {:s} ".format(hardware_alias, device))
logging.info(traceback.format_exc())
except Exception as e:
logging.info("Error reading alias file from: " + config_path)
logging.info(traceback.format_exc())
return True
return False |
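For reference, a hypothetical alias file matching the three keys read above might look like this:

```ini
; hypothetical example; one section per alias
[scan_controller]
device = superscan
hardware_alias = scan
display_name = Scan Controller
```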
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_instrument_alias(self, instrument_id, alias_instrument_id, display_name):
""" Configure an alias. Callers can use the alias to refer to the instrument or hardware source. The alias should be lowercase, no spaces. The display name may be used to display alias to the user. Neither the original instrument or hardware source id and the alias id should ever be visible to end users. :param str instrument_id: the hardware source id (lowercase, no spaces) :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces) :param str display_name: the display name for the alias """ |
self.__aliases[alias_instrument_id] = (instrument_id, display_name)
for f in self.aliases_updated:
f() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None: """Called from hardware source when new data arrives.""" |
self.__state = state
self.__sub_area = sub_area
hardware_source_id = self.__hardware_source.hardware_source_id
channel_index = self.index
channel_id = self.channel_id
channel_name = self.name
metadata = copy.deepcopy(data_and_metadata.metadata)
hardware_source_metadata = dict()
hardware_source_metadata["hardware_source_id"] = hardware_source_id
hardware_source_metadata["channel_index"] = channel_index
if channel_id is not None:
hardware_source_metadata["reference_key"] = "_".join([hardware_source_id, channel_id])
hardware_source_metadata["channel_id"] = channel_id
else:
hardware_source_metadata["reference_key"] = hardware_source_id
if channel_name is not None:
hardware_source_metadata["channel_name"] = channel_name
if view_id:
hardware_source_metadata["view_id"] = view_id
metadata.setdefault("hardware_source", dict()).update(hardware_source_metadata)
data = data_and_metadata.data
master_data = self.__data_and_metadata.data if self.__data_and_metadata else None
data_matches = master_data is not None and data.shape == master_data.shape and data.dtype == master_data.dtype
if data_matches and sub_area is not None:
top = sub_area[0][0]
bottom = sub_area[0][0] + sub_area[1][0]
left = sub_area[0][1]
right = sub_area[0][1] + sub_area[1][1]
if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[1]:
master_data = numpy.copy(master_data)
master_data[top:bottom, left:right] = data[top:bottom, left:right]
else:
master_data = numpy.copy(data)
else:
master_data = data # numpy.copy(data). assume data does not need a copy.
data_descriptor = data_and_metadata.data_descriptor
intensity_calibration = data_and_metadata.intensity_calibration if data_and_metadata else None
dimensional_calibrations = data_and_metadata.dimensional_calibrations if data_and_metadata else None
timestamp = data_and_metadata.timestamp
new_extended_data = DataAndMetadata.new_data_and_metadata(master_data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor)
self.__data_and_metadata = new_extended_data
self.data_channel_updated_event.fire(new_extended_data)
self.is_dirty = True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Called from hardware source when data starts streaming.""" |
old_start_count = self.__start_count
self.__start_count += 1
if old_start_count == 0:
self.data_channel_start_event.fire() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect_data_item_reference(self, data_item_reference):
"""Connect to the data item reference, creating a crop graphic if necessary. If the data item reference does not yet have an associated data item, add a listener and wait for the data item to be set, then connect. """ |
display_item = data_item_reference.display_item
data_item = display_item.data_item if display_item else None
if data_item and display_item:
self.__connect_display(display_item)
else:
def data_item_reference_changed():
self.__data_item_reference_changed_event_listener.close()
self.connect_data_item_reference(data_item_reference) # ugh. recursive mess.
self.__data_item_reference_changed_event_listener = data_item_reference.data_item_reference_changed_event.listen(data_item_reference_changed) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grab_earliest(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the earliest data from the buffer, blocking until one is available.""" |
timeout = timeout if timeout is not None else 10.0
with self.__buffer_lock:
if len(self.__buffer) == 0:
done_event = threading.Event()
self.__done_events.append(done_event)
self.__buffer_lock.release()
done = done_event.wait(timeout)
self.__buffer_lock.acquire()
if not done:
raise Exception("Could not grab latest.")
return self.__buffer.pop(0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grab_next(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to finish from the buffer, blocking until one is available.""" |
with self.__buffer_lock:
self.__buffer = list()
return self.grab_latest(timeout) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grab_following(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]: """Grab the next data to start from the buffer, blocking until one is available.""" |
self.grab_next(timeout)
return self.grab_next(timeout) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pause(self) -> None: """Pause recording. Thread safe and UI safe.""" |
with self.__state_lock:
if self.__state == DataChannelBuffer.State.started:
self.__state = DataChannelBuffer.State.paused |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resume(self) -> None: """Resume recording after pause. Thread safe and UI safe.""" |
with self.__state_lock:
if self.__state == DataChannelBuffer.State.paused:
self.__state = DataChannelBuffer.State.started |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlargest(n, mapping):
""" Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``next(zip(*heapq.nlargest(mapping.items(), key=lambda x: x[1])))`` Returns ------- list of up to n keys from the mapping """ |
try:
it = mapping.iteritems()
except AttributeError:
it = iter(mapping.items())
pq = minpq()
try:
for i in range(n):
pq.additem(*next(it))
except StopIteration:
pass
try:
while True:  # runs until next(it) raises StopIteration
pq.pushpopitem(*next(it))
except StopIteration:
pass
out = list(pq.popkeys())
out.reverse()
return out |
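A hedged usage example: the keys of the two largest values, in descending order of value.

```python
scores = {"a": 5, "b": 1, "c": 9, "d": 7}
print(nlargest(2, scores))  # ['c', 'd']
```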
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fromkeys(cls, iterable, value, **kwargs):
""" Return a new pqict mapping keys from an iterable to the same value. """ |
return cls(((k, value) for k in iterable), **kwargs) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
""" Return a shallow copy of a pqdict. """ |
return self.__class__(self, key=self._keyfn, precedes=self._precedes) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pop(self, key=__marker, default=__marker):
""" If ``key`` is in the pqdict, remove it and return its priority value, else return ``default``. If ``default`` is not provided and ``key`` is not in the pqdict, raise a ``KeyError``. If ``key`` is not provided, remove the top item and return its key, or raise ``KeyError`` if the pqdict is empty. """ |
heap = self._heap
position = self._position
# pq semantics: remove and return top *key* (value is discarded)
if key is self.__marker:
if not heap:
raise KeyError('pqdict is empty')
key = heap[0].key
del self[key]
return key
# dict semantics: remove and return *value* mapped from key
try:
pos = position.pop(key) # raises KeyError
except KeyError:
if default is self.__marker:
raise
return default
else:
node_to_delete = heap[pos]
end = heap.pop()
if end is not node_to_delete:
heap[pos] = end
position[end.key] = pos
self._reheapify(pos)
value = node_to_delete.value
del node_to_delete
return value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def popitem(self):
""" Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """ |
heap = self._heap
position = self._position
try:
end = heap.pop(-1)
except IndexError:
raise KeyError('pqdict is empty')
if heap:
node = heap[0]
heap[0] = end
position[end.key] = 0
self._sink(0)
else:
node = end
del position[node.key]
return node.key, node.value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def topitem(self):
""" Return the item with highest priority. Raises ``KeyError`` if pqdict is empty. """ |
try:
node = self._heap[0]
except IndexError:
raise KeyError('pqdict is empty')
return node.key, node.value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def additem(self, key, value):
""" Add a new item. Raises ``KeyError`` if key is already in the pqdict. """ |
if key in self._position:
raise KeyError('%s is already in the queue' % repr(key))
self[key] = value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pushpopitem(self, key, value, node_factory=_Node):
""" Equivalent to inserting a new item followed by removing the top priority item, but faster. Raises ``KeyError`` if the new key is already in the pqdict. """ |
heap = self._heap
position = self._position
precedes = self._precedes
prio = self._keyfn(value) if self._keyfn else value
node = node_factory(key, value, prio)
if key in self:
raise KeyError('%s is already in the queue' % repr(key))
if heap and precedes(heap[0].prio, node.prio):
node, heap[0] = heap[0], node
position[key] = 0
del position[node.key]
self._sink(0)
return node.key, node.value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def updateitem(self, key, new_val):
""" Update the priority value of an existing item. Raises ``KeyError`` if key is not in the pqdict. """ |
if key not in self._position:
raise KeyError(key)
self[key] = new_val |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_key(self, key, new_key):
""" Replace the key of an existing heap node in place. Raises ``KeyError`` if the key to replace does not exist or if the new key is already in the pqdict. """ |
heap = self._heap
position = self._position
if new_key in self:
raise KeyError('%s is already in the queue' % repr(new_key))
pos = position.pop(key) # raises appropriate KeyError
position[new_key] = pos
heap[pos].key = new_key |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def swap_priority(self, key1, key2):
""" Fast way to swap the priority level of two items in the pqdict. Raises ``KeyError`` if either key does not exist. """ |
heap = self._heap
position = self._position
if key1 not in self or key2 not in self:
raise KeyError
pos1, pos2 = position[key1], position[key2]
heap[pos1].key, heap[pos2].key = key2, key1
position[key1], position[key2] = pos2, pos1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def heapify(self, key=__marker):
""" Repair a broken heap. If the state of an item's priority value changes you can re-sort the relevant item only by providing ``key``. """ |
if key is self.__marker:
n = len(self._heap)
for pos in reversed(range(n//2)):
self._sink(pos)
else:
try:
pos = self._position[key]
except KeyError:
raise KeyError(key)
self._reheapify(pos) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def package_has_version_file(package_name):
""" Check to make sure _version.py is contained in the package """ |
version_file_path = helpers.package_file_path('_version.py', package_name)
return os.path.isfile(version_file_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_project_name():
""" Grab the project name out of setup.py """ |
setup_py_content = helpers.get_file_content('setup.py')
ret = helpers.value_of_named_argument_in_function(
'name', 'setup', setup_py_content, resolve_varname=True
)
if ret and ret[0] == ret[-1] and ret[0] in ('"', "'"):  # strip matching quotes
ret = ret[1:-1]
return ret |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_version(package_name, ignore_cache=False):
""" Get the version which is currently configured by the package """ |
def find_version():
return helpers.regex_in_package_file(
VERSION_SET_REGEX, '_version.py', package_name, return_match=True
)
if ignore_cache:
with microcache.temporarily_disabled():
found = find_version()
else:
found = find_version()
if found is None:
raise ProjectError('found _version.py in {}, but __version__ is not defined'.format(package_name))
current_version = found['version']
return current_version |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_version(package_name, version_str):
""" Set the version in _version.py to version_str """ |
current_version = get_version(package_name)
version_file_path = helpers.package_file_path('_version.py', package_name)
version_file_content = helpers.get_file_content(version_file_path)
version_file_content = version_file_content.replace(current_version, version_str)
with open(version_file_path, 'w') as version_file:
version_file.write(version_file_content) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def version_is_valid(version_str):
""" Check to see if the version specified is a valid as far as pkg_resources is concerned False True """ |
try:
packaging.version.Version(version_str)
except packaging.version.InvalidVersion:
return False
return True |
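A couple of hedged examples (validity follows PEP 440 via `packaging`):

```python
print(version_is_valid("1.2.3"))          # True
print(version_is_valid("not a version"))  # False
```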
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """ |
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using pypicloud api to find all versions """ |
api_url = index_url
for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
if api_url.endswith(suffix):
api_url = api_url[:-len(suffix)] + '/api/package'
break
url = '/'.join((api_url, project_name))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return [p['version'] for p in response.json()['packages']]
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
""" Check to see if the version specified has already been uploaded to the configured index """ |
all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
return version_str in all_versions |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_readme_to_rst():
""" Attempt to convert a README.md file into README.rst """ |
project_files = os.listdir('.')
for filename in project_files:
if filename.lower() == 'readme':
raise ProjectError(
'found {} in project directory... '.format(filename) +
'not sure what to do with it, refusing to convert'
)
elif filename.lower() == 'readme.rst':
raise ProjectError(
'found {} in project directory... '.format(filename) +
'refusing to overwrite'
)
for filename in project_files:
if filename.lower() == 'readme.md':
rst_filename = 'README.rst'
logger.info('converting {} to {}'.format(filename, rst_filename))
try:
rst_content = pypandoc.convert(filename, 'rst')
with open(rst_filename, 'w') as rst_file:
rst_file.write(rst_content)
return
except OSError as e:
raise ProjectError(
'could not convert readme to rst due to pypandoc error:' + os.linesep + str(e)
)
raise ProjectError('could not find any README.md file to convert') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_packaged_files(package_name):
""" Collect relative paths to all files which have already been packaged """ |
if not os.path.isdir('dist'):
return []
return [os.path.join('dist', filename) for filename in os.listdir('dist')] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def multiple_packaged_versions(package_name):
""" Look through built package directory and see if there are multiple versions there """ |
dist_files = os.listdir('dist')
versions = set()
for filename in dist_files:
version = funcy.re_find(r'{}-(.+)\.tar\.gz'.format(package_name), filename)
if version:
versions.add(version)
return len(versions) > 1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):
""" Calculate the neighborhood probability over the full period of the forecast. Args: radius: circular radius from each point in km. smoothing: width of Gaussian smoother in km. threshold: intensity of exceedance. stride: number of grid points to skip for the reduced neighborhood grid. start_time: first forecast hour of the evaluation window. end_time: end of the forecast-hour window (exclusive). Returns: (neighborhood probabilities) """ |
neighbor_x = self.x[::stride, ::stride]
neighbor_y = self.y[::stride, ::stride]
neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))
print('Forecast Hours: {0}-{1}'.format(start_time, end_time))
for m in range(len(self.members)):
period_max = self.data[m, start_time:end_time, :, :].max(axis=0)
valid_i, valid_j = np.where(period_max >= threshold)
print(self.members[m], len(valid_i))
if len(valid_i) > 0:
var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T)
exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)
exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape)
neighbor_prob[m][exceed_i, exceed_j] = 1
if smoothing > 0:
neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing, mode='constant')
return neighbor_prob |
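A standalone sketch of the neighborhood step above, assuming point coordinates in km; the toy points are hypothetical:
import numpy as np
from scipy.spatial import cKDTree
grid_pts = np.array([[0.0, 0.0], [10.0, 0.0], [50.0, 0.0]])
neighbor_kd_tree = cKDTree(grid_pts)
exceed_kd_tree = cKDTree(np.array([[1.0, 0.0]]))  # one exceedance point
hits = np.unique(np.concatenate(exceed_kd_tree.query_ball_tree(neighbor_kd_tree, 25.0))).astype(int)
print(hits)  # [0 1] -- grid points within 25 km of the exceedance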
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_map_info(self, map_file):
""" Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection. Args: map_file: File specifying the projection information. """ |
if self.ensemble_name.upper() == "SSEF":
proj_dict, grid_dict = read_arps_map_file(map_file)
self.dx = int(grid_dict["dx"])
mapping_data = make_proj_grids(proj_dict, grid_dict)
for m, v in mapping_data.items():
setattr(self, m, v)
self.i, self.j = np.indices(self.lon.shape)
self.proj = get_proj_obj(proj_dict)
elif self.ensemble_name.upper() in ["NCAR", "NCARSTORM", "HRRR", "VSE", "HREFV2"]:
proj_dict, grid_dict = read_ncar_map_file(map_file)
if self.member_name[0:7] == "1km_pbl": # Don't just look at the first 3 characters. You have to differentiate '1km_pbl1' and '1km_on_3km_pbl1'
grid_dict["dx"] = 1000
grid_dict["dy"] = 1000
grid_dict["sw_lon"] = 258.697
grid_dict["sw_lat"] = 23.999
grid_dict["ne_lon"] = 282.868269206236
grid_dict["ne_lat"] = 36.4822338520542
self.dx = int(grid_dict["dx"])
mapping_data = make_proj_grids(proj_dict, grid_dict)
for m, v in mapping_data.items():
setattr(self, m, v)
self.i, self.j = np.indices(self.lon.shape)
self.proj = get_proj_obj(proj_dict) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_geojson(filename):
""" Reads a geojson file containing an STObject and initializes a new STObject from the information in the file. Args: filename: Name of the geojson file Returns: an STObject """ |
json_file = open(filename)
data = json.load(json_file)
json_file.close()
times = data["properties"]["times"]
main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])
attribute_data = dict()
for feature in data["features"]:
for main_name in main_data.keys():
main_data[main_name].append(np.array(feature["properties"][main_name]))
for k, v in feature["properties"]["attributes"].items():
if k not in attribute_data.keys():
attribute_data[k] = [np.array(v)]
else:
attribute_data[k].append(np.array(v))
kwargs = {}
for kw in ["dx", "step", "u", "v"]:
if kw in data["properties"].keys():
kwargs[kw] = data["properties"][kw]
sto = STObject(main_data["timesteps"], main_data["masks"], main_data["x"], main_data["y"],
main_data["i"], main_data["j"], times[0], times[-1], **kwargs)
for k, v in attribute_data.items():
sto.attributes[k] = v
return sto |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def center_of_mass(self, time):
""" Calculate the center of mass at a given timestep. Args: time: Time at which the center of mass calculation is performed Returns: The x- and y-coordinates of the center of mass. """ |
if self.start_time <= time <= self.end_time:
diff = time - self.start_time
valid = np.flatnonzero(self.masks[diff] != 0)
if valid.size > 0:
com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *
self.x[diff].ravel()[valid])
com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(self.timesteps[diff].ravel()[valid] *
self.y[diff].ravel()[valid])
else:
com_x = np.mean(self.x[diff])
com_y = np.mean(self.y[diff])
else:
com_x = None
com_y = None
return com_x, com_y |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trajectory(self):
""" Calculates the center of mass for each time step and outputs an array Returns: """ |
traj = np.zeros((2, self.times.size))
for t, time in enumerate(self.times):
traj[:, t] = self.center_of_mass(time)
return traj |
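The weighted centroid in center_of_mass above reduces to an intensity-weighted mean; a tiny worked example with made-up values:
import numpy as np
vals = np.array([1.0, 3.0])   # intensities at the masked points
xs = np.array([0.0, 10.0])
com_x = (vals * xs).sum() / vals.sum()  # 30 / 4 = 7.5, pulled toward the brighter point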
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_corner(self, time):
""" Gets the corner array indices of the STObject at a given time that corresponds to the upper left corner of the bounding box for the STObject. Args: time: time at which the corner is being extracted. Returns: corner index. """ |
if self.start_time <= time <= self.end_time:
diff = time - self.start_time
return self.i[diff][0, 0], self.j[diff][0, 0]
else:
return -1, -1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size(self, time):
""" Gets the size of the object at a given time. Args: time: Time value being queried. Returns: size of the object in pixels """ |
if self.start_time <= time <= self.end_time:
return self.masks[time - self.start_time].sum()
else:
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def max_intensity(self, time):
""" Calculate the maximum intensity found at a timestep. """ |
ti = np.where(time == self.times)[0][0]
return self.timesteps[ti].max() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def boundary_polygon(self, time):
""" Get coordinates of object boundary in counter-clockwise order """ |
ti = np.where(time == self.times)[0][0]
com_x, com_y = self.center_of_mass(time)
# If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.
# But if all perimeter points are masked, find_boundaries() does not find the object.
# Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.
padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)
chull = convex_hull_image(padded_mask)
boundary_image = find_boundaries(chull, mode='inner', background=0)
# Now remove the padding.
boundary_image = boundary_image[1:-1,1:-1]
boundary_x = self.x[ti].ravel()[boundary_image.ravel()]
boundary_y = self.y[ti].ravel()[boundary_image.ravel()]
r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)
theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360
polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)], dtype=[('r', 'f4'), ('theta', 'f4')])
coord_order = np.argsort(polar_coords, order=['theta', 'r'])
ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])
return ordered_coords |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def estimate_motion(self, time, intensity_grid, max_u, max_v):
""" Estimate the motion of the object with cross-correlation on the intensity values from the previous time step. Args: time: time being evaluated. intensity_grid: 2D array of intensities used in cross correlation. max_u: Maximum x-component of motion. Used to limit search area. max_v: Maximum y-component of motion. Used to limit search area Returns: u, v, and the minimum error. """ |
ti = np.where(time == self.times)[0][0]
mask_vals = np.where(self.masks[ti].ravel() == 1)
i_vals = self.i[ti].ravel()[mask_vals]
j_vals = self.j[ti].ravel()[mask_vals]
obj_vals = self.timesteps[ti].ravel()[mask_vals]
u_shifts = np.arange(-max_u, max_u + 1)
v_shifts = np.arange(-max_v, max_v + 1)
min_error = 99999999999.0
best_u = 0
best_v = 0
for u in u_shifts:
j_shift = j_vals - u
for v in v_shifts:
i_shift = i_vals - v
if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) &
(0 <= j_shift) & (j_shift < intensity_grid.shape[1])):
shift_vals = intensity_grid[i_shift, j_shift]
else:
shift_vals = np.zeros(i_shift.shape)
# This isn't correlation; it is mean absolute error.
error = np.abs(shift_vals - obj_vals).mean()
if error < min_error:
min_error = error
best_u = u * self.dx
best_v = v * self.dx
# 60 seems arbitrarily high
#if min_error > 60:
# best_u = 0
# best_v = 0
self.u[ti] = best_u
self.v[ti] = best_v
return best_u, best_v, min_error |
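A stripped-down illustration of scoring one candidate (u, v) shift with mean absolute error, mirroring the inner loop above; the grid and coordinates are toy values:
import numpy as np
grid = np.arange(25.0).reshape(5, 5)
i_vals = np.array([2, 2]); j_vals = np.array([2, 3])
obj_vals = grid[i_vals, j_vals]
u, v = 1, 0                                   # candidate shift in grid points
shift_vals = grid[i_vals - v, j_vals - u]
error = np.abs(shift_vals - obj_vals).mean()  # 1.0 for this shift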
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def count_overlap(self, time, other_object, other_time):
""" Counts the number of points that overlap between this STObject and another STObject. Used for tracking. """ |
ti = np.where(time == self.times)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
oti = np.where(other_time == other_object.times)[0][0]
obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ('y', int)])
other_obj_coords = np.zeros(other_object.masks[oti].sum(), dtype=[('x', int), ('y', int)])
obj_coords['x'] = self.i[ti].ravel()[ma]
obj_coords['y'] = self.j[ti].ravel()[ma]
other_obj_coords['x'] = other_object.i[oti][other_object.masks[oti] == 1]
other_obj_coords['y'] = other_object.j[oti][other_object.masks[oti] == 1]
return float(np.intersect1d(obj_coords,
other_obj_coords).size) / np.maximum(self.masks[ti].sum(),
other_object.masks[oti].sum()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_attribute_array(self, data_array, var_name):
""" Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array """ |
if var_name not in self.attributes.keys():
self.attributes[var_name] = []
for t in range(self.times.size):
self.attributes[var_name].append(data_array[self.i[t], self.j[t]]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_tendency_grid(self, model_grid):
""" Extracts the difference in model outputs Args: model_grid: ModelOutput or ModelGrid object. """ |
var_name = model_grid.variable + "-tendency"
self.attributes[var_name] = []
timesteps = np.arange(self.start_time, self.end_time + 1)
for ti, t in enumerate(timesteps):
t_index = t - model_grid.start_hour
self.attributes[var_name].append(
model_grid.data[t_index, self.i[ti], self.j[ti]] - model_grid.data[t_index - 1, self.i[ti], self.j[ti]]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_timestep_statistic(self, statistic, time):
""" Calculate statistics from the primary attribute of the StObject. Args: statistic: statistic being calculated time: Timestep being investigated Returns: Value of the statistic """ |
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
elif statistic == 'median':
stat_val = np.median(self.timesteps[ti].ravel()[ma])
elif 'percentile' in statistic:
per = int(statistic.split("_")[1])
stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)
elif 'dt' in statistic:
stat_name = statistic[:-3]
if ti == 0:
stat_val = 0
else:
stat_val = self.calc_timestep_statistic(stat_name, time) -\
self.calc_timestep_statistic(stat_name, time - 1)
else:
stat_val = np.nan
return stat_val |
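The statistic argument is a plain string parsed by the branches above; a hypothetical STObject instance obj would be queried like:
mean_val = obj.calc_timestep_statistic('mean', 3)
p90 = obj.calc_timestep_statistic('percentile_90', 3)   # split on '_' -> 90th percentile
d_mean = obj.calc_timestep_statistic('mean_dt', 3)      # 'mean' at t minus 'mean' at t-1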
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_shape_step(self, stat_names, time):
""" Calculate shape statistics for a single time step Args: stat_names: List of shape statistics calculated from region props time: Time being investigated Returns: List of shape statistics """ |
ti = np.where(self.times == time)[0][0]
props = regionprops(self.masks[ti], self.timesteps[ti])[0]
shape_stats = []
for stat_name in stat_names:
if "moments_hu" in stat_name:
hu_index = int(stat_name.split("_")[-1])
hu_name = "_".join(stat_name.split("_")[:-1])
hu_val = np.log(props[hu_name][hu_index])
if np.isnan(hu_val):
shape_stats.append(0)
else:
shape_stats.append(hu_val)
else:
shape_stats.append(props[stat_name])
return shape_stats |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_geojson(self, filename, proj, metadata=None):
""" Output the data in the STObject to a geoJSON file. Args: filename: Name of the file proj: PyProj object for converting the x and y coordinates back to latitude and longitue values. metadata: Metadata describing the object to be included in the top-level properties. """ |
if metadata is None:
metadata = {}
json_obj = {"type": "FeatureCollection", "features": [], "properties": {}}
json_obj['properties']['times'] = self.times.tolist()
json_obj['properties']['dx'] = self.dx
json_obj['properties']['step'] = self.step
json_obj['properties']['u'] = self.u.tolist()
json_obj['properties']['v'] = self.v.tolist()
for k, v in metadata.items():
json_obj['properties'][k] = v
for t, time in enumerate(self.times):
feature = {"type": "Feature",
"geometry": {"type": "Polygon"},
"properties": {}}
boundary_coords = self.boundary_polygon(time)
lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True))
lonlat_list = lonlat.T.tolist()
if len(lonlat_list) > 0:
lonlat_list.append(lonlat_list[0])
feature["geometry"]["coordinates"] = [lonlat_list]
for attr in ["timesteps", "masks", "x", "y", "i", "j"]:
feature["properties"][attr] = getattr(self, attr)[t].tolist()
feature["properties"]["attributes"] = {}
for attr_name, steps in self.attributes.items():
feature["properties"]["attributes"][attr_name] = steps[t].tolist()
json_obj['features'].append(feature)
file_obj = open(filename, "w")
json.dump(json_obj, file_obj, indent=1, sort_keys=True)
file_obj.close()
return |
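A hedged round-trip sketch; the projection parameters and filename are hypothetical and must match the grid the object was identified on:
import pyproj
proj = pyproj.Proj(proj='lcc', lat_1=32.0, lat_2=46.0, lat_0=39.0, lon_0=-98.0)
sto.to_geojson('track_step_0001.json', proj, metadata={'ensemble': 'demo'})
sto_copy = read_geojson('track_step_0001.json')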
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def model(self, v=None):
"Returns the model of node v"
if v is None:
v = self.estopping
hist = self.hist
trace = self.trace(v)
ins = None
if self._base._probability_calibration is not None:
node = hist[-1]
node.normalize()
X = np.array([x.full_array() for x in node.hy]).T
y = np.array(self._base._y_klass.full_array())
mask = np.ones(X.shape[0], dtype=bool)
mask[np.array(self._base._mask_ts.index)] = False
ins = self._base._probability_calibration().fit(X[mask], y[mask])
if self._classifier:
nclasses = self._labels.shape[0]
else:
nclasses = None
m = Model(trace, hist, nvar=self._base._nvar,
classifier=self._classifier, labels=self._labels,
probability_calibration=ins, nclasses=nclasses)
return m |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def trace(self, n):
"Restore the position in the history of individual v's nodes"
trace_map = {}
self._trace(n, trace_map)
s = list(trace_map.keys())
s.sort()
return s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tournament(self, negative=False):
"""Tournament selection and when negative is True it performs negative tournament selection""" |
if self.generation <= self._random_generations and not negative:
return self.random_selection()
if not self._negative_selection and negative:
return self.random_selection(negative=negative)
vars = self.random()
fit = [(k, self.population[x].fitness) for k, x in enumerate(vars)]
if negative:
fit = min(fit, key=lambda x: x[1])
else:
fit = max(fit, key=lambda x: x[1])
index = fit[0]
return vars[index] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def create_population(self):
"Create the initial population"
base = self._base
if base._share_inputs:
used_inputs_var = SelectNumbers([x for x in range(base.nvar)])
used_inputs_naive = used_inputs_var
elif base._pr_variable == 0:
used_inputs_var = SelectNumbers([])
used_inputs_naive = SelectNumbers([x for x in range(base.nvar)])
elif base._pr_variable == 1:
used_inputs_var = SelectNumbers([x for x in range(base.nvar)])
used_inputs_naive = SelectNumbers([])
else:
used_inputs_var = SelectNumbers([x for x in range(base.nvar)])
used_inputs_naive = SelectNumbers([x for x in range(base.nvar)])
nb_input = Inputs(base, used_inputs_naive, functions=base._input_functions)
while ((base._all_inputs and not base.stopping_criteria_tl()) or
(self.popsize < base.popsize and
not base.stopping_criteria())):
if base._all_inputs and used_inputs_var.empty() and used_inputs_naive.empty():
base._init_popsize = self.popsize
break
if nb_input.use_all_variables():
v = nb_input.all_variables()
if v is None:
continue
elif not used_inputs_var.empty() and np.random.random() < base._pr_variable:
v = self.variable_input(used_inputs_var)
if v is None:
used_inputs_var.pos = used_inputs_var.size
continue
elif not used_inputs_naive.empty():
v = nb_input.input()
if not used_inputs_var.empty() and used_inputs_naive.empty():
base._pr_variable = 1
if v is None:
used_inputs_naive.pos = used_inputs_naive.size
if not used_inputs_var.empty():
base._pr_variable = 1
continue
else:
gen = self.generation
self.generation = 0
v = base.random_offspring()
self.generation = gen
self.add(v) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def add(self, v):
"Add an individual to the population"
self.population.append(v)
self._current_popsize += 1
v.position = len(self._hist)
self._hist.append(v)
self.bsf = v
self.estopping = v
self._density += self.get_density(v) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace(self, v):
"""Replace an individual selected by negative tournament selection with individual v""" |
if self.popsize < self._popsize:
return self.add(v)
k = self.tournament(negative=True)
self.clean(self.population[k])
self.population[k] = v
v.position = len(self._hist)
self._hist.append(v)
self.bsf = v
self.estopping = v
self._inds_replace += 1
self._density += self.get_density(v)
if self._inds_replace == self._popsize:
self._inds_replace = 0
self.generation += 1
gc.collect() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_directory_if_needed(directory_path):
""" Make the directory path, if needed. """ |
if os.path.exists(directory_path):
if not os.path.isdir(directory_path):
raise OSError("Path is not a directory:", directory_path)
else:
os.makedirs(directory_path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hatchery():
""" Main entry point for the hatchery program """ |
args = docopt.docopt(__doc__)
task_list = args['<task>']
if not task_list or 'help' in task_list or args['--help']:
print(__doc__.format(version=_version.__version__, config_files=config.CONFIG_LOCATIONS))
return 0
level_str = args['--log-level']
try:
level_const = getattr(logging, level_str.upper())
logging.basicConfig(level=level_const)
if level_const == logging.DEBUG:
workdir.options.debug = True
except AttributeError:
logging.basicConfig()
logger.error('received invalid log level: ' + level_str)
return 1
for task in task_list:
if task not in ORDERED_TASKS:
logger.error('received invalid task: ' + task)
return 1
for task in CHECK_TASKS:
if task in task_list:
logger.info('starting task: check')
task_check(args)
break
if 'package' in task_list and not args['--release-version']:
logger.error('--release-version is required for the package task')
return 1
config_dict = _get_config_or_die(
calling_task='hatchery',
required_params=['auto_push_tag']
)
if config_dict['auto_push_tag'] and 'upload' in task_list:
logger.info('adding task: tag (auto_push_tag==True)')
task_list.append('tag')
# all commands will raise a SystemExit if they fail
# check will have already been run
for task in ORDERED_TASKS:
if task in task_list and task != 'check':
logger.info('starting task: ' + task)
globals()['task_' + task](args)
logger.info("all's well that ends well...hatchery out")
return 0 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def call(cmd_args, suppress_output=False):
""" Call an arbitary command and return the exit value, stdout, and stderr as a tuple Command can be passed in as either a string or iterable 0 1 """ |
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
return call_result |
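Typical usage; the command is illustrative, and the result object exposes at least exitval per the check above:
result = call('git status', suppress_output=True)
if result.exitval != 0:
    raise RuntimeError('git status failed')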
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup(cmd_args, suppress_output=False):
""" Call a setup.py command or list of commands 0 1 """ |
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
cmd_args = [sys.executable, 'setup.py'] + [x for x in cmd_args]
return call(cmd_args, suppress_output=suppress_output) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_data(self):
""" Loads data files and stores the output in the data attribute. """ |
data = []
valid_dates = []
mrms_files = np.array(sorted(os.listdir(self.path + self.variable + "/")))
mrms_file_dates = np.array([m_file.split("_")[-2].split("-")[0]
for m_file in mrms_files])
old_mrms_file = None
file_obj = None
for t in range(self.all_dates.shape[0]):
file_index = np.where(mrms_file_dates == self.all_dates[t].strftime("%Y%m%d"))[0]
if len(file_index) > 0:
mrms_file = mrms_files[file_index][0]
if mrms_file is not None:
if file_obj is not None:
file_obj.close()
file_obj = Dataset(self.path + self.variable + "/" + mrms_file)
#old_mrms_file = mrms_file
if "time" in file_obj.variables.keys():
time_var = "time"
else:
time_var = "date"
file_valid_dates = pd.DatetimeIndex(num2date(file_obj.variables[time_var][:],
file_obj.variables[time_var].units))
else:
file_valid_dates = pd.DatetimeIndex([])
time_index = np.where(file_valid_dates.values == self.all_dates.values[t])[0]
if len(time_index) > 0:
data.append(file_obj.variables[self.variable][time_index[0]])
valid_dates.append(self.all_dates[t])
if file_obj is not None:
file_obj.close()
self.data = np.array(data)
self.data[self.data < 0] = 0
self.data[self.data > 150] = 150
self.valid_dates = pd.DatetimeIndex(valid_dates) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):
""" Rescale your input data so that is ranges over integer values, which will perform better in the watershed. Args: data: 2D or 3D ndarray being rescaled data_min: minimum value of input data for scaling purposes data_max: maximum value of input data for scaling purposes out_min: minimum value of scaled data out_max: maximum value of scaled data Returns: Linearly scaled ndarray """ |
return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min |
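Since rescale_data is a pure linear map, a quick worked example: with data_min=10, data_max=60 and the default 0-100 output range, a value of 35 lands exactly at the midpoint:
import numpy as np
field = np.array([10.0, 35.0, 60.0])
print(rescale_data(field, data_min=10.0, data_max=60.0))  # [  0.  50. 100.]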
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def label(self, input_grid):
""" Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray):
Grid to be labeled. Returns: Array of labeled pixels """ |
marked = self.find_local_maxima(input_grid)
marked = np.where(marked >= 0, 1, 0)
# splabel returns two things in a tuple: an array and an integer
# assign the first thing (array) to markers
markers = splabel(marked)[0]
return markers |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_local_maxima(self, input_grid):
""" Finds the local maxima in the inputGrid and perform region growing to identify objects. Args: input_grid: Raw input data. Returns: array with labeled objects. """ |
pixels, q_data = self.quantize(input_grid)
centers = OrderedDict()
for p in pixels.keys():
centers[p] = []
marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED
MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size)))
MAX_INFL = 2 * MIN_INFL
marked_so_far = []
# Find the maxima. These are high-values with enough clearance
# around them.
# Work from high to low bins. The pixels in the highest bin mark their
# neighborhoods first. If you did it from low to high the lowest maxima
# would mark their neighborhoods first and interfere with the identification of higher maxima.
for b in sorted(pixels.keys(), reverse=True):
# Square starts large with high intensity bins and gets smaller with low intensity bins.
infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL)))
for p in pixels[b]:
if marked[p] == self.UNMARKED:
ok = False
del marked_so_far[:]
# Temporarily mark unmarked points in square around point (keep track of them in list marked_so_far).
# If none of the points in square were marked already from a higher intensity center,
# this counts as a new center and ok=True and points will remain marked.
# Otherwise ok=False and marked points that were previously unmarked will be unmarked.
for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1,
p[1] - infl_dist:p[1] + infl_dist + 1]):
if v == self.UNMARKED:
ok = True
marked[i - infl_dist + p[0], j - infl_dist + p[1]] = b
marked_so_far.append((i - infl_dist + p[0], j - infl_dist + p[1]))
else:
# neighborhood already taken
ok = False
break
# ok if point and surrounding square were not marked already.
if ok:
# highest point in its neighborhood
centers[b].append(p)
else:
for m in marked_so_far:
marked[m] = self.UNMARKED
# Erase marks and start over. You have a list of centers now.
marked[:, :] = self.UNMARKED
deferred_from_last = []
deferred_to_next = []
# delta (int): maximum number of increments the cluster is allowed to range over. Larger d results in clusters over larger scales.
for delta in range(0, self.delta + 1):
# Work from high to low bins.
for b in sorted(centers.keys(), reverse=True):
bin_lower = b - delta
deferred_from_last[:] = deferred_to_next[:]
del deferred_to_next[:]
foothills = []
n_centers = len(centers[b])
tot_centers = n_centers + len(deferred_from_last)
for i in range(tot_centers):
# done this way to minimize memory overhead of maintaining two lists
if i < n_centers:
center = centers[b][i]
else:
center = deferred_from_last[i - n_centers]
if bin_lower < 0:
bin_lower = 0
if marked[center] == self.UNMARKED:
captured = self.set_maximum(q_data, marked, center, bin_lower, foothills)
if not captured:
# decrement to lower value to see if it'll get big enough
deferred_to_next.append(center)
else:
pass
# this is the last one for this bin
self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills)
del deferred_from_last[:]
del deferred_to_next[:]
return marked |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_maximum(self, q_data, marked, center, bin_lower, foothills):
""" Grow a region at a certain bin level and check if the region has reached the maximum size. Args: q_data: Quantized data array marked: Array marking points that are objects center: Coordinates of the center pixel of the region being grown bin_lower: Intensity level of lower bin being evaluated foothills: List of points that are associated with a center but fall outside the the size or intensity criteria Returns: True if the object is finished growing and False if the object should be grown again at the next threshold level. """ |
as_bin = [] # pixels to be included in peak
as_glob = [] # pixels to be globbed up as part of foothills
marked_so_far = [] # pixels that have already been marked
will_be_considered_again = False
as_bin.append(center)
center_data = q_data[center]
while len(as_bin) > 0:
p = as_bin.pop(-1) # remove and return last pixel in as_bin
if marked[p] != self.UNMARKED: # already processed
continue
marked[p] = q_data[center]
marked_so_far.append(p)
# check neighbors
for index, val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]):
# is neighbor part of peak or part of mountain?
if val == self.UNMARKED:
pixel = (index[0] - 1 + p[0], index[1] - 1 + p[1])
p_data = q_data[pixel]
if (not will_be_considered_again) and (p_data >= 0) and (p_data < center_data):
will_be_considered_again = True
if p_data >= bin_lower and (np.abs(center_data - p_data) <= self.delta):
as_bin.append(pixel)
# Do not check that this is the closest: this way, a narrow channel of globbed pixels form
elif p_data >= 0:
as_glob.append(pixel)
if bin_lower == 0:
will_be_considered_again = False
big_enough = len(marked_so_far) >= self.max_size
if big_enough:
# remove lower values within region of influence
foothills.append((center, as_glob))
elif will_be_considered_again: # remove the check if you want to ignore regions smaller than max_size
for m in marked_so_far:
marked[m] = self.UNMARKED
del as_bin[:]
del as_glob[:]
del marked_so_far[:]
return big_enough or (not will_be_considered_again) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills):
""" Mark points determined to be foothills as globbed, so that they are not included in future searches. Also searches neighboring points to foothill points to determine if they should also be considered foothills. Args: q_data: Quantized data marked: Marked bin_num: Current bin being searched bin_lower: Next bin being searched centers: dictionary of local maxima considered to be object centers foothills: List of foothill points being removed. """ |
hills = []
for foot in foothills:
center = foot[0]
hills[:] = foot[1][:]
# remove all foothills
while len(hills) > 0:
# mark this point
pt = hills.pop(-1)
marked[pt] = self.GLOBBED
for s_index, val in np.ndenumerate(marked[pt[0] - 1:pt[0] + 2, pt[1] - 1:pt[1] + 2]):
index = (s_index[0] - 1 + pt[0], s_index[1] - 1 + pt[1])
# is neighbor part of peak or part of mountain?
if val == self.UNMARKED:
# will let in even minor peaks
if (q_data[index] >= 0) and \
(q_data[index] < bin_lower) and \
((q_data[index] <= q_data[pt]) or self.is_closest(index, center, centers, bin_num)):
hills.append(index)
del foothills[:] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quantize(self, input_grid):
""" Quantize a grid into discrete steps based on input parameters. Args: input_grid: 2-d array of values Returns: Dictionary of value pointing to pixel locations, and quantized 2-d array of data """ |
pixels = {}
for i in range(self.max_bin+1):
pixels[i] = []
data = (np.array(input_grid, dtype=int) - self.min_thresh) // self.data_increment # integer division keeps the bin indices usable as dict keys
data[data < 0] = -1
data[data > self.max_bin] = self.max_bin
good_points = np.where(data >= 0)
for g in np.arange(good_points[0].shape[0]):
pixels[data[(good_points[0][g], good_points[1][g])]].append((good_points[0][g], good_points[1][g]))
return pixels, data |
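A worked example of the binning arithmetic above, using hypothetical parameters min_thresh=10, data_increment=5, max_bin=10:
import numpy as np
min_thresh, data_increment, max_bin = 10, 5, 10
grid = np.array([[3, 12], [27, 80]])
data = (grid.astype(int) - min_thresh) // data_increment
data[data < 0] = -1                 # below threshold
data[data > max_bin] = max_bin      # clipped to the top bin
print(data)  # [[-1  0]
             #  [ 3 10]]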
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def content(self, **args):
'''
Doesn't require manual fetching of the gist id of a gist;
passing gistName will return the content of that gist. If
names are ambiguous, provide a gist id, or the contents
of the most recent gist with that ambiguous name are returned
'''
self.gist_name = ''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception('Either provide authenticated user\'s unambiguous gist name or any unique gist id')
if self.gist_id:
r = requests.get(
'%s/gists/%s' % (BASE_URL, self.gist_id),
headers=self.gist.header
)
if (r.status_code == 200):
r_text = json.loads(r.text)
if self.gist_name != '':
content = r_text['files'][self.gist_name]['content']
else:
for key, value in r_text['files'].items():
content = r_text['files'][value['filename']]['content']
return content
raise Exception('No such gist found') |
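Hypothetical usage, assuming an authenticated client object gh exposing this method:
text = gh.content(name='notes.md')   # by unambiguous gist name
text = gh.content(id='abc123')       # or by a unique gist id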
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def edit(self, **args):
'''
Doesn't require manual fetching of the gist id of a gist;
passing gistName will edit the gist
'''
self.gist_name = ''
if 'description' in args:
self.description = args['description']
else:
self.description = ''
if 'name' in args and 'id' in args:
self.gist_name = args['name']
self.gist_id = args['id']
elif 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception('Gist Name/ID must be provided')
if 'content' in args:
self.content = args['content']
else:
raise Exception('Gist content can\'t be empty')
if (self.gist_name == ''):
self.gist_name = self.getgist(id=self.gist_id)
data = {"description": self.description,
"files": {
self.gist_name: {
"content": self.content
}
}
}
if self.gist_id:
r = requests.patch(
'%s/gists/%s'%(BASE_URL,self.gist_id),
headers=self.gist.header,
data=json.dumps(data),
)
if (r.status_code == 200):
r_text = json.loads(r.text)
response = {
'updated_content': self.content,
'created_at': r.json()['created_at'],
'comments':r.json()['comments']
}
return response
raise Exception('No such gist found') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def starred(self, **args):
'''
List the authenticated user's starred gists
'''
ids =[]
r = requests.get(
'%s/gists/starred'%BASE_URL,
headers=self.gist.header
)
if 'limit' in args:
limit = args['limit']
else:
limit = len(r.json())
if (r.status_code == 200):
for g in range(0, limit):
ids.append('%s/%s/%s' % (GIST_URL, r.json()[g]['user']['login'], r.json()[g]['id']))
return ids
raise Exception('Username not found') |