<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_file(self, filename, time_units="seconds since 1970-01-01T00:00"):
    """
    Initializes netCDF file for writing

    Args:
        filename: Name of the netCDF file
        time_units: Units for the time variable in format "<time> since <date string>"

    Returns:
        Dataset object
    """
    if os.access(filename, os.R_OK):
        out_data = Dataset(filename, "r+")
    else:
        out_data = Dataset(filename, "w")
    if len(self.data.shape) == 2:
        for d, dim in enumerate(["y", "x"]):
            out_data.createDimension(dim, self.data.shape[d])
    else:
        for d, dim in enumerate(["y", "x"]):
            out_data.createDimension(dim, self.data.shape[d + 1])
    out_data.createDimension("time", len(self.times))
    time_var = out_data.createVariable("time", "i8", ("time",))
    time_var[:] = date2num(self.times.to_pydatetime(), time_units)
    time_var.units = time_units
    out_data.Conventions = "CF-1.6"
    return out_data
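Editor's note: for readers unfamiliar with the netCDF4 calls used above, here is a minimal, self-contained sketch of the same initialization pattern. The filename and timestamps are assumed values for illustration only.

    import datetime
    from netCDF4 import Dataset, date2num

    # assumed example values; the method above takes these from self
    times = [datetime.datetime(2020, 1, 1, 0), datetime.datetime(2020, 1, 1, 1)]
    time_units = "seconds since 1970-01-01T00:00"

    out_data = Dataset("example.nc", "w")          # create a new file
    out_data.createDimension("time", len(times))
    time_var = out_data.createVariable("time", "i8", ("time",))
    time_var[:] = date2num(times, time_units)      # datetimes -> numeric offsets
    time_var.units = time_units
    out_data.Conventions = "CF-1.6"
    out_data.close()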
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_to_file(self, out_data):
    """
    Outputs data to a netCDF file. If the file does not exist, it will be created. Otherwise, additional
    variables are appended to the current file.

    Args:
        out_data: Open netCDF Dataset object to write to (as returned by init_file)
    """
    full_var_name = self.consensus_type + "_" + self.variable
    if "-hour" in self.consensus_type:
        if full_var_name not in out_data.variables.keys():
            var = out_data.createVariable(full_var_name, "f4", ("y", "x"),
                                          zlib=True, least_significant_digit=3, shuffle=True)
        else:
            var = out_data.variables[full_var_name]
        var.coordinates = "y x"
    else:
        if full_var_name not in out_data.variables.keys():
            var = out_data.createVariable(full_var_name, "f4", ("time", "y", "x"),
                                          zlib=True, least_significant_digit=3, shuffle=True)
        else:
            var = out_data.variables[full_var_name]
        var.coordinates = "time y x"
    var[:] = self.data
    var.units = self.units
    var.long_name = self.consensus_type + "_" + self.variable
    return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def restore(self, workspace_uuid):
    """
    Restore the workspace to the given workspace_uuid.

    If workspace_uuid is None then create a new workspace and use it.
    """
    workspace = next((workspace for workspace in self.document_model.workspaces
                      if workspace.uuid == workspace_uuid), None)
    if workspace is None:
        workspace = self.new_workspace()
    self._change_workspace(workspace)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new_workspace(self, name=None, layout=None, workspace_id=None, index=None) -> WorkspaceLayout.WorkspaceLayout:
    """
    Create a new workspace, insert into document_model, and return it.
    """
    workspace = WorkspaceLayout.WorkspaceLayout()
    self.document_model.insert_workspace(index if index is not None else len(self.document_model.workspaces),
                                         workspace)
    d = create_image_desc()
    d["selected"] = True
    workspace.layout = layout if layout is not None else d
    workspace.name = name if name is not None else _("Workspace")
    if workspace_id:
        workspace.workspace_id = workspace_id
    return workspace
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_workspace(self, name, layout, workspace_id):
    """
    Looks for a workspace with workspace_id.

    If none is found, create a new one, add it, and change to it.
    """
    workspace = next((workspace for workspace in self.document_model.workspaces
                      if workspace.workspace_id == workspace_id), None)
    if not workspace:
        workspace = self.new_workspace(name=name, layout=layout, workspace_id=workspace_id)
    self._change_workspace(workspace)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_workspace(self) -> None:
    """
    Pose a dialog to name and create a workspace.
    """
    def create_clicked(text):
        if text:
            command = Workspace.CreateWorkspaceCommand(self, text)
            command.perform()
            self.document_controller.push_undo_command(command)

    self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=_("Workspace"),
                                     accepted_fn=create_clicked, accepted_text=_("Create"),
                                     message_box_id="create_workspace")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename_workspace(self) -> None:
    """
    Pose a dialog to rename the workspace.
    """
    def rename_clicked(text):
        if len(text) > 0:
            command = Workspace.RenameWorkspaceCommand(self, text)
            command.perform()
            self.document_controller.push_undo_command(command)

    self.pose_get_string_message_box(caption=_("Enter new name for workspace"), text=self.__workspace.name,
                                     accepted_fn=rename_clicked, accepted_text=_("Rename"),
                                     message_box_id="rename_workspace")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_workspace(self):
    """
    Pose a dialog to confirm removal then remove workspace.
    """
    def confirm_clicked():
        if len(self.document_model.workspaces) > 1:
            command = Workspace.RemoveWorkspaceCommand(self)
            command.perform()
            self.document_controller.push_undo_command(command)

    caption = _("Remove workspace named '{0}'?").format(self.__workspace.name)
    self.pose_confirmation_message_box(caption, confirm_clicked, accepted_text=_("Remove Workspace"),
                                       message_box_id="remove_workspace")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clone_workspace(self) -> None:
    """
    Pose a dialog to name and clone a workspace.
    """
    def clone_clicked(text):
        if text:
            command = Workspace.CloneWorkspaceCommand(self, text)
            command.perform()
            self.document_controller.push_undo_command(command)

    self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=self.__workspace.name,
                                     accepted_fn=clone_clicked, accepted_text=_("Clone"),
                                     message_box_id="clone_workspace")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bootstrap(score_objs, n_boot=1000):
    """
    Given a set of DistributedROC or DistributedReliability objects, this function performs a
    bootstrap resampling of the objects and returns n_boot aggregations of them.

    Args:
        score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
        n_boot (int): Number of bootstrap samples

    Returns:
        An array of DistributedROC or DistributedReliability
    """
    all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)
    return all_samples.sum(axis=1)
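Editor's note: because the only requirement on score_objs is an __add__ method, the resampling can be demonstrated with a toy aggregatable class. Counts below is a hypothetical stand-in for DistributedROC, not part of the original library.

    import numpy as np

    class Counts:
        # hypothetical stand-in for DistributedROC/DistributedReliability
        def __init__(self, hits, misses):
            self.hits, self.misses = hits, misses
        def __add__(self, other):
            return Counts(self.hits + other.hits, self.misses + other.misses)

    scores = [Counts(3, 1), Counts(5, 2), Counts(2, 4)]
    all_samples = np.random.choice(scores, size=(5, len(scores)), replace=True)
    boot = all_samples.sum(axis=1)   # object-dtype sum invokes __add__
    print([(b.hits, b.misses) for b in boot])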
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, forecasts, observations):
    """
    Update the ROC curve with a set of forecasts and observations

    Args:
        forecasts: 1D array of forecast values
        observations: 1D array of observation values.
    """
    for t, threshold in enumerate(self.thresholds):
        tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))
        fp = np.count_nonzero((forecasts >= threshold) & (observations < self.obs_threshold))
        fn = np.count_nonzero((forecasts < threshold) & (observations >= self.obs_threshold))
        tn = np.count_nonzero((forecasts < threshold) & (observations < self.obs_threshold))
        self.contingency_tables.iloc[t] += [tp, fp, fn, tn]
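Editor's note: one pass of the threshold loop can be checked by hand. The forecasts, observations, and thresholds below are assumed toy values.

    import numpy as np

    forecasts = np.array([0.1, 0.4, 0.8, 0.9, 0.3])
    observations = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
    threshold, obs_threshold = 0.5, 1.0  # assumed values for illustration

    tp = np.count_nonzero((forecasts >= threshold) & (observations >= obs_threshold))
    fp = np.count_nonzero((forecasts >= threshold) & (observations < obs_threshold))
    fn = np.count_nonzero((forecasts < threshold) & (observations >= obs_threshold))
    tn = np.count_nonzero((forecasts < threshold) & (observations < obs_threshold))
    print(tp, fp, fn, tn)  # 1 1 2 1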
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other_roc):
    """
    Ingest the values of another DistributedROC object into this one and update the statistics inplace.

    Args:
        other_roc: another DistributedROC object.
    """
    if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):
        self.contingency_tables += other_roc.contingency_tables
    else:
        print("Input table thresholds do not match.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def performance_curve(self):
    """
    Calculate the Probability of Detection and False Alarm Ratio in order to output a performance diagram.

    Returns:
        pandas.DataFrame containing POD, FAR, and probability thresholds.
    """
    pod = self.contingency_tables["TP"] / (self.contingency_tables["TP"] + self.contingency_tables["FN"])
    far = self.contingency_tables["FP"] / (self.contingency_tables["FP"] + self.contingency_tables["TP"])
    far[(self.contingency_tables["FP"] + self.contingency_tables["TP"]) == 0] = np.nan
    return pd.DataFrame({"POD": pod, "FAR": far, "Thresholds": self.thresholds},
                        columns=["POD", "FAR", "Thresholds"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def max_csi(self):
    """
    Calculate the maximum Critical Success Index across all probability thresholds

    Returns:
        The maximum CSI as a float
    """
    csi = self.contingency_tables["TP"] / (self.contingency_tables["TP"] + self.contingency_tables["FN"] +
                                           self.contingency_tables["FP"])
    return csi.max()
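Editor's note: the POD, FAR, and CSI formulas from the two functions above can be exercised on a standalone toy contingency table; the numbers below are assumed.

    import numpy as np
    import pandas as pd

    # toy contingency tables at three thresholds, columns as in the class above
    ct = pd.DataFrame({"TP": [8, 5, 2], "FP": [6, 2, 1], "FN": [1, 4, 7], "TN": [5, 9, 10]})
    pod = ct["TP"] / (ct["TP"] + ct["FN"])               # probability of detection
    far = ct["FP"] / (ct["FP"] + ct["TP"])               # false alarm ratio
    csi = ct["TP"] / (ct["TP"] + ct["FN"] + ct["FP"])    # critical success index
    print(pod.values, far.values, csi.max())             # max CSI ~= 0.533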
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_contingency_tables(self):
    """
    Create an Array of ContingencyTable objects for each probability threshold.

    Returns:
        Array of ContingencyTable objects
    """
    return np.array([ContingencyTable(*ct) for ct in self.contingency_tables.values])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_str(self, in_str):
    """
    Read the DistributedROC string and parse the contingency table values from it.

    Args:
        in_str (str): The string output from the __str__ method
    """
    parts = in_str.split(";")
    for part in parts:
        var_name, value = part.split(":")
        if var_name == "Obs_Threshold":
            self.obs_threshold = float(value)
        elif var_name == "Thresholds":
            self.thresholds = np.array(value.split(), dtype=float)
            self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,
                                                   data=np.zeros((self.thresholds.size,
                                                                  self.contingency_tables.columns.size)))
        elif var_name in self.contingency_tables.columns:
            self.contingency_tables[var_name] = np.array(value.split(), dtype=int)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, forecasts, observations):
    """
    Update the statistics with a set of forecasts and observations.

    Args:
        forecasts (numpy.ndarray): Array of forecast probability values
        observations (numpy.ndarray): Array of observation values
    """
    for t, threshold in enumerate(self.thresholds[:-1]):
        self.frequencies.loc[t, "Positive_Freq"] += np.count_nonzero((threshold <= forecasts) &
                                                                     (forecasts < self.thresholds[t + 1]) &
                                                                     (observations >= self.obs_threshold))
        self.frequencies.loc[t, "Total_Freq"] += np.count_nonzero((threshold <= forecasts) &
                                                                  (forecasts < self.thresholds[t + 1]))
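Editor's note: the half-open binning above is close to what np.histogram computes; a sketch with assumed toy values follows. Note that np.histogram closes its last bin on the right, whereas the loop above uses strictly half-open bins, so edge cases at the top threshold can differ.

    import numpy as np

    thresholds = np.arange(0, 1.1, 0.25)           # bin edges
    forecasts = np.array([0.05, 0.3, 0.55, 0.6, 0.95])
    observations = np.array([0, 1, 0, 1, 1])
    obs_threshold = 1                              # assumed event definition

    total_freq, _ = np.histogram(forecasts, bins=thresholds)
    pos_freq, _ = np.histogram(forecasts[observations >= obs_threshold], bins=thresholds)
    print(total_freq, pos_freq)                    # [1 1 2 1] [0 1 1 1]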
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other_rel):
    """
    Ingest another DistributedReliability and add its contents to the current object.

    Args:
        other_rel: a DistributedReliability object.
    """
    if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):
        self.frequencies += other_rel.frequencies
    else:
        print("Input table thresholds do not match.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reliability_curve(self):
    """
    Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq.

    Returns:
        pandas.DataFrame
    """
    total = self.frequencies["Total_Freq"].sum()
    curve = pd.DataFrame(columns=["Bin_Start", "Bin_End", "Bin_Center",
                                  "Positive_Relative_Freq", "Total_Relative_Freq"])
    curve["Bin_Start"] = self.thresholds[:-1]
    curve["Bin_End"] = self.thresholds[1:]
    curve["Bin_Center"] = 0.5 * (self.thresholds[:-1] + self.thresholds[1:])
    curve["Positive_Relative_Freq"] = self.frequencies["Positive_Freq"] / self.frequencies["Total_Freq"]
    curve["Total_Relative_Freq"] = self.frequencies["Total_Freq"] / total
    return curve
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def brier_score(self):
    """
    Calculate the Brier Score
    """
    reliability, resolution, uncertainty = self.brier_score_components()
    return reliability - resolution + uncertainty
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def brier_skill_score(self):
    """
    Calculate the Brier Skill Score
    """
    reliability, resolution, uncertainty = self.brier_score_components()
    return (resolution - reliability) / uncertainty
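Editor's note: the two functions above implement the Murphy decomposition, BS = REL - RES + UNC and BSS = (RES - REL) / UNC. A worked sketch with assumed component values:

    reliability, resolution, uncertainty = 0.01, 0.05, 0.25      # assumed components
    brier_score = reliability - resolution + uncertainty          # 0.21
    brier_skill_score = (resolution - reliability) / uncertainty  # 0.16
    print(brier_score, brier_skill_score)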
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, forecasts, observations):
    """
    Update the statistics with forecasts and observations.

    Args:
        forecasts: The discrete cumulative distribution functions of the forecasts.
        observations: Array of observation values, or their discrete CDFs.
    """
    if len(observations.shape) == 1:
        obs_cdfs = np.zeros((observations.size, self.thresholds.size))
        for o, observation in enumerate(observations):
            obs_cdfs[o, self.thresholds >= observation] = 1
    else:
        obs_cdfs = observations
    self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
    self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
    self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
    self.errors["O"] += np.sum(obs_cdfs, axis=0)
    self.num_forecasts += forecasts.shape[0]
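Editor's note: the conversion of scalar observations into step-function CDFs is the key move above; a standalone sketch with assumed thresholds:

    import numpy as np

    thresholds = np.array([0.0, 1.0, 2.0, 3.0])
    observations = np.array([0.7, 2.2])

    obs_cdfs = np.zeros((observations.size, thresholds.size))
    for o, observation in enumerate(observations):
        obs_cdfs[o, thresholds >= observation] = 1  # CDF steps from 0 to 1 at the observed value
    print(obs_cdfs)
    # [[0. 1. 1. 1.]
    #  [0. 0. 0. 1.]]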
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def crps(self):
    """
    Calculates the continuous ranked probability score.
    """
    return np.sum(self.errors["F_2"].values - self.errors["F_O"].values * 2.0 + self.errors["O_2"].values) / \
        (self.thresholds.size * self.num_forecasts)
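Editor's note: since F_2, F_O, and O_2 accumulate sums of F^2, F*O, and O^2, the expression above equals the mean squared difference between forecast and observation CDFs. A sketch with assumed toy CDFs confirming the equivalence:

    import numpy as np

    f = np.array([[0.0, 0.4, 1.0], [0.1, 0.6, 1.0]])  # forecast CDFs
    o = np.array([[0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])  # observation CDFs
    crps_direct = np.mean((f - o) ** 2)
    crps_sums = (np.sum(f ** 2) - 2.0 * np.sum(f * o) + np.sum(o ** 2)) / (f.shape[0] * f.shape[1])
    print(np.isclose(crps_direct, crps_sums))  # True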
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def crps_climo(self):
    """
    Calculate the climatological CRPS.
    """
    o_bar = self.errors["O"].values / float(self.num_forecasts)
    crps_c = np.sum(self.num_forecasts * (o_bar ** 2) - o_bar * self.errors["O"].values * 2.0 +
                    self.errors["O_2"].values) / float(self.thresholds.size * self.num_forecasts)
    return crps_c
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def crpss(self):
    """
    Calculate the continuous ranked probability skill score from existing data.
    """
    crps_f = self.crps()
    crps_c = self.crps_climo()
    return 1.0 - float(crps_f) / float(crps_c)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_metadata_value(metadata_source, key: str) -> bool:
    """Return whether the metadata value for the given key exists.

    There are a set of predefined keys that, when used, will be type checked and be interoperable with other
    applications. Please consult reference documentation for valid keys.

    If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
    by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.

    Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason,
    prefer using the ``metadata_value`` methods over directly accessing ``metadata``.
    """
    desc = session_key_map.get(key)
    if desc is not None:
        d = getattr(metadata_source, "session_metadata", dict())
        for k in desc['path'][:-1]:
            d = d.setdefault(k, dict()) if d is not None else None
        if d is not None:
            return desc['path'][-1] in d
    desc = key_map.get(key)
    if desc is not None:
        d = getattr(metadata_source, "metadata", dict())
        for k in desc['path'][:-1]:
            d = d.setdefault(k, dict()) if d is not None else None
        if d is not None:
            return desc['path'][-1] in d
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_metadata_value(metadata_source, key: str) -> None:
    """Delete the metadata value for the given key.

    There are a set of predefined keys that, when used, will be type checked and be interoperable with other
    applications. Please consult reference documentation for valid keys.

    If using a custom key, we recommend structuring your keys in the '<dotted>.<group>.<attribute>' format
    followed by the predefined keys. e.g. 'stem.session.instrument' or 'stm.camera.binning'.

    Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason,
    prefer using the ``metadata_value`` methods over directly accessing ``metadata``.
    """
    desc = session_key_map.get(key)
    if desc is not None:
        d0 = getattr(metadata_source, "session_metadata", dict())
        d = d0
        for k in desc['path'][:-1]:
            d = d.setdefault(k, dict()) if d is not None else None
        if d is not None and desc['path'][-1] in d:
            d.pop(desc['path'][-1], None)
            metadata_source.session_metadata = d0
        return
    desc = key_map.get(key)
    if desc is not None:
        d0 = getattr(metadata_source, "metadata", dict())
        d = d0
        for k in desc['path'][:-1]:
            d = d.setdefault(k, dict()) if d is not None else None
        if d is not None and desc['path'][-1] in d:
            d.pop(desc['path'][-1], None)
            metadata_source.metadata = d0
        return
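Editor's note: both metadata functions above walk a nested dict along a key path. A read-only sketch of that traversal (get_nested is a hypothetical helper; unlike the setdefault-based walk above, it does not mutate the dict):

    def get_nested(d, path):
        # Walk a nested dict along path; return None when a segment is missing.
        for k in path[:-1]:
            d = d.get(k) if isinstance(d, dict) else None
            if d is None:
                return None
        return d.get(path[-1]) if isinstance(d, dict) else None

    metadata = {"hardware_source": {"camera": {"binning": 2}}}
    print(get_nested(metadata, ["hardware_source", "camera", "binning"]))  # 2
    print(get_nested(metadata, ["hardware_source", "scan", "rotation"]))   # None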
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_y_ticks(self, plot_height):
    """Calculate the y-axis items dependent on the plot height."""
    calibrated_data_min = self.calibrated_data_min
    calibrated_data_max = self.calibrated_data_max
    calibrated_data_range = calibrated_data_max - calibrated_data_min
    ticker = self.y_ticker
    y_ticks = list()
    for tick_value, tick_label in zip(ticker.values, ticker.labels):
        if calibrated_data_range != 0.0:
            y_tick = plot_height - plot_height * (tick_value - calibrated_data_min) / calibrated_data_range
        else:
            y_tick = plot_height - plot_height * 0.5
        if y_tick >= 0 and y_tick <= plot_height:
            y_ticks.append((y_tick, tick_label))
    return y_ticks
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_x_ticks(self, plot_width):
    """Calculate the x-axis items dependent on the plot width."""
    x_calibration = self.x_calibration
    uncalibrated_data_left = self.__uncalibrated_left_channel
    uncalibrated_data_right = self.__uncalibrated_right_channel
    calibrated_data_left = x_calibration.convert_to_calibrated_value(uncalibrated_data_left) if x_calibration is not None else uncalibrated_data_left
    calibrated_data_right = x_calibration.convert_to_calibrated_value(uncalibrated_data_right) if x_calibration is not None else uncalibrated_data_right
    calibrated_data_left, calibrated_data_right = min(calibrated_data_left, calibrated_data_right), max(calibrated_data_left, calibrated_data_right)
    graph_left, graph_right, tick_values, division, precision = Geometry.make_pretty_range(calibrated_data_left, calibrated_data_right)
    drawn_data_width = self.drawn_right_channel - self.drawn_left_channel
    x_ticks = list()
    if drawn_data_width > 0.0:
        for tick_value in tick_values:
            label = nice_label(tick_value, precision)
            data_tick = x_calibration.convert_from_calibrated_value(tick_value) if x_calibration else tick_value
            x_tick = plot_width * (data_tick - self.drawn_left_channel) / drawn_data_width
            if x_tick >= 0 and x_tick <= plot_width:
                x_ticks.append((x_tick, label))
    return x_ticks
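Editor's note: the channel-to-pixel mapping at the heart of the loop above is a simple linear transform; a sketch with assumed channel and pixel ranges:

    drawn_left_channel, drawn_right_channel = 0.0, 1024.0  # assumed drawn channel range
    plot_width = 480.0                                     # assumed pixel width
    drawn_data_width = drawn_right_channel - drawn_left_channel
    for data_tick in (0.0, 256.0, 512.0, 1024.0):
        x_tick = plot_width * (data_tick - drawn_left_channel) / drawn_data_width
        print(data_tick, x_tick)  # linear channel -> pixel mapping: 0, 120, 240, 480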
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size_to_content(self):
    """
    Size the canvas item to the proper height.
    """
    new_sizing = self.copy_sizing()
    new_sizing.minimum_height = 0
    new_sizing.maximum_height = 0
    axes = self.__axes
    if axes and axes.is_valid:
        if axes.x_calibration and axes.x_calibration.units:
            new_sizing.minimum_height = self.font_size + 4
            new_sizing.maximum_height = self.font_size + 4
    self.update_sizing(new_sizing)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size_to_content(self, get_font_metrics_fn):
    """
    Size the canvas item to the proper width, the maximum of any label.
    """
    new_sizing = self.copy_sizing()
    new_sizing.minimum_width = 0
    new_sizing.maximum_width = 0
    axes = self.__axes
    if axes and axes.is_valid:
        # calculate the width based on the label lengths
        font = "{0:d}px".format(self.font_size)
        max_width = 0
        y_range = axes.calibrated_data_max - axes.calibrated_data_min
        label = axes.y_ticker.value_label(axes.calibrated_data_max + y_range * 5)
        max_width = max(max_width, get_font_metrics_fn(font, label).width)
        label = axes.y_ticker.value_label(axes.calibrated_data_min - y_range * 5)
        max_width = max(max_width, get_font_metrics_fn(font, label).width)
        new_sizing.minimum_width = max_width
        new_sizing.maximum_width = max_width
    self.update_sizing(new_sizing)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size_to_content(self):
    """
    Size the canvas item to the proper width.
    """
    new_sizing = self.copy_sizing()
    new_sizing.minimum_width = 0
    new_sizing.maximum_width = 0
    axes = self.__axes
    if axes and axes.is_valid:
        if axes.y_calibration and axes.y_calibration.units:
            new_sizing.minimum_width = self.font_size + 4
            new_sizing.maximum_width = self.font_size + 4
    self.update_sizing(new_sizing)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_snippet_content(snippet_name, **format_kwargs):
    """
    Load the content from a snippet file which exists in SNIPPETS_ROOT
    """
    filename = snippet_name + '.snippet'
    snippet_file = os.path.join(SNIPPETS_ROOT, filename)
    if not os.path.isfile(snippet_file):
        raise ValueError('could not find snippet with name ' + filename)
    ret = helpers.get_file_content(snippet_file)
    if format_kwargs:
        ret = ret.format(**format_kwargs)
    return ret
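Editor's note: the optional formatting step above is plain str.format with keyword arguments; a sketch using an assumed template standing in for a .snippet file's content:

    template = "Hello, {name}! Today is {day}."  # stand-in for a loaded snippet
    print(template.format(name="Ada", day="Tuesday"))  # Hello, Ada! Today is Tuesday.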
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_display_properties(self, display_calibration_info, display_properties: typing.Mapping, display_layers: typing.Sequence[typing.Mapping]) -> None:
    """Update the display values. Called from display panel.

    This method saves the display values and data and triggers an update. It should be as fast as possible.

    As a layer, this canvas item will respond to the update by calling prepare_render on the layer's
    rendering thread. Prepare render will call prepare_display which will construct new axes and update all
    of the constituent canvas items such as the axes labels and the graph layers. Each will trigger its own
    update if its inputs have changed.

    The inefficiencies in this process are that the layer must re-render on each call to this function.
    There is also a cost within the constituent canvas items to check whether the axes or their data has
    changed.

    When the display is associated with a single data item, the data will be
    """
    # may be called from thread; prevent a race condition with closing.
    with self.__closing_lock:
        if self.__closed:
            return
        displayed_dimensional_scales = display_calibration_info.displayed_dimensional_scales
        displayed_dimensional_calibrations = display_calibration_info.displayed_dimensional_calibrations
        self.__data_scale = displayed_dimensional_scales[-1] if len(displayed_dimensional_scales) > 0 else 1
        self.__displayed_dimensional_calibration = displayed_dimensional_calibrations[-1] if len(displayed_dimensional_calibrations) > 0 else Calibration.Calibration(scale=displayed_dimensional_scales[-1])
        self.__intensity_calibration = display_calibration_info.displayed_intensity_calibration
        self.__calibration_style = display_calibration_info.calibration_style
        self.__y_min = display_properties.get("y_min")
        self.__y_max = display_properties.get("y_max")
        self.__y_style = display_properties.get("y_style", "linear")
        self.__left_channel = display_properties.get("left_channel")
        self.__right_channel = display_properties.get("right_channel")
        self.__legend_position = display_properties.get("legend_position")
        self.__display_layers = display_layers
        if self.__display_values_list and len(self.__display_values_list) > 0:
            self.__xdata_list = [display_values.display_data_and_metadata if display_values else None
                                 for display_values in self.__display_values_list]
            xdata0 = self.__xdata_list[0]
            if xdata0:
                self.__update_frame(xdata0.metadata)
        else:
            self.__xdata_list = list()
        # update the cursor info
        self.__update_cursor_info()
        # mark for update. prepare display will mark children for update if necessary.
        self.update()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __view_to_selected_graphics(self, data_and_metadata: DataAndMetadata.DataAndMetadata) -> None:
    """Change the view to encompass the selected graphic intervals."""
    all_graphics = self.__graphics
    graphics = [graphic for graphic_index, graphic in enumerate(all_graphics)
                if self.__graphic_selection.contains(graphic_index)]
    intervals = list()
    for graphic in graphics:
        if isinstance(graphic, Graphics.IntervalGraphic):
            intervals.append(graphic.interval)
    self.__view_to_intervals(data_and_metadata, intervals)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __update_cursor_info(self):
    """
    Map the mouse to the 1-d position within the line graph.
    """
    if not self.delegate:  # allow display to work without delegate
        return
    if self.__mouse_in and self.__last_mouse:
        pos_1d = None
        axes = self.__axes
        line_graph_canvas_item = self.line_graph_canvas_item
        if axes and axes.is_valid and line_graph_canvas_item:
            mouse = self.map_to_canvas_item(self.__last_mouse, line_graph_canvas_item)
            plot_rect = line_graph_canvas_item.canvas_bounds
            if plot_rect.contains_point(mouse):
                mouse = mouse - plot_rect.origin
                x = float(mouse.x) / plot_rect.width
                px = axes.drawn_left_channel + x * (axes.drawn_right_channel - axes.drawn_left_channel)
                pos_1d = (px,)
        self.delegate.cursor_changed(pos_1d)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_model_patch_tracks(self):
    """
    Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass.

    Returns:
    """
    self.model_grid.load_data()
    tracked_model_objects = []
    model_objects = []
    if self.model_grid.data is None:
        print("No model output found")
        return tracked_model_objects
    min_orig = self.model_ew.min_thresh
    max_orig = self.model_ew.max_thresh
    data_increment_orig = self.model_ew.data_increment
    self.model_ew.min_thresh = 0
    self.model_ew.data_increment = 1
    self.model_ew.max_thresh = 100
    for h, hour in enumerate(self.hours):
        # Identify storms at each time step and apply size filter
        print("Finding {0} objects for run {1} Hour: {2:02d}".format(self.ensemble_member,
                                                                     self.run_date.strftime("%Y%m%d%H"),
                                                                     hour))
        if self.mask is not None:
            model_data = self.model_grid.data[h] * self.mask
        else:
            model_data = self.model_grid.data[h]
        model_data[:self.patch_radius] = 0
        model_data[-self.patch_radius:] = 0
        model_data[:, :self.patch_radius] = 0
        model_data[:, -self.patch_radius:] = 0
        scaled_data = np.array(rescale_data(model_data, min_orig, max_orig))
        hour_labels = label_storm_objects(scaled_data, "ew", self.model_ew.min_thresh,
                                          self.model_ew.max_thresh, min_area=self.size_filter,
                                          max_area=self.model_ew.max_size, max_range=self.model_ew.delta,
                                          increment=self.model_ew.data_increment,
                                          gaussian_sd=self.gaussian_window)
        model_objects.extend(extract_storm_patches(hour_labels, model_data, self.model_grid.x,
                                                   self.model_grid.y, [hour],
                                                   dx=self.model_grid.dx, patch_radius=self.patch_radius))
        for model_obj in model_objects[-1]:
            dims = model_obj.timesteps[-1].shape
            if h > 0:
                model_obj.estimate_motion(hour, self.model_grid.data[h - 1], dims[1], dims[0])
        del scaled_data
        del model_data
        del hour_labels
    tracked_model_objects.extend(track_storms(model_objects, self.hours,
                                              self.object_matcher.cost_function_components,
                                              self.object_matcher.max_values,
                                              self.object_matcher.weights))
    self.model_ew.min_thresh = min_orig
    self.model_ew.max_thresh = max_orig
    self.model_ew.data_increment = data_increment_orig
    return tracked_model_objects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_mrms_tracks(self):
    """
    Identify objects from MRMS timesteps and link them together with object matching.

    Returns:
        List of STObjects containing MESH track information.
    """
    obs_objects = []
    tracked_obs_objects = []
    if self.mrms_ew is not None:
        self.mrms_grid.load_data()
        if len(self.mrms_grid.data) != len(self.hours):
            print('Less than 24 hours of observation data found')
            return tracked_obs_objects
        for h, hour in enumerate(self.hours):
            mrms_data = np.zeros(self.mrms_grid.data[h].shape)
            mrms_data[:] = np.array(self.mrms_grid.data[h])
            mrms_data[mrms_data < 0] = 0
            hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data,
                                                                                      self.gaussian_window)),
                                                   self.size_filter)
            hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0
            obj_slices = find_objects(hour_labels)
            num_slices = len(obj_slices)
            obs_objects.append([])
            if num_slices > 0:
                for sl in obj_slices:
                    obs_objects[-1].append(STObject(mrms_data[sl], np.where(hour_labels[sl] > 0, 1, 0),
                                                    self.model_grid.x[sl], self.model_grid.y[sl],
                                                    self.model_grid.i[sl], self.model_grid.j[sl],
                                                    hour, hour, dx=self.model_grid.dx))
                    if h > 0:
                        dims = obs_objects[-1][-1].timesteps[0].shape
                        obs_objects[-1][-1].estimate_motion(hour, self.mrms_grid.data[h - 1],
                                                            dims[1], dims[0])
        for h, hour in enumerate(self.hours):
            past_time_objs = []
            for obj in tracked_obs_objects:
                if obj.end_time == hour - 1:
                    past_time_objs.append(obj)
            if len(past_time_objs) == 0:
                tracked_obs_objects.extend(obs_objects[h])
            elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0:
                assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h],
                                                                hour - 1, hour)
                unpaired = list(range(len(obs_objects[h])))
                for pair in assignments:
                    past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])
                    unpaired.remove(pair[1])
                if len(unpaired) > 0:
                    for up in unpaired:
                        tracked_obs_objects.append(obs_objects[h][up])
            print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(len(tracked_obs_objects), hour))
    return tracked_obs_objects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
    """
    Match forecast and observed tracks.

    Args:
        model_tracks: list of model track STObjects.
        obs_tracks: list of observed track STObjects.
        unique_matches: if True, match each track with at most one other track.
        closest_matches: if True, use the closest-match criterion.

    Returns:
        List of track pairings.
    """
    if unique_matches:
        pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
    else:
        pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
    return pairings
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_hail_sizes(model_tracks, obs_tracks, track_pairings):
    """
    Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast
    storm track timestep. If the duration of the forecast and observed tracks differ, then interpolation is
    used for the intermediate timesteps.

    Args:
        model_tracks: List of model track STObjects
        obs_tracks: List of observed STObjects
        track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
    """
    unpaired = list(range(len(model_tracks)))
    for p, pair in enumerate(track_pairings):
        model_track = model_tracks[pair[0]]
        unpaired.remove(pair[0])
        obs_track = obs_tracks[pair[1]]
        obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max()
                                   for t, step in enumerate(obs_track.timesteps)])
        if obs_track.times.size > 1 and model_track.times.size > 1:
            normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min()) \
                * (obs_track.times - obs_track.times.min())
            normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min()) \
                * (model_track.times - model_track.times.min())
            hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind="nearest",
                                   bounds_error=False, fill_value=0)
            model_track.observations = hail_interp(normalized_model_times)
        elif obs_track.times.size == 1:
            model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0]
        elif model_track.times.size == 1:
            model_track.observations = np.array([obs_hail_sizes.max()])
        print(pair[0], "obs", obs_hail_sizes)
        print(pair[0], "model", model_track.observations)
    for u in unpaired:
        model_tracks[u].observations = np.zeros(model_tracks[u].times.shape)
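Editor's note: the normalized-time nearest-neighbor interpolation above can be checked standalone; the track times and hail sizes below are assumed toy values.

    import numpy as np
    from scipy.interpolate import interp1d

    obs_times = np.array([0, 2, 4])
    obs_hail_sizes = np.array([25.0, 40.0, 30.0])
    model_times = np.array([0, 1, 2, 3])

    # map both tracks onto [0, 1] so tracks of different durations can be compared
    norm_obs = (obs_times - obs_times.min()) / (obs_times.max() - obs_times.min())
    norm_model = (model_times - model_times.min()) / (model_times.max() - model_times.min())
    hail_interp = interp1d(norm_obs, obs_hail_sizes, kind="nearest", bounds_error=False, fill_value=0)
    print(hail_interp(norm_model))  # [25. 40. 40. 30.]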
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_track_errors(model_tracks, obs_tracks, track_pairings):
    """
    Calculates spatial and temporal translation errors between matched forecast and observed tracks.

    Args:
        model_tracks: List of model track STObjects
        obs_tracks: List of observed track STObjects
        track_pairings: List of tuples pairing forecast and observed tracks.

    Returns:
        pandas DataFrame containing different track errors
    """
    columns = ['obs_track_id', 'translation_error_x', 'translation_error_y',
               'start_time_difference', 'end_time_difference']
    track_errors = pd.DataFrame(index=list(range(len(model_tracks))), columns=columns)
    for p, pair in enumerate(track_pairings):
        model_track = model_tracks[pair[0]]
        if type(pair[1]) in [int, np.int64]:
            obs_track = obs_tracks[pair[1]]
        else:
            obs_track = obs_tracks[pair[1][0]]
        model_com = model_track.center_of_mass(model_track.start_time)
        obs_com = obs_track.center_of_mass(obs_track.start_time)
        track_errors.loc[pair[0], 'obs_track_id'] = pair[1] if type(pair[1]) in [int, np.int64] else pair[1][0]
        track_errors.loc[pair[0], 'translation_error_x'] = model_com[0] - obs_com[0]
        track_errors.loc[pair[0], 'translation_error_y'] = model_com[1] - obs_com[1]
        track_errors.loc[pair[0], 'start_time_difference'] = model_track.start_time - obs_track.start_time
        track_errors.loc[pair[0], 'end_time_difference'] = model_track.end_time - obs_track.end_time
    return track_errors
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __display_for_tree_node(self, tree_node):
    """
    Return the text display for the given tree node. Based on number of keys associated with tree node.
    """
    keys = tree_node.keys
    if len(keys) == 1:
        return "{0} ({1})".format(tree_node.keys[-1], tree_node.count)
    elif len(keys) == 2:
        months = (_("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"),
                  _("August"), _("September"), _("October"), _("November"), _("December"))
        return "{0} ({1})".format(months[max(min(tree_node.keys[1] - 1, 11), 0)], tree_node.count)
    else:
        weekdays = (_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), _("Friday"), _("Saturday"),
                    _("Sunday"))
        date = datetime.date(tree_node.keys[0], tree_node.keys[1], tree_node.keys[2])
        return "{0} - {1} ({2})".format(tree_node.keys[2], weekdays[date.weekday()], tree_node.count)
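Editor's note: the weekday lookup in the three-key branch relies on datetime.date.weekday returning 0 for Monday; a standalone sketch with an assumed date:

    import datetime

    weekdays = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")
    date = datetime.date(2023, 5, 17)          # assumed (year, month, day) keys
    print(weekdays[date.weekday()])            # Wednesday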
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __insert_child(self, parent_tree_node, index, tree_node):
    """
    Called from the root tree node when a new node is inserted into tree. This method creates properties
    to represent the node for display and inserts it into the item model controller.
    """
    # manage the item model
    parent_item = self.__mapping[id(parent_tree_node)]
    self.item_model_controller.begin_insert(index, index, parent_item.row, parent_item.id)
    properties = {
        "display": self.__display_for_tree_node(tree_node),
        "tree_node": tree_node  # used for removal and other lookup
    }
    item = self.item_model_controller.create_item(properties)
    parent_item.insert_child(index, item)
    self.__mapping[id(tree_node)] = item
    self.item_model_controller.end_insert()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __remove_child(self, parent_tree_node, index):
    """
    Called from the root tree node when a node is removed from the tree. This method removes it from the
    item model controller.
    """
    # get parent and item
    parent_item = self.__mapping[id(parent_tree_node)]
    # manage the item model
    self.item_model_controller.begin_remove(index, index, parent_item.row, parent_item.id)
    child_item = parent_item.children[index]
    parent_item.remove_child(child_item)
    self.__mapping.pop(id(child_item.data["tree_node"]))
    self.item_model_controller.end_remove()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_all_nodes(self):
    """
    Update all tree item displays if needed. Usually for count updates.
    """
    item_model_controller = self.item_model_controller
    if item_model_controller:
        if self.__node_counts_dirty:
            for item in self.__mapping.values():
                if "tree_node" in item.data:  # don't update the root node
                    tree_node = item.data["tree_node"]
                    item.data["display"] = self.__display_for_tree_node(tree_node)
                    item_model_controller.data_changed(item.row, item.parent.row, item.parent.id)
            self.__node_counts_dirty = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def date_browser_selection_changed(self, selected_indexes):
    """
    Called to handle selection changes in the tree widget.

    This method should be connected to the on_selection_changed event. This method builds a list of keys
    represented by all selected items. It then provides date_filter to filter data items based on the list
    of keys. It then sets the filter into the document controller.

    :param selected_indexes: The selected indexes
    :type selected_indexes: list of ints
    """
    partial_date_filters = list()
    for index, parent_row, parent_id in selected_indexes:
        item_model_controller = self.item_model_controller
        tree_node = item_model_controller.item_value("tree_node", index, parent_id)
        partial_date_filters.append(ListModel.PartialDateFilter("created_local", *tree_node.keys))
    if len(partial_date_filters) > 0:
        self.__date_filter = ListModel.OrFilter(partial_date_filters)
    else:
        self.__date_filter = None
    self.__update_filter()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def text_filter_changed(self, text):
    """
    Called to handle changes to the text filter.

    :param text: The text for the filter.
    """
    text = text.strip() if text else None
    if text is not None:
        self.__text_filter = ListModel.TextFilter("text_for_filter", text)
    else:
        self.__text_filter = None
    self.__update_filter()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __update_filter(self):
    """
    Create a combined filter. Set the resulting filter into the document controller.
    """
    filters = list()
    if self.__date_filter:
        filters.append(self.__date_filter)
    if self.__text_filter:
        filters.append(self.__text_filter)
    self.document_controller.display_filter = ListModel.AndFilter(filters)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_keys(self):
    """
    Return the keys associated with this node by adding its key and then adding parent keys recursively.
    """
    keys = list()
    tree_node = self
    while tree_node is not None and tree_node.key is not None:
        keys.insert(0, tree_node.key)
        tree_node = tree_node.parent
    return keys
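Editor's note: the parent-chain walk above can be demonstrated with a minimal node class; TreeNode here is a hypothetical stand-in with only the key and parent attributes the loop uses.

    class TreeNode:
        def __init__(self, key, parent=None):
            self.key = key
            self.parent = parent

    # a year/month/day chain rooted at a key-less root node
    leaf = TreeNode(17, TreeNode(5, TreeNode(2023, TreeNode(None))))
    keys = []
    node = leaf
    while node is not None and node.key is not None:
        keys.insert(0, node.key)   # prepend so the root-most key comes first
        node = node.parent
    print(keys)  # [2023, 5, 17]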
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def label_storm_objects(data, method, min_intensity, max_intensity, min_area=1, max_area=100, max_range=1,
                        increment=1, gaussian_sd=0):
    """
    From a 2D grid or time series of 2D grids, this method labels storm objects with either the Enhanced
    Watershed or Hysteresis methods.

    Args:
        data: the gridded data to be labeled. Should be a 2D numpy array in (y, x) coordinate order or a
            3D numpy array in (time, y, x) coordinate order
        method: "ew" or "watershed" for Enhanced Watershed or "hyst" for hysteresis
        min_intensity: Minimum intensity threshold for gridpoints contained within any objects
        max_intensity: For watershed, any points above max_intensity are considered as the same value as max
            intensity. For hysteresis, all objects have to contain at least 1 pixel that equals or exceeds
            this value
        min_area: (default 1) The minimum area of any object in pixels.
        max_area: (default 100) The area threshold in pixels at which the enhanced watershed ends growth.
            Object area may exceed this threshold if the pixels at the last watershed level exceed the
            object area.
        max_range: Maximum difference between the maximum and minimum value in an enhanced watershed object
            before growth is stopped.
        increment: Discretization increment for the enhanced watershed
        gaussian_sd: Standard deviation of Gaussian filter applied to data

    Returns:
        label_grid: an ndarray with the same shape as data in which each pixel is labeled with a positive
            integer value.
    """
    if method.lower() in ["ew", "watershed"]:
        labeler = EnhancedWatershed(min_intensity, increment, max_intensity, max_area, max_range)
    else:
        labeler = Hysteresis(min_intensity, max_intensity)
    if len(data.shape) == 2:
        label_grid = labeler.label(gaussian_filter(data, gaussian_sd))
        label_grid[data < min_intensity] = 0
        if min_area > 1:
            label_grid = labeler.size_filter(label_grid, min_area)
    else:
        label_grid = np.zeros(data.shape, dtype=int)
        for t in range(data.shape[0]):
            label_grid[t] = labeler.label(gaussian_filter(data[t], gaussian_sd))
            label_grid[t][data[t] < min_intensity] = 0
            if min_area > 1:
                label_grid[t] = labeler.size_filter(label_grid[t], min_area)
    return label_grid
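Editor's note: a minimal sketch of the smooth-label-mask pattern above, using scipy.ndimage.label as a simple connected-component stand-in for the EnhancedWatershed/Hysteresis labelers (which are not shown here). The synthetic field and thresholds are assumed.

    import numpy as np
    from scipy.ndimage import gaussian_filter, label

    data = np.zeros((50, 50))
    data[20:25, 20:25] = 30.0                      # one synthetic "storm"
    min_intensity, gaussian_sd = 10.0, 1.0         # assumed thresholds
    label_grid, num_objects = label(gaussian_filter(data, gaussian_sd) >= min_intensity)
    label_grid[data < min_intensity] = 0           # mirror the raw-intensity masking above
    print(num_objects, np.unique(label_grid))      # 1 [0 1]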
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_storm_objects(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, obj_buffer=0):
    """
    After storms are labeled, this method extracts the storm objects from the grid and places them into
    STObjects. The STObjects contain intensity, location, and shape information about each storm at each
    timestep.

    Args:
        label_grid: 2D or 3D array output by label_storm_objects.
        data: 2D or 3D array used as input to label_storm_objects.
        x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.
        y_grid: 2D array of y-coordinate data.
        times: List or array of time values, preferably as integers
        dx: grid spacing in same units as x_grid and y_grid.
        dt: period elapsed between times
        obj_buffer: number of extra pixels beyond bounding box of object to store in each STObject

    Returns:
        storm_objects: list of lists containing STObjects identified at each time.
    """
    storm_objects = []
    if len(label_grid.shape) == 3:
        ij_grid = np.indices(label_grid.shape[1:])
        for t, time in enumerate(times):
            storm_objects.append([])
            object_slices = list(find_objects(label_grid[t], label_grid[t].max()))
            if len(object_slices) > 0:
                for o, obj_slice in enumerate(object_slices):
                    if obj_buffer > 0:
                        obj_slice_buff = tuple(slice(np.maximum(0, osl.start - obj_buffer),
                                                     np.minimum(osl.stop + obj_buffer,
                                                                label_grid.shape[l + 1]))
                                               for l, osl in enumerate(obj_slice))
                    else:
                        obj_slice_buff = obj_slice
                    storm_objects[-1].append(STObject(data[t][obj_slice_buff],
                                                      np.where(label_grid[t][obj_slice_buff] == o + 1, 1, 0),
                                                      x_grid[obj_slice_buff],
                                                      y_grid[obj_slice_buff],
                                                      ij_grid[0][obj_slice_buff],
                                                      ij_grid[1][obj_slice_buff],
                                                      time, time, dx=dx, step=dt))
                    if t > 0:
                        dims = storm_objects[-1][-1].timesteps[0].shape
                        storm_objects[-1][-1].estimate_motion(time, data[t - 1], dims[1], dims[0])
    else:
        ij_grid = np.indices(label_grid.shape)
        storm_objects.append([])
        object_slices = list(find_objects(label_grid, label_grid.max()))
        if len(object_slices) > 0:
            for o, obj_slice in enumerate(object_slices):
                if obj_buffer > 0:
                    obj_slice_buff = tuple(slice(np.maximum(0, osl.start - obj_buffer),
                                                 np.minimum(osl.stop + obj_buffer, label_grid.shape[l]))
                                           for l, osl in enumerate(obj_slice))
                else:
                    obj_slice_buff = obj_slice
                storm_objects[-1].append(STObject(data[obj_slice_buff],
                                                  np.where(label_grid[obj_slice_buff] == o + 1, 1, 0),
                                                  x_grid[obj_slice_buff],
                                                  y_grid[obj_slice_buff],
                                                  ij_grid[0][obj_slice_buff],
                                                  ij_grid[1][obj_slice_buff],
                                                  times, times, dx=dx, step=dt))
    return storm_objects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_storm_patches(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, patch_radius=16):
    """
    After storms are labeled, this method extracts boxes of equal size centered on each storm from the grid
    and places them into STObjects. The STObjects contain intensity, location, and shape information about
    each storm at each timestep.

    Args:
        label_grid: 2D or 3D array output by label_storm_objects.
        data: 2D or 3D array used as input to label_storm_objects.
        x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.
        y_grid: 2D array of y-coordinate data.
        times: List or array of time values, preferably as integers
        dx: grid spacing in same units as x_grid and y_grid.
        dt: period elapsed between times
        patch_radius: Number of grid points from center of mass to extract

    Returns:
        storm_objects: list of lists containing STObjects identified at each time.
    """
    storm_objects = []
    if len(label_grid.shape) == 3:
        ij_grid = np.indices(label_grid.shape[1:])
        for t, time in enumerate(times):
            storm_objects.append([])
            # object_slices = find_objects(label_grid[t], label_grid[t].max())
            centers = list(center_of_mass(data[t], labels=label_grid[t],
                                          index=np.arange(1, label_grid[t].max() + 1)))
            if len(centers) > 0:
                for o, center in enumerate(centers):
                    int_center = np.round(center).astype(int)
                    obj_slice_buff = (slice(int_center[0] - patch_radius, int_center[0] + patch_radius),
                                      slice(int_center[1] - patch_radius, int_center[1] + patch_radius))
                    storm_objects[-1].append(STObject(data[t][obj_slice_buff],
                                                      np.where(label_grid[t][obj_slice_buff] == o + 1, 1, 0),
                                                      x_grid[obj_slice_buff],
                                                      y_grid[obj_slice_buff],
                                                      ij_grid[0][obj_slice_buff],
                                                      ij_grid[1][obj_slice_buff],
                                                      time, time, dx=dx, step=dt))
                    if t > 0:
                        dims = storm_objects[-1][-1].timesteps[0].shape
                        storm_objects[-1][-1].estimate_motion(time, data[t - 1], dims[1], dims[0])
    else:
        ij_grid = np.indices(label_grid.shape)
        storm_objects.append([])
        centers = list(center_of_mass(data, labels=label_grid, index=np.arange(1, label_grid.max() + 1)))
        if len(centers) > 0:
            for o, center in enumerate(centers):
                int_center = np.round(center).astype(int)
                obj_slice_buff = (slice(int_center[0] - patch_radius, int_center[0] + patch_radius),
                                  slice(int_center[1] - patch_radius, int_center[1] + patch_radius))
                storm_objects[-1].append(STObject(data[obj_slice_buff],
                                                  np.where(label_grid[obj_slice_buff] == o + 1, 1, 0),
                                                  x_grid[obj_slice_buff],
                                                  y_grid[obj_slice_buff],
                                                  ij_grid[0][obj_slice_buff],
                                                  ij_grid[1][obj_slice_buff],
                                                  times[0], times[0], dx=dx, step=dt))
    return storm_objects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def track_storms(storm_objects, times, distance_components, distance_maxima, distance_weights,
                 tracked_objects=None):
    """
    Given the output of extract_storm_objects, this method tracks storms through time and merges individual
    STObjects into a set of tracks.

    Args:
        storm_objects: list of list of STObjects that have not been tracked.
        times: List of times associated with each set of STObjects
        distance_components: list of function objects that make up components of distance function
        distance_maxima: array of maximum values for each distance for normalization purposes
        distance_weights: weight given to each component of the distance function. Should add to 1.
        tracked_objects: List of STObjects that have already been tracked.

    Returns:
        tracked_objects:
    """
    obj_matcher = ObjectMatcher(distance_components, distance_weights, distance_maxima)
    if tracked_objects is None:
        tracked_objects = []
    for t, time in enumerate(times):
        past_time_objects = []
        for obj in tracked_objects:
            if obj.end_time == time - obj.step:
                past_time_objects.append(obj)
        if len(past_time_objects) == 0:
            tracked_objects.extend(storm_objects[t])
        elif len(past_time_objects) > 0 and len(storm_objects[t]) > 0:
            assignments = obj_matcher.match_objects(past_time_objects, storm_objects[t],
                                                    times[t - 1], times[t])
            unpaired = list(range(len(storm_objects[t])))
            for pair in assignments:
                past_time_objects[pair[0]].extend(storm_objects[t][pair[1]])
                unpaired.remove(pair[1])
            if len(unpaired) > 0:
                for up in unpaired:
                    tracked_objects.append(storm_objects[t][up])
    return tracked_objects
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Euclidean distance between the centroids of item_a and item_b.

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    ax, ay = item_a.center_of_mass(time_a)
    bx, by = item_b.center_of_mass(time_b)
    return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
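Editor's note: the cap-and-scale pattern above maps any raw distance into [0, 1]; a worked sketch with assumed centroids:

    import numpy as np

    ax, ay = 10.0, 20.0   # assumed centroid of object A
    bx, by = 13.0, 24.0   # assumed centroid of object B
    max_value = 10.0
    d = np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
    print(d)  # 0.5 -- raw distance 5.0, capped at max_value then scaled by it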
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Centroid distance with motion corrections.

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    ax, ay = item_a.center_of_mass(time_a)
    bx, by = item_b.center_of_mass(time_b)
    if time_a < time_b:
        bx = bx - item_b.u
        by = by - item_b.v
    else:
        ax = ax - item_a.u
        ay = ay - item_a.v
    return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def closest_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Euclidean distance between the pixels in item_a and item_b closest to each other.

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    return np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ellipse_distance(item_a, time_a, item_b, time_b, max_value):
    """
    Calculate differences in the properties of ellipses fitted to each object.

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    ts = np.array([0, np.pi])
    ell_a = item_a.get_ellipse_model(time_a)
    ell_b = item_b.get_ellipse_model(time_b)
    ends_a = ell_a.predict_xy(ts)
    ends_b = ell_b.predict_xy(ts)
    distances = np.sqrt((ends_a[:, 0:1] - ends_b[:, 0:1].T) ** 2 + (ends_a[:, 1:] - ends_b[:, 1:].T) ** 2)
    return np.minimum(distances[0, 1], max_value) / float(max_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
    """
    Percentage of pixels in each object that do not overlap with the other object

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def max_intensity(item_a, time_a, item_b, time_b, max_value):
    """
    RMS difference in maximum intensity

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    intensity_a = item_a.max_intensity(time_a)
    intensity_b = item_b.max_intensity(time_b)
    diff = np.sqrt((intensity_a - intensity_b) ** 2)
    return np.minimum(diff, max_value) / float(max_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def area_difference(item_a, time_a, item_b, time_b, max_value):
    """
    RMS Difference in object areas.

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    size_a = item_a.size(time_a)
    size_b = item_b.size(time_b)
    diff = np.sqrt((size_a - size_b) ** 2)
    return np.minimum(diff, max_value) / float(max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mean_minimum_centroid_distance(item_a, item_b, max_value): """ RMS difference in the minimum distances from the centroids of one track to the centroids of another track Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """
centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
distance_matrix = (centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2 + (centroids_a[:, 1:] - centroids_b.T[1:]) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
return np.minimum(mean_min_distances, max_value) / float(max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mean_min_time_distance(item_a, item_b, max_value): """ Calculate the mean time difference among the time steps in each object. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """
times_a = item_a.times.reshape((item_a.times.size, 1))
times_b = item_b.times.reshape((1, item_b.times.size))
distance_matrix = (times_a - times_b) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
return np.minimum(mean_min_distances, max_value) / float(max_value)
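The reshape-to-column against reshape-to-row pattern above relies on numpy broadcasting to build the full matrix of squared time gaps; the same trick drives the centroid version two functions earlier. A small self-contained illustration with hypothetical time values:

import numpy as np

times_a = np.array([0, 1, 2]).reshape((3, 1))   # column vector
times_b = np.array([1, 3]).reshape((1, 2))      # row vector
distance_matrix = (times_a - times_b) ** 2      # 3x2 matrix of squared gaps
# min over each axis picks the closest counterpart in the other track
print(distance_matrix.min(axis=0))              # [0 1]
print(distance_matrix.min(axis=1))              # [1 0 1]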
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_centroid_distance(item_a, item_b, max_value): """ Distance between the centroids of the first step in each object. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """
start_a = item_a.center_of_mass(item_a.times[0])
start_b = item_b.center_of_mass(item_b.times[0])
start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2)
return np.minimum(start_distance, max_value) / float(max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_time_distance(item_a, item_b, max_value): """ Absolute difference between the starting times of each item. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """
start_time_diff = np.abs(item_a.times[0] - item_b.times[0])
return np.minimum(start_time_diff, max_value) / float(max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def duration_distance(item_a, item_b, max_value): """ Absolute difference in the duration of two items Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """
duration_a = item_a.times.size
duration_b = item_b.times.size
return np.minimum(np.abs(duration_a - duration_b), max_value) / float(max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mean_area_distance(item_a, item_b, max_value): """ Absolute difference in the means of the areas of each track over time. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """
mean_area_a = np.mean([item_a.size(t) for t in item_a.times])
mean_area_b = np.mean([item_b.size(t) for t in item_b.times])
return np.abs(mean_area_a - mean_area_b) / float(max_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match_objects(self, set_a, set_b, time_a, time_b): """ Match two sets of objects at particular times. Args: set_a: list of STObjects set_b: list of STObjects time_a: time at which set_a is being evaluated for matching time_b: time at which set_b is being evaluated for matching Returns: List of tuples containing (set_a index, set_b index) for each match """
costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where(min_row_costs < 100)[0]
good_cols = np.where(min_col_costs < 100)[0]
assignments = []
if len(good_rows) > 0 and len(good_cols) > 0:
    munk = Munkres()
    initial_assignments = munk.compute(costs[tuple(np.meshgrid(good_rows, good_cols, indexing='ij'))].tolist())
    initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
    for a in initial_assignments:
        if costs[a[0], a[1]] < 100:
            assignments.append(a)
return assignments
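The gating trick above scales costs so that 100 marks an impossible pairing, drops any row or column whose best cost is still at or above that threshold, and runs the Hungarian algorithm only on the surviving submatrix. A minimal sketch of the same idea, assuming the munkres package is installed and using hypothetical cost values:

import numpy as np
from munkres import Munkres

costs = np.array([[ 20., 150.],
                  [150.,  40.],
                  [150., 150.]])                    # third object matches nothing
good_rows = np.where(costs.min(axis=1) < 100)[0]    # [0, 1]
good_cols = np.where(costs.min(axis=0) < 100)[0]    # [0, 1]
sub = costs[tuple(np.meshgrid(good_rows, good_cols, indexing='ij'))]
pairs = Munkres().compute(sub.tolist())
assignments = [(int(good_rows[r]), int(good_cols[c])) for r, c in pairs
               if costs[good_rows[r], good_cols[c]] < 100]
print(assignments)                                  # [(0, 0), (1, 1)]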
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def total_cost_function(self, item_a, item_b, time_a, time_b): """ Calculate total cost function between two items. Args: item_a: STObject item_b: STObject time_a: Timestep in item_a at which cost function is evaluated time_b: Timestep in item_b at which cost function is evaluated Returns: The total weighted distance between item_a and item_b """
distances = np.zeros(len(self.weights))
for c, component in enumerate(self.cost_function_components):
    distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
total_distance = np.sum(self.weights * distances)
return total_distance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def variable_specifier(self) -> dict: """Return the variable specifier for this variable. The specifier can be used to lookup the value of this variable in a computation context. """
if self.value_type is not None:
    return {"type": "variable", "version": 1, "uuid": str(self.uuid), "x-name": self.name, "x-value": self.value}
else:
    return self.specifier
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bound_variable(self): """Return an object with a value property and a changed_event. The value property returns the value of the variable. The changed_event is fired whenever the value changes. """
class BoundVariable:
    def __init__(self, variable):
        self.__variable = variable
        self.changed_event = Event.Event()
        self.needs_rebind_event = Event.Event()

        def property_changed(key):
            if key == "value":
                self.changed_event.fire()

        self.__variable_property_changed_listener = variable.property_changed_event.listen(property_changed)

    @property
    def value(self):
        return self.__variable.value

    def close(self):
        self.__variable_property_changed_listener.close()
        self.__variable_property_changed_listener = None

return BoundVariable(self)
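The bound-variable wrapper above is an instance of the listener pattern: it subscribes to the variable's property_changed_event and re-fires its own changed_event when the value changes. A framework-free sketch of the same idea, with a plain callback list standing in for the nion Event class (all names in this sketch are hypothetical):

class SimpleEvent:
    def __init__(self):
        self._listeners = []
    def listen(self, fn):
        self._listeners.append(fn)
        return fn
    def fire(self, *args):
        for fn in self._listeners:
            fn(*args)

class Variable:
    def __init__(self, value):
        self._value = value
        self.property_changed_event = SimpleEvent()
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, new_value):
        self._value = new_value
        self.property_changed_event.fire("value")

v = Variable(3)
v.property_changed_event.listen(lambda key: print("changed:", key, v.value))
v.value = 7   # prints: changed: value 7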
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolve_object_specifier(self, object_specifier, secondary_specifier=None, property_name=None, objects_model=None): """Resolve the object specifier. First lookup the object specifier in the enclosing computation. If it's not found, then lookup in the computation's context. Otherwise it should be a value type variable. In that case, return the bound variable. """
variable = self.__computation().resolve_variable(object_specifier)
if not variable:
    return self.__context.resolve_object_specifier(object_specifier, secondary_specifier, property_name, objects_model)
elif variable.specifier is None:
    return variable.bound_variable
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_names(cls, expression): """Return the list of identifiers used in the expression."""
names = set()
try:
    ast_node = ast.parse(expression, "ast")

    class Visitor(ast.NodeVisitor):
        def visit_Name(self, node):
            names.add(node.id)

    Visitor().visit(ast_node)
except Exception:
    pass
return names
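Since this visitor only handles ast.Name nodes, attribute accesses such as src.xdata contribute only their base identifier. A standalone, runnable version for experimentation:

import ast

def parse_names(expression):
    names = set()
    try:
        ast_node = ast.parse(expression, "ast")

        class Visitor(ast.NodeVisitor):
            def visit_Name(self, node):
                names.add(node.id)

        Visitor().visit(ast_node)
    except Exception:
        pass
    return names

print(parse_names("a + b * xdata_mean(src.xdata)"))
# {'a', 'b', 'xdata_mean', 'src'}; the attribute name 'xdata' is not a Name node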
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bind(self, context) -> None: """Bind a context to this computation. The context allows the computation to convert object specifiers to actual objects. """
# make a computation context based on the enclosing context.
self.__computation_context = ComputationContext(self, context)
# re-bind is not valid. be careful to set the computation after the data item is already in document.
for variable in self.variables:
    assert variable.bound_item is None
for result in self.results:
    assert result.bound_item is None
# bind the variables
for variable in self.variables:
    self.__bind_variable(variable)
# bind the results
for result in self.results:
    self.__bind_result(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unbind(self): """Unlisten and close each bound item."""
for variable in self.variables:
    self.__unbind_variable(variable)
for result in self.results:
    self.__unbind_result(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_by_date_key(data_item): """ A sort key for the created field of a data item. Including the uuid makes the sort deterministic. """
return data_item.title + str(data_item.uuid) if data_item.is_live else str(), data_item.date_for_sorting, str(data_item.uuid)
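A minimal sketch of using this key with sorted(); the stand-in data item class below is hypothetical. Non-live items get an empty first tuple element, so the date dominates the comparison for them:

import uuid
import datetime
from dataclasses import dataclass, field

@dataclass
class FakeDataItem:
    title: str
    is_live: bool
    date_for_sorting: datetime.datetime
    uuid: uuid.UUID = field(default_factory=uuid.uuid4)

def sort_by_date_key(data_item):
    return (data_item.title + str(data_item.uuid) if data_item.is_live else str(),
            data_item.date_for_sorting, str(data_item.uuid))

items = [FakeDataItem("b", False, datetime.datetime(2020, 1, 2)),
         FakeDataItem("a", False, datetime.datetime(2020, 1, 1))]
print([i.title for i in sorted(items, key=sort_by_date_key)])  # ['a', 'b']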
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_r_value(self, r_var: str, *, notify_changed=True) -> None: """Used to signal changes to the ref var, which are kept in document controller. ugh."""
self.r_var = r_var
self._description_changed()
if notify_changed:  # set to False to set the r-value at startup; avoid marking it as a change
    self.__notify_description_changed()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_data_and_metadata(self, data_and_metadata, data_modified=None): """Sets the underlying data and data-metadata to the data_and_metadata. Note: this does not make a copy of the data. """
self.increment_data_ref_count()
try:
    if data_and_metadata:
        data = data_and_metadata.data
        data_shape_and_dtype = data_and_metadata.data_shape_and_dtype
        intensity_calibration = data_and_metadata.intensity_calibration
        dimensional_calibrations = data_and_metadata.dimensional_calibrations
        metadata = data_and_metadata.metadata
        timestamp = data_and_metadata.timestamp
        data_descriptor = data_and_metadata.data_descriptor
        timezone = data_and_metadata.timezone or Utility.get_local_timezone()
        timezone_offset = data_and_metadata.timezone_offset or Utility.TimezoneMinutesToStringConverter().convert(Utility.local_utcoffset_minutes())
        new_data_and_metadata = DataAndMetadata.DataAndMetadata(self.__load_data, data_shape_and_dtype, intensity_calibration, dimensional_calibrations, metadata, timestamp, data, data_descriptor, timezone, timezone_offset)
    else:
        new_data_and_metadata = None
    self.__set_data_metadata_direct(new_data_and_metadata, data_modified)
    if self.__data_and_metadata is not None:
        if self.persistent_object_context and not self.persistent_object_context.is_write_delayed(self):
            self.persistent_object_context.write_external_data(self, "data", self.__data_and_metadata.data)
            self.__data_and_metadata.unloadable = True
finally:
    self.decrement_data_ref_count()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_calculated_display_values(self, immediate: bool=False) -> DisplayValues: """Return the display values. Return the current (possibly uncalculated) display values unless 'immediate' is specified. If 'immediate', return the existing (calculated) values if they exist. Using the 'immediate' values avoids calculation except in cases where the display values haven't already been calculated. """
if not immediate or not self.__is_master or not self.__last_display_values:
    if not self.__current_display_values and self.__data_item:
        self.__current_display_values = DisplayValues(self.__data_item.xdata, self.sequence_index, self.collection_index, self.slice_center, self.slice_width, self.display_limits, self.complex_display_type, self.__color_map_data)

        def finalize(display_values):
            self.__last_display_values = display_values
            self.display_values_changed_event.fire()

        self.__current_display_values.on_finalize = finalize
    return self.__current_display_values
return self.__last_display_values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auto_display_limits(self): """Calculate best display limits and set them."""
display_data_and_metadata = self.get_calculated_display_values(True).display_data_and_metadata
data = display_data_and_metadata.data if display_data_and_metadata else None
if data is not None:
    # The old algorithm was a problem during EELS where the signal data
    # is a small percentage of the overall data and was falling outside
    # the included range. This is the new simplified algorithm. Future
    # feature may allow user to select more complex algorithms.
    mn, mx = numpy.nanmin(data), numpy.nanmax(data)
    self.display_limits = mn, mx
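A quick runnable illustration of why the nan-aware reductions matter here; the sample data is hypothetical:

import numpy

data = numpy.array([0.5, 2.0, numpy.nan, -1.0])
print(numpy.min(data), numpy.max(data))        # nan nan (NaN poisons the plain reductions)
print(numpy.nanmin(data), numpy.nanmax(data))  # -1.0 2.0 (usable display limits)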
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_graphic(self, graphic: Graphics.Graphic, *, safe: bool=False) -> typing.Optional[typing.Sequence]: """Remove a graphic, but do it through the container, so dependencies can be tracked."""
return self.remove_model_item(self, "graphics", graphic, safe=safe)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dimensional_shape(self): """Shape of the underlying data, if only one."""
if not self.__data_and_metadata:
    return None
return self.__data_and_metadata.dimensional_shape
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_local_file(fp, name_bytes, writer, dt): """ Writes a zip file local file header structure at the current file position. Returns data_len, crc32 for the data. :param fp: the file pointer to which to write the header :param name_bytes: the name of the file, as bytes :param writer: a function taking an fp parameter to do the writing, returns crc32 :param dt: the datetime to write to the archive """
fp.write(struct.pack('I', 0x04034b50))      # local file header signature
fp.write(struct.pack('H', 10))              # extract version (default)
fp.write(struct.pack('H', 0))               # general purpose bits
fp.write(struct.pack('H', 0))               # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second) // 2  # DOS time stores seconds/2
fp.write(struct.pack('H', msdos_time))      # modification time
fp.write(struct.pack('H', msdos_date))      # modification date
crc32_pos = fp.tell()
fp.write(struct.pack('I', 0))               # crc32 placeholder
data_len_pos = fp.tell()
fp.write(struct.pack('I', 0))               # compressed length placeholder
fp.write(struct.pack('I', 0))               # uncompressed length placeholder
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0))               # extra length
fp.write(name_bytes)
data_start_pos = fp.tell()
crc32 = writer(fp)
data_end_pos = fp.tell()
data_len = data_end_pos - data_start_pos
fp.seek(crc32_pos)
fp.write(struct.pack('I', crc32))           # crc32
fp.seek(data_len_pos)
fp.write(struct.pack('I', data_len))        # compressed length
fp.write(struct.pack('I', data_len))        # uncompressed length (no compression used)
fp.seek(data_end_pos)
return data_len, crc32
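The two-word MS-DOS timestamp packs the year (as an offset from 1980), month, and day into one 16-bit field and the time into another; the standard format stores seconds divided by two, which this runnable round trip of the packing assumes:

import datetime

dt = datetime.datetime(2024, 3, 15, 13, 45, 30)
msdos_date = (dt.year - 1980) << 9 | dt.month << 5 | dt.day
msdos_time = dt.hour << 11 | dt.minute << 5 | dt.second // 2
# unpack again to verify the fields round-trip
year = (msdos_date >> 9) + 1980
month = (msdos_date >> 5) & 0xF
day = msdos_date & 0x1F
seconds = (msdos_time & 0x1F) * 2
print(year, month, day, seconds)   # 2024 3 15 30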
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_directory_data(fp, offset, name_bytes, data_len, crc32, dt): """ Write a zip file central directory entry at the current file position :param fp: the file pointer to which to write the header :param offset: the offset of the associated local file header :param name_bytes: the name of the file, as bytes :param data_len: the length of data that will be written to the archive :param crc32: the crc32 of the data to be written :param dt: the datetime to write to the archive """
fp.write(struct.pack('I', 0x02014b50))      # central directory header signature
fp.write(struct.pack('H', 10))              # made by version (default)
fp.write(struct.pack('H', 10))              # extract version (default)
fp.write(struct.pack('H', 0))               # general purpose bits
fp.write(struct.pack('H', 0))               # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second) // 2  # DOS time stores seconds/2
fp.write(struct.pack('H', msdos_time))      # modification time
fp.write(struct.pack('H', msdos_date))      # modification date
fp.write(struct.pack('I', crc32))           # crc32
fp.write(struct.pack('I', data_len))        # compressed length
fp.write(struct.pack('I', data_len))        # uncompressed length
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0))               # extra length
fp.write(struct.pack('H', 0))               # comments length
fp.write(struct.pack('H', 0))               # disk number
fp.write(struct.pack('H', 0))               # internal file attributes
fp.write(struct.pack('I', 0))               # external file attributes
fp.write(struct.pack('I', offset))          # relative offset of file header
fp.write(name_bytes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_end_of_directory(fp, dir_size, dir_offset, count): """ Write zip file end of directory header at the current file position :param fp: the file pointer to which to write the header :param dir_size: the total size of the directory :param dir_offset: the start of the first directory header :param count: the count of files """
fp.write(struct.pack('I', 0x06054b50))  # end of central directory signature
fp.write(struct.pack('H', 0))           # disk number
fp.write(struct.pack('H', 0))           # disk number of central directory start
fp.write(struct.pack('H', count))       # number of files (this disk)
fp.write(struct.pack('H', count))       # number of files (total)
fp.write(struct.pack('I', dir_size))    # central directory size
fp.write(struct.pack('I', dir_offset))  # central directory offset
fp.write(struct.pack('H', 0))           # comment length
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_zip_fp(fp, data, properties, dir_data_list=None): """ Write custom zip file of data and properties to fp :param fp: the file pointer to which to write the header :param data: the data to write to the file; may be None :param properties: the properties to write to the file; may be None :param dir_data_list: optional list of directory header information structures If dir_data_list is specified, data should be None and properties should be specified. Then the existing data structure will be left alone and only the directory headers and end of directory header will be written. Otherwise, if both data and properties are specified, both are written out in full. The properties param must not change during this method. Callers should take care to ensure this does not happen. """
assert data is not None or properties is not None
# dir_data_list has the format: local file record offset, name, data length, crc32
dir_data_list = list() if dir_data_list is None else dir_data_list
dt = datetime.datetime.now()
if data is not None:
    offset_data = fp.tell()

    def write_data(fp):
        numpy_start_pos = fp.tell()
        numpy.save(fp, data)
        numpy_end_pos = fp.tell()
        fp.seek(numpy_start_pos)
        data_c = numpy.require(data, dtype=data.dtype, requirements=["C_CONTIGUOUS"])
        header_data = fp.read((numpy_end_pos - numpy_start_pos) - data_c.nbytes)  # read the header
        data_crc32 = binascii.crc32(data_c.data, binascii.crc32(header_data)) & 0xFFFFFFFF
        fp.seek(numpy_end_pos)
        return data_crc32

    data_len, crc32 = write_local_file(fp, b"data.npy", write_data, dt)
    dir_data_list.append((offset_data, b"data.npy", data_len, crc32))
if properties is not None:
    json_str = str()
    try:
        class JSONEncoder(json.JSONEncoder):
            def default(self, obj):
                if isinstance(obj, Geometry.IntPoint) or isinstance(obj, Geometry.IntSize) or isinstance(obj, Geometry.IntRect) or isinstance(obj, Geometry.FloatPoint) or isinstance(obj, Geometry.FloatSize) or isinstance(obj, Geometry.FloatRect):
                    return tuple(obj)
                else:
                    return json.JSONEncoder.default(self, obj)

        json_io = io.StringIO()
        json.dump(properties, json_io, cls=JSONEncoder)
        json_str = json_io.getvalue()
    except Exception as e:
        # catch exceptions to avoid corrupt zip files
        import traceback
        logging.error("Exception writing zip file: %s", e)
        traceback.print_exc()
        traceback.print_stack()

    def write_json(fp):
        json_bytes = bytes(json_str, 'ISO-8859-1')
        fp.write(json_bytes)
        return binascii.crc32(json_bytes) & 0xFFFFFFFF

    offset_json = fp.tell()
    json_len, json_crc32 = write_local_file(fp, b"metadata.json", write_json, dt)
    dir_data_list.append((offset_json, b"metadata.json", json_len, json_crc32))
dir_offset = fp.tell()
for offset, name_bytes, data_len, crc32 in dir_data_list:
    write_directory_data(fp, offset, name_bytes, data_len, crc32, dt)
dir_size = fp.tell() - dir_offset
write_end_of_directory(fp, dir_size, dir_offset, len(dir_data_list))
fp.truncate()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_zip(file_path, data, properties): """ Write custom zip file to the file path :param file_path: the file to which to write the zip file :param data: the data to write to the file; may be None :param properties: the properties to write to the file; may be None The properties param must not change during this method. Callers should take care to ensure this does not happen. See write_zip_fp. """
with open(file_path, "w+b") as fp: write_zip_fp(fp, data, properties)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_zip(fp): """ Parse the zip file headers at fp :param fp: the file pointer from which to parse the zip file :return: A tuple of local files, directory headers, and end of central directory The local files are a dictionary where the keys are the local file offset and the values are each a tuple consisting of the name, data position, data length, and crc32. The directory headers are a dictionary where the keys are the names of the files and the values are a tuple consisting of the directory header position, and the associated local file position. The end of central directory is a tuple consisting of the location of the end of central directory header and the location of the first directory header. This method will seek to location 0 of fp and leave fp at end of file. """
local_files = {}
dir_files = {}
eocd = None
fp.seek(0)
while True:
    pos = fp.tell()
    signature = struct.unpack('I', fp.read(4))[0]
    if signature == 0x04034b50:
        fp.seek(pos + 14)
        crc32 = struct.unpack('I', fp.read(4))[0]
        fp.seek(pos + 18)
        data_len = struct.unpack('I', fp.read(4))[0]
        fp.seek(pos + 26)
        name_len = struct.unpack('H', fp.read(2))[0]
        extra_len = struct.unpack('H', fp.read(2))[0]
        name_bytes = fp.read(name_len)
        fp.seek(extra_len, os.SEEK_CUR)
        data_pos = fp.tell()
        fp.seek(data_len, os.SEEK_CUR)
        local_files[pos] = (name_bytes, data_pos, data_len, crc32)
    elif signature == 0x02014b50:
        fp.seek(pos + 28)
        name_len = struct.unpack('H', fp.read(2))[0]
        extra_len = struct.unpack('H', fp.read(2))[0]
        comment_len = struct.unpack('H', fp.read(2))[0]
        fp.seek(pos + 42)
        pos2 = struct.unpack('I', fp.read(4))[0]
        name_bytes = fp.read(name_len)
        fp.seek(pos + 46 + name_len + extra_len + comment_len)
        dir_files[name_bytes] = (pos, pos2)
    elif signature == 0x06054b50:
        fp.seek(pos + 16)
        pos2 = struct.unpack('I', fp.read(4))[0]
        eocd = (pos, pos2)
        break
    else:
        raise IOError()
return local_files, dir_files, eocd
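A minimal in-memory round trip through the writer and parser, assuming write_zip_fp, parse_zip, read_json, and read_data from this module are importable; io.BytesIO stands in for a real file:

import io
import numpy

fp = io.BytesIO()
data = numpy.arange(6).reshape(2, 3)
write_zip_fp(fp, data, {"title": "example"})
local_files, dir_files, eocd = parse_zip(fp)
print(sorted(dir_files.keys()))                                 # [b'data.npy', b'metadata.json']
print(read_json(fp, local_files, dir_files, b"metadata.json"))  # {'title': 'example'}
print(read_data(fp, local_files, dir_files, b"data.npy"))       # the 2x3 array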
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_data(fp, local_files, dir_files, name_bytes): """ Read a numpy data array from the zip file :param fp: a file pointer :param local_files: the local files structure :param dir_files: the directory headers :param name_bytes: the name of the data file to read, as bytes :return: the numpy data array, if found The file pointer will be at a location following the local file entry after this method. The local_files and dir_files should be passed from the results of parse_zip. """
if name_bytes in dir_files:
    fp.seek(local_files[dir_files[name_bytes][1]][1])
    return numpy.load(fp)
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_json(fp, local_files, dir_files, name_bytes): """ Read json properties from the zip file :param fp: a file pointer :param local_files: the local files structure :param dir_files: the directory headers :param name_bytes: the name of the json file to read, as bytes :return: the json properties as a dictionary, if found The file pointer will be at a location following the local file entry after this method. The local_files and dir_files should be passed from the results of parse_zip. """
if name_bytes in dir_files:
    json_pos = local_files[dir_files[name_bytes][1]][1]
    json_len = local_files[dir_files[name_bytes][1]][2]
    fp.seek(json_pos)
    json_properties = fp.read(json_len)
    return json.loads(json_properties.decode("utf-8"))
return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rewrite_zip(file_path, properties): """ Rewrite the json properties in the zip file :param file_path: the file path to the zip file :param properties: the updated properties to write to the zip file This method will attempt to keep the data file within the zip file intact without rewriting it. However, if the data file is not the first item in the zip file, this method will rewrite it. The properties param must not change during this method. Callers should take care to ensure this does not happen. """
with open(file_path, "r+b") as fp: local_files, dir_files, eocd = parse_zip(fp) # check to make sure directory has two files, named data.npy and metadata.json, and that data.npy is first # TODO: check compression, etc. if len(dir_files) == 2 and b"data.npy" in dir_files and b"metadata.json" in dir_files and dir_files[b"data.npy"][1] == 0: fp.seek(dir_files[b"metadata.json"][1]) dir_data_list = list() local_file_pos = dir_files[b"data.npy"][1] local_file = local_files[local_file_pos] dir_data_list.append((local_file_pos, b"data.npy", local_file[2], local_file[3])) write_zip_fp(fp, None, properties, dir_data_list) else: data = None if b"data.npy" in dir_files: fp.seek(local_files[dir_files[b"data.npy"][1]][1]) data = numpy.load(fp) fp.seek(0) write_zip_fp(fp, data, properties)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_matching(cls, file_path): """ Return whether the given absolute file path is an ndata file. """
if file_path.endswith(".ndata") and os.path.exists(file_path):
    try:
        with open(file_path, "r+b") as fp:
            local_files, dir_files, eocd = parse_zip(fp)
            contains_data = b"data.npy" in dir_files
            contains_metadata = b"metadata.json" in dir_files
            file_count = contains_data + contains_metadata  # use fact that True is 1, False is 0
            # TODO: make sure ndata isn't compressed, or handle it
            if len(dir_files) != file_count or file_count == 0:
                return False
            return True
    except Exception as e:
        logging.error("Exception parsing ndata file: %s", file_path)
        logging.error(str(e))
return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_data(self, data, file_datetime): """ Write data to the ndata file specified by reference. :param data: the numpy array data to write :param file_datetime: the datetime for the file """
with self.__lock:
    assert data is not None
    absolute_file_path = self.__file_path
    #logging.debug("WRITE data file %s for %s", absolute_file_path, key)
    make_directory_if_needed(os.path.dirname(absolute_file_path))
    properties = self.read_properties() if os.path.exists(absolute_file_path) else dict()
    write_zip(absolute_file_path, data, properties)
    # convert to utc time.
    tz_minutes = Utility.local_utcoffset_minutes(file_datetime)
    timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60
    os.utime(absolute_file_path, (time.time(), timestamp))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_properties(self, properties, file_datetime): """ Write properties to the ndata file specified by reference. :param properties: the dict to write to the file :param file_datetime: the datetime for the file The properties param must not change during this method. Callers should take care to ensure this does not happen. """
with self.__lock:
    absolute_file_path = self.__file_path
    #logging.debug("WRITE properties %s for %s", absolute_file_path, key)
    make_directory_if_needed(os.path.dirname(absolute_file_path))
    exists = os.path.exists(absolute_file_path)
    if exists:
        rewrite_zip(absolute_file_path, Utility.clean_dict(properties))
    else:
        write_zip(absolute_file_path, None, Utility.clean_dict(properties))
    # convert to utc time.
    tz_minutes = Utility.local_utcoffset_minutes(file_datetime)
    timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60
    os.utime(absolute_file_path, (time.time(), timestamp))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_properties(self): """ Read properties from the ndata file reference :return: a dict of the properties """
with self.__lock:
    absolute_file_path = self.__file_path
    with open(absolute_file_path, "rb") as fp:
        local_files, dir_files, eocd = parse_zip(fp)
        properties = read_json(fp, local_files, dir_files, b"metadata.json")
    return properties
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_data(self): """ Read data from the ndata file reference :return: a numpy array of the data; may be None """
with self.__lock:
    absolute_file_path = self.__file_path
    #logging.debug("READ data file %s", absolute_file_path)
    with open(absolute_file_path, "rb") as fp:
        local_files, dir_files, eocd = parse_zip(fp)
        return read_data(fp, local_files, dir_files, b"data.npy")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove(self): """ Remove the ndata file reference. """
with self.__lock:
    absolute_file_path = self.__file_path
    #logging.debug("DELETE data file %s", absolute_file_path)
    if os.path.isfile(absolute_file_path):
        os.remove(absolute_file_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_menu(self, display_type_menu, document_controller, display_panel): """Build the dynamic menu for the selected display panel. The user accesses this menu by right-clicking on the display panel. The basic menu items switch to an empty display panel or a browser display panel. After that, each display controller factory is given a chance to add to the menu. The display controllers (for instance, a scan acquisition controller) may add their own menu items. """
dynamic_live_actions = list()

def switch_to_display_content(display_panel_type):
    self.switch_to_display_content(document_controller, display_panel, display_panel_type, display_panel.display_item)

empty_action = display_type_menu.add_menu_item(_("Clear Display Panel"), functools.partial(switch_to_display_content, "empty-display-panel"))
display_type_menu.add_separator()
data_item_display_action = display_type_menu.add_menu_item(_("Display Item"), functools.partial(switch_to_display_content, "data-display-panel"))
thumbnail_browser_action = display_type_menu.add_menu_item(_("Thumbnail Browser"), functools.partial(switch_to_display_content, "thumbnail-browser-display-panel"))
grid_browser_action = display_type_menu.add_menu_item(_("Grid Browser"), functools.partial(switch_to_display_content, "browser-display-panel"))
display_type_menu.add_separator()
display_panel_type = display_panel.display_panel_type
empty_action.checked = display_panel_type == "empty" and display_panel.display_panel_controller is None
data_item_display_action.checked = display_panel_type == "data_item"
thumbnail_browser_action.checked = display_panel_type == "horizontal"
grid_browser_action.checked = display_panel_type == "grid"
dynamic_live_actions.append(empty_action)
dynamic_live_actions.append(data_item_display_action)
dynamic_live_actions.append(thumbnail_browser_action)
dynamic_live_actions.append(grid_browser_action)
for factory in self.__display_controller_factories.values():
    dynamic_live_actions.extend(factory.build_menu(display_type_menu, display_panel))
return dynamic_live_actions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bounds(self) -> typing.Tuple[typing.Tuple[float, float], typing.Tuple[float, float]]: """Return the bounds property in relative coordinates. Bounds is a tuple ((top, left), (height, width))"""
...
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vector(self) -> typing.Tuple[typing.Tuple[float, float], typing.Tuple[float, float]]: """Return the vector property in relative coordinates. Vector will be a tuple of tuples ((y_start, x_start), (y_end, x_end))."""
...
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_widget_to_content(self, widget): """Subclasses should call this to add content in the section's top level column."""
self.__section_content_column.add_spacing(4)
self.__section_content_column.add(widget)