| source_code (string, 45 to 7.24k chars) | function_name (string, 1 to 111 chars) | messages (list) |
|---|---|---|
def x(self, input_grid):
unset = 0
high_labels, num_labels = label(input_grid > self.high_thresh)
region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.
arange(1, num_labels + 1)))[::-1]
output_grid = np.zeros(input_grid.shape, dtype=int)
stack = []
for rank in region_ranking:
label_num = rank + 1
label_i, label_j = np.where(high_labels == label_num)
for i in range(label_i.size):
if output_grid[label_i[i], label_j[i]] == unset:
stack.append((label_i[i], label_j[i]))
while len(stack) > 0:
index = stack.pop()
output_grid[index] = label_num
for i in range(index[0] - 1, index[0] + 2):
for j in range(index[1] - 1, index[1] + 2):
if 0 <= i < output_grid.shape[0
] and 0 <= j < output_grid.shape[1]:
if input_grid[i, j] > self.low_thresh and output_grid[
i, j] == unset:
stack.append((i, j))
return output_grid
|
label
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, input_grid):\n unset = 0\n high_labels, num_labels = label(input_grid > self.high_thresh)\n region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.\n arange(1, num_labels + 1)))[::-1]\n output_grid = np.zeros(input_grid.shape, dtype=int)\n stack = []\n for rank in region_ranking:\n label_num = rank + 1\n label_i, label_j = np.where(high_labels == label_num)\n for i in range(label_i.size):\n if output_grid[label_i[i], label_j[i]] == unset:\n stack.append((label_i[i], label_j[i]))\n while len(stack) > 0:\n index = stack.pop()\n output_grid[index] = label_num\n for i in range(index[0] - 1, index[0] + 2):\n for j in range(index[1] - 1, index[1] + 2):\n if 0 <= i < output_grid.shape[0\n ] and 0 <= j < output_grid.shape[1]:\n if input_grid[i, j] > self.low_thresh and output_grid[\n i, j] == unset:\n stack.append((i, j))\n return output_grid\n",
"role": "user"
},
{
"content": "label",
"role": "assistant"
}
] |
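The row above implements two-threshold (hysteresis) region growing: connected regions above `high_thresh` are ranked by their maxima, then flood-filled outward through neighbours above `low_thresh`. A minimal sketch of the seeding and ranking step, assuming `label` and `maximum` come from `scipy.ndimage` (the call signatures match):

```python
import numpy as np
from scipy.ndimage import label, maximum

# Toy grid: one strong region (max 9) and one weaker cell (7).
grid = np.array([[0., 5., 5., 0.],
                 [0., 9., 5., 0.],
                 [0., 0., 0., 7.]])

high, low = 8.0, 4.0
seeds, n = label(grid > high)                  # seed regions above high_thresh
peaks = maximum(grid, seeds, index=np.arange(1, n + 1))
ranking = np.argsort(peaks)[::-1]              # strongest seed grows first
print(n, peaks, ranking)                       # 1 [9.] [0]
```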
def x(labeled_grid, min_size):
out_grid = np.zeros(labeled_grid.shape, dtype=int)
slices = find_objects(labeled_grid)
j = 1
for i, s in enumerate(slices):
box = labeled_grid[s]
size = np.count_nonzero(box.ravel() == i + 1)
if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:
out_grid[np.where(labeled_grid == i + 1)] = j
j += 1
return out_grid
|
size_filter
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(labeled_grid, min_size):\n out_grid = np.zeros(labeled_grid.shape, dtype=int)\n slices = find_objects(labeled_grid)\n j = 1\n for i, s in enumerate(slices):\n box = labeled_grid[s]\n size = np.count_nonzero(box.ravel() == i + 1)\n if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:\n out_grid[np.where(labeled_grid == i + 1)] = j\n j += 1\n return out_grid\n",
"role": "user"
},
{
"content": "size_filter",
"role": "assistant"
}
] |
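size_filter keeps only regions with at least min_size pixels and a bounding box taller and wider than one cell. A short sketch of the find_objects bookkeeping it relies on (scipy.ndimage assumed):

```python
import numpy as np
from scipy.ndimage import label, find_objects

grid = np.array([[1, 1, 0, 0],
                 [1, 1, 0, 1],
                 [0, 0, 0, 1]])
labeled, _ = label(grid)
for i, s in enumerate(find_objects(labeled)):
    box = labeled[s]                           # bounding box of region i+1
    size = np.count_nonzero(box == i + 1)      # pixel count inside the box
    print(i + 1, size, box.shape)              # 1 4 (2, 2) then 2 2 (2, 1)
```

Under the row's rules, region 2 here would be dropped: its bounding box is only one column wide.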
def x(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel='Probability of False Detection', ylabel=
'Probability of Detection', title='ROC Curve', ticks=np.arange(0, 1.1,
0.1), dpi=300, legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5),
label_fontsize=14, title_fontsize=16, tick_fontsize=12):
if legend_params is None:
legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True)
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(ticks, ticks, 'k--')
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in
b_set])
pod_range = np.percentile(broc_curves[:, 0], ci, axis=1)
pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
pofd_poly[np.isnan(pofd_poly)] = 0
plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
roc_data = roc_obj.roc_curve()
plt.plot(roc_data['POFD'], roc_data['POD'], marker=markers[r],
color=colors[r], label=obj_labels[r])
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
plt.close()
|
roc_curve
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),\n xlabel='Probability of False Detection', ylabel=\n 'Probability of Detection', title='ROC Curve', ticks=np.arange(0, 1.1, \n 0.1), dpi=300, legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5),\n label_fontsize=14, title_fontsize=16, tick_fontsize=12):\n if legend_params is None:\n legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True)\n plt.figure(figsize=figsize, dpi=dpi)\n plt.plot(ticks, ticks, 'k--')\n if bootstrap_sets is not None:\n for b, b_set in enumerate(bootstrap_sets):\n broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in\n b_set])\n pod_range = np.percentile(broc_curves[:, 0], ci, axis=1)\n pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1)\n pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))\n pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1]))\n pod_poly[np.isnan(pod_poly)] = 0\n pofd_poly[np.isnan(pofd_poly)] = 0\n plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b])\n for r, roc_obj in enumerate(roc_objs):\n roc_data = roc_obj.roc_curve()\n plt.plot(roc_data['POFD'], roc_data['POD'], marker=markers[r],\n color=colors[r], label=obj_labels[r])\n plt.xlabel(xlabel, fontsize=label_fontsize)\n plt.ylabel(ylabel, fontsize=label_fontsize)\n plt.xticks(ticks, fontsize=tick_fontsize)\n plt.yticks(ticks, fontsize=tick_fontsize)\n plt.title(title, fontsize=title_fontsize)\n plt.legend(**legend_params)\n plt.savefig(filename, dpi=dpi, bbox_inches='tight')\n plt.close()\n",
"role": "user"
},
{
"content": "roc_curve",
"role": "assistant"
}
] |
def x(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel='Success Ratio (1-FAR)', ylabel='Probability of Detection',
ticks=np.arange(0, 1.1, 0.1), dpi=300, csi_cmap='Blues', csi_label=
'Critical Success Index', title='Performance Diagram', legend_params=
None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14,
title_fontsize=16, tick_fontsize=12):
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
plt.figure(figsize=figsize)
grid_ticks = np.arange(0, 1.01, 0.01)
sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks)
bias = pod_g / sr_g
csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0)
csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1),
extend='max', cmap=csi_cmap)
b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors=
'k', linestyles='dashed')
plt.clabel(b_contour, fmt='%1.1f', manual=[(0.2, 0.9), (0.4, 0.9), (0.6,
0.9), (0.7, 0.7)])
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
perf_curves = np.dstack([b_roc.performance_curve().values for
b_roc in b_set])
pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1)
sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
sr_poly[np.isnan(sr_poly)] = 1
plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
perf_data = roc_obj.performance_curve()
plt.plot(1 - perf_data['FAR'], perf_data['POD'], marker=markers[r],
color=colors[r], label=obj_labels[r])
cbar = plt.colorbar(csi_contour)
cbar.set_label(csi_label)
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
plt.close()
|
performance_diagram
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),\n xlabel='Success Ratio (1-FAR)', ylabel='Probability of Detection',\n ticks=np.arange(0, 1.1, 0.1), dpi=300, csi_cmap='Blues', csi_label=\n 'Critical Success Index', title='Performance Diagram', legend_params=\n None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14,\n title_fontsize=16, tick_fontsize=12):\n if legend_params is None:\n legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)\n plt.figure(figsize=figsize)\n grid_ticks = np.arange(0, 1.01, 0.01)\n sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks)\n bias = pod_g / sr_g\n csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0)\n csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1),\n extend='max', cmap=csi_cmap)\n b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors=\n 'k', linestyles='dashed')\n plt.clabel(b_contour, fmt='%1.1f', manual=[(0.2, 0.9), (0.4, 0.9), (0.6,\n 0.9), (0.7, 0.7)])\n if bootstrap_sets is not None:\n for b, b_set in enumerate(bootstrap_sets):\n perf_curves = np.dstack([b_roc.performance_curve().values for\n b_roc in b_set])\n pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1)\n sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1)\n pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))\n sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1]))\n pod_poly[np.isnan(pod_poly)] = 0\n sr_poly[np.isnan(sr_poly)] = 1\n plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b])\n for r, roc_obj in enumerate(roc_objs):\n perf_data = roc_obj.performance_curve()\n plt.plot(1 - perf_data['FAR'], perf_data['POD'], marker=markers[r],\n color=colors[r], label=obj_labels[r])\n cbar = plt.colorbar(csi_contour)\n cbar.set_label(csi_label)\n plt.xlabel(xlabel, fontsize=label_fontsize)\n plt.ylabel(ylabel, fontsize=label_fontsize)\n plt.xticks(ticks, fontsize=tick_fontsize)\n plt.yticks(ticks, fontsize=tick_fontsize)\n plt.title(title, fontsize=title_fontsize)\n plt.legend(**legend_params)\n plt.savefig(filename, dpi=dpi, bbox_inches='tight')\n plt.close()\n",
"role": "user"
},
{
"content": "performance_diagram",
"role": "assistant"
}
] |
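The background fields in performance_diagram come from two closed-form scores on the (SR, POD) plane: frequency bias POD/SR and the critical success index 1/(1/SR + 1/POD - 1). A quick check at a few points:

```python
import numpy as np

sr = np.array([0.5, 0.8, 0.9])     # success ratio = 1 - FAR
pod = np.array([0.5, 0.8, 0.6])    # probability of detection
csi = 1.0 / (1.0 / sr + 1.0 / pod - 1.0)
bias = pod / sr
print(csi)    # [0.33333333 0.66666667 0.5625]
print(bias)   # [1.         1.         0.66666667]; bias > 1 means over-forecasting
```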
def x(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel='Forecast Probability', ylabel='Observed Relative Frequency',
ticks=np.arange(0, 1.05, 0.05), dpi=300, inset_size=1.5, title=
'Reliability Diagram', legend_params=None, bootstrap_sets=None, ci=(2.5,
97.5)):
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
fig, ax = plt.subplots(figsize=figsize)
plt.plot(ticks, ticks, 'k--')
inset_hist = inset_axes(ax, width=inset_size, height=inset_size, loc=2)
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
brel_curves = np.dstack([b_rel.reliability_curve().values for
b_rel in b_set])
bin_range = np.percentile(brel_curves[:, 0], ci, axis=1)
rel_range = np.percentile(brel_curves[:, 3], ci, axis=1)
bin_poly = np.concatenate((bin_range[1], bin_range[0, ::-1]))
rel_poly = np.concatenate((rel_range[1], rel_range[0, ::-1]))
bin_poly[np.isnan(bin_poly)] = 0
rel_poly[np.isnan(rel_poly)] = 0
plt.fill(bin_poly, rel_poly, alpha=0.5, color=colors[b])
for r, rel_obj in enumerate(rel_objs):
rel_curve = rel_obj.reliability_curve()
ax.plot(rel_curve['Bin_Start'], rel_curve['Positive_Relative_Freq'],
color=colors[r], marker=markers[r], label=obj_labels[r])
inset_hist.semilogy(rel_curve['Bin_Start'], rel_curve[
'Total_Relative_Freq'], color=colors[r], marker=markers[r])
inset_hist.set_xlabel('Forecast Probability')
inset_hist.set_ylabel('Forecast Relative Frequency')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.legend(**legend_params)
ax.set_title(title)
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
plt.close()
|
reliability_diagram
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8),\n xlabel='Forecast Probability', ylabel='Observed Relative Frequency',\n ticks=np.arange(0, 1.05, 0.05), dpi=300, inset_size=1.5, title=\n 'Reliability Diagram', legend_params=None, bootstrap_sets=None, ci=(2.5,\n 97.5)):\n if legend_params is None:\n legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)\n fig, ax = plt.subplots(figsize=figsize)\n plt.plot(ticks, ticks, 'k--')\n inset_hist = inset_axes(ax, width=inset_size, height=inset_size, loc=2)\n if bootstrap_sets is not None:\n for b, b_set in enumerate(bootstrap_sets):\n brel_curves = np.dstack([b_rel.reliability_curve().values for\n b_rel in b_set])\n bin_range = np.percentile(brel_curves[:, 0], ci, axis=1)\n rel_range = np.percentile(brel_curves[:, 3], ci, axis=1)\n bin_poly = np.concatenate((bin_range[1], bin_range[0, ::-1]))\n rel_poly = np.concatenate((rel_range[1], rel_range[0, ::-1]))\n bin_poly[np.isnan(bin_poly)] = 0\n rel_poly[np.isnan(rel_poly)] = 0\n plt.fill(bin_poly, rel_poly, alpha=0.5, color=colors[b])\n for r, rel_obj in enumerate(rel_objs):\n rel_curve = rel_obj.reliability_curve()\n ax.plot(rel_curve['Bin_Start'], rel_curve['Positive_Relative_Freq'],\n color=colors[r], marker=markers[r], label=obj_labels[r])\n inset_hist.semilogy(rel_curve['Bin_Start'], rel_curve[\n 'Total_Relative_Freq'], color=colors[r], marker=markers[r])\n inset_hist.set_xlabel('Forecast Probability')\n inset_hist.set_ylabel('Forecast Relative Frequency')\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_xticks(ticks)\n ax.set_yticks(ticks)\n ax.legend(**legend_params)\n ax.set_title(title)\n plt.savefig(filename, dpi=dpi, bbox_inches='tight')\n plt.close()\n",
"role": "user"
},
{
"content": "reliability_diagram",
"role": "assistant"
}
] |
def x(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel='Forecast Probability', ylabel='Observed Relative Frequency',
ticks=np.arange(0, 1.05, 0.05), dpi=300, title='Attributes Diagram',
legend_params=None, inset_params=None, inset_position=(0.12, 0.72, 0.25,
0.25), bootstrap_sets=None, ci=(2.5, 97.5)):
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
if inset_params is None:
inset_params = dict(width='25%', height='25%', loc=2, axes_kwargs=
dict(axisbg='white'))
fig, ax = plt.subplots(figsize=figsize)
plt.plot(ticks, ticks, 'k--')
inset_hist = inset_axes(ax, **inset_params)
ip = InsetPosition(ax, inset_position)
inset_hist.set_axes_locator(ip)
climo = rel_objs[0].climatology()
no_skill = 0.5 * ticks + 0.5 * climo
skill_x = [climo, climo, 1, 1, climo, climo, 0, 0, climo]
skill_y = [climo, 1, 1, no_skill[-1], climo, 0, 0, no_skill[0], climo]
f = ax.fill(skill_x, skill_y, '0.8')
f[0].set_zorder(1)
ax.plot(ticks, np.ones(ticks.shape) * climo, 'k--')
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
brel_curves = np.vstack([b_rel.reliability_curve()[
'Positive_Relative_Freq'].values for b_rel in b_set])
rel_range = np.nanpercentile(brel_curves, ci, axis=0)
fb = ax.fill_between(b_rel.thresholds[:-1], rel_range[1],
rel_range[0], alpha=0.5, color=colors[b])
fb.set_zorder(2)
for r, rel_obj in enumerate(rel_objs):
rel_curve = rel_obj.reliability_curve()
ax.plot(rel_curve['Bin_Start'], rel_curve['Positive_Relative_Freq'],
color=colors[r], marker=markers[r], label=obj_labels[r])
inset_hist.semilogy(rel_curve['Bin_Start'] * 100, rel_obj.
frequencies['Total_Freq'][:-1], color=colors[r], marker=markers[r])
inset_hist.set_xlabel('Forecast Probability')
inset_hist.set_ylabel('Frequency')
ax.annotate('No Skill', (0.6, no_skill[12]), rotation=22.5)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticks(ticks)
ax.set_xticklabels((ticks * 100).astype(int))
ax.set_yticks(ticks)
ax.set_yticklabels((ticks * 100).astype(int))
ax.legend(**legend_params)
ax.set_title(title)
plt.savefig(filename, dpi=dpi, bbox_inches='tight')
plt.close()
|
attributes_diagram
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8),\n xlabel='Forecast Probability', ylabel='Observed Relative Frequency',\n ticks=np.arange(0, 1.05, 0.05), dpi=300, title='Attributes Diagram',\n legend_params=None, inset_params=None, inset_position=(0.12, 0.72, 0.25,\n 0.25), bootstrap_sets=None, ci=(2.5, 97.5)):\n if legend_params is None:\n legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)\n if inset_params is None:\n inset_params = dict(width='25%', height='25%', loc=2, axes_kwargs=\n dict(axisbg='white'))\n fig, ax = plt.subplots(figsize=figsize)\n plt.plot(ticks, ticks, 'k--')\n inset_hist = inset_axes(ax, **inset_params)\n ip = InsetPosition(ax, inset_position)\n inset_hist.set_axes_locator(ip)\n climo = rel_objs[0].climatology()\n no_skill = 0.5 * ticks + 0.5 * climo\n skill_x = [climo, climo, 1, 1, climo, climo, 0, 0, climo]\n skill_y = [climo, 1, 1, no_skill[-1], climo, 0, 0, no_skill[0], climo]\n f = ax.fill(skill_x, skill_y, '0.8')\n f[0].set_zorder(1)\n ax.plot(ticks, np.ones(ticks.shape) * climo, 'k--')\n if bootstrap_sets is not None:\n for b, b_set in enumerate(bootstrap_sets):\n brel_curves = np.vstack([b_rel.reliability_curve()[\n 'Positive_Relative_Freq'].values for b_rel in b_set])\n rel_range = np.nanpercentile(brel_curves, ci, axis=0)\n fb = ax.fill_between(b_rel.thresholds[:-1], rel_range[1],\n rel_range[0], alpha=0.5, color=colors[b])\n fb.set_zorder(2)\n for r, rel_obj in enumerate(rel_objs):\n rel_curve = rel_obj.reliability_curve()\n ax.plot(rel_curve['Bin_Start'], rel_curve['Positive_Relative_Freq'],\n color=colors[r], marker=markers[r], label=obj_labels[r])\n inset_hist.semilogy(rel_curve['Bin_Start'] * 100, rel_obj.\n frequencies['Total_Freq'][:-1], color=colors[r], marker=markers[r])\n inset_hist.set_xlabel('Forecast Probability')\n inset_hist.set_ylabel('Frequency')\n ax.annotate('No Skill', (0.6, no_skill[12]), rotation=22.5)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_xticks(ticks)\n ax.set_xticklabels((ticks * 100).astype(int))\n ax.set_yticks(ticks)\n ax.set_yticklabels((ticks * 100).astype(int))\n ax.legend(**legend_params)\n ax.set_title(title)\n plt.savefig(filename, dpi=dpi, bbox_inches='tight')\n plt.close()\n",
"role": "user"
},
{
"content": "attributes_diagram",
"role": "assistant"
}
] |
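attributes_diagram shades the positive-skill region bounded by the no-skill line, which sits halfway between the 1:1 diagonal and the climatological base rate; the Brier skill score is zero along it. With an assumed base rate of 0.2:

```python
import numpy as np

climo = 0.2                         # assumed climatological base rate
ticks = np.arange(0, 1.05, 0.05)    # forecast probability bins
no_skill = 0.5 * ticks + 0.5 * climo
print(no_skill[0], no_skill[-1])    # ~0.1 at p=0, ~0.6 at p=1
```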
def x(self, prompt, default_str=None) ->str:
accept_event = threading.Event()
value_ref = [None]
def x():
def x(text):
value_ref[0] = text
accept_event.set()
def x():
accept_event.set()
self.__message_column.remove_all()
pose_get_string_message_box(self.ui, self.__message_column, prompt,
str(default_str), accepted, rejected)
with self.__lock:
self.__q.append(perform)
self.document_controller.add_task('ui_' + str(id(self)), self.
__handle_output_and_q)
accept_event.wait()
def x():
self.__message_column.remove_all()
self.__message_column.add(self.__make_cancel_row())
self.document_controller.add_task('ui_' + str(id(self)),
update_message_column)
if value_ref[0] is None:
raise Exception('Cancel')
return value_ref[0]
|
get_string
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, prompt, default_str=None) ->str:\n accept_event = threading.Event()\n value_ref = [None]\n\n def x():\n\n def x(text):\n value_ref[0] = text\n accept_event.set()\n\n def x():\n accept_event.set()\n self.__message_column.remove_all()\n pose_get_string_message_box(self.ui, self.__message_column, prompt,\n str(default_str), accepted, rejected)\n with self.__lock:\n self.__q.append(perform)\n self.document_controller.add_task('ui_' + str(id(self)), self.\n __handle_output_and_q)\n accept_event.wait()\n\n def x():\n self.__message_column.remove_all()\n self.__message_column.add(self.__make_cancel_row())\n self.document_controller.add_task('ui_' + str(id(self)),\n update_message_column)\n if value_ref[0] is None:\n raise Exception('Cancel')\n return value_ref[0]\n",
"role": "user"
},
{
"content": "get_string",
"role": "assistant"
}
] |
def x(self, prompt, accepted_text, rejected_text, display_rejected):
accept_event = threading.Event()
result_ref = [False]
def x():
def x():
result_ref[0] = True
accept_event.set()
def x():
result_ref[0] = False
accept_event.set()
self.__message_column.remove_all()
pose_confirmation_message_box(self.ui, self.__message_column,
prompt, accepted, rejected, accepted_text, rejected_text,
display_rejected)
with self.__lock:
self.__q.append(perform)
self.document_controller.add_task('ui_' + str(id(self)), self.
__handle_output_and_q)
accept_event.wait()
def x():
self.__message_column.remove_all()
self.__message_column.add(self.__make_cancel_row())
self.document_controller.add_task('ui_' + str(id(self)),
update_message_column)
return result_ref[0]
|
accept_reject
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, prompt, accepted_text, rejected_text, display_rejected):\n accept_event = threading.Event()\n result_ref = [False]\n\n def x():\n\n def x():\n result_ref[0] = True\n accept_event.set()\n\n def x():\n result_ref[0] = False\n accept_event.set()\n self.__message_column.remove_all()\n pose_confirmation_message_box(self.ui, self.__message_column,\n prompt, accepted, rejected, accepted_text, rejected_text,\n display_rejected)\n with self.__lock:\n self.__q.append(perform)\n self.document_controller.add_task('ui_' + str(id(self)), self.\n __handle_output_and_q)\n accept_event.wait()\n\n def x():\n self.__message_column.remove_all()\n self.__message_column.add(self.__make_cancel_row())\n self.document_controller.add_task('ui_' + str(id(self)),\n update_message_column)\n return result_ref[0]\n",
"role": "user"
},
{
"content": "accept_reject",
"role": "assistant"
}
] |
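get_string and accept_reject share one concurrency pattern: the calling thread queues a `perform` callback for the UI thread, then blocks on a threading.Event until one of the dialog callbacks fires. A self-contained sketch of that hand-off (names are illustrative, not the project's API):

```python
import threading

def ask_on_ui_thread(post_to_ui, build_dialog):
    done = threading.Event()
    result = [None]

    def perform():                  # runs on the "UI" thread
        def accepted(value):
            result[0] = value
            done.set()
        build_dialog(accepted)

    post_to_ui(perform)
    done.wait()                     # caller blocks here, like accept_event.wait()
    return result[0]

# stand-in UI thread: run the posted callback on another thread
post = lambda fn: threading.Thread(target=fn).start()
print(ask_on_ui_thread(post, lambda accepted: accepted("hello")))
```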
def x(self, r, ytr=None, mask=None):
ytr = self._ytr if ytr is None else ytr
mask = self._mask if mask is None else mask
return compute_weight(r, ytr, mask)
|
compute_weight
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, r, ytr=None, mask=None):\n ytr = self._ytr if ytr is None else ytr\n mask = self._mask if mask is None else mask\n return compute_weight(r, ytr, mask)\n",
"role": "user"
},
{
"content": "compute_weight",
"role": "assistant"
}
] |
def x(self):
if self._multiple_outputs:
if self.hy_test is not None:
r = [(hy.isfinite() and (hyt is None or hyt.isfinite())) for hy,
hyt in zip(self.hy, self.hy_test)]
else:
r = [hy.isfinite() for hy in self.hy]
return np.all(r)
return self.hy.isfinite() and (self.hy_test is None or self.hy_test.
isfinite())
|
isfinite
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._multiple_outputs:\n if self.hy_test is not None:\n r = [(hy.isfinite() and (hyt is None or hyt.isfinite())) for hy,\n hyt in zip(self.hy, self.hy_test)]\n else:\n r = [hy.isfinite() for hy in self.hy]\n return np.all(r)\n return self.hy.isfinite() and (self.hy_test is None or self.hy_test.\n isfinite())\n",
"role": "user"
},
{
"content": "isfinite",
"role": "assistant"
}
] |
def x(argument_name, function_name, search_str, resolve_varname=False):
try:
search_str = unicode(search_str)
except NameError:
pass
readline = StringIO(search_str).readline
try:
token_generator = tokenize.generate_tokens(readline)
tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _,
_ in token_generator]
except tokenize.TokenError as e:
raise ValueError('search_str is not parse-able python code: ' + str(e))
in_function = False
is_var = False
for i in range(len(tokens)):
if not in_function and tokens[i].typenum == tokenize.NAME and tokens[i
].value == function_name and tokens[i + 1
].typenum == tokenize.OP and tokens[i + 1].value == '(':
in_function = True
continue
elif in_function and tokens[i].typenum == tokenize.NAME and tokens[i
].value == argument_name and tokens[i + 1
].typenum == tokenize.OP and tokens[i + 1].value == '=':
if resolve_varname and tokens[i + 2].typenum == 1:
is_var = True
argument_name = tokens[i + 2].value
break
j = 3
while True:
if tokens[i + j].value in (',', ')') or tokens[i + j
].typenum == 58:
break
j += 1
return ''.join([t.value for t in tokens[i + 2:i + j]]).strip()
if is_var:
for i in range(len(tokens)):
if tokens[i].typenum == tokenize.NAME and tokens[i
].value == argument_name and tokens[i + 1
].typenum == tokenize.OP and tokens[i + 1].value == '=':
return tokens[i + 2].value.strip()
return None
|
value_of_named_argument_in_function
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(argument_name, function_name, search_str, resolve_varname=False):\n try:\n search_str = unicode(search_str)\n except NameError:\n pass\n readline = StringIO(search_str).readline\n try:\n token_generator = tokenize.generate_tokens(readline)\n tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _,\n _ in token_generator]\n except tokenize.TokenError as e:\n raise ValueError('search_str is not parse-able python code: ' + str(e))\n in_function = False\n is_var = False\n for i in range(len(tokens)):\n if not in_function and tokens[i].typenum == tokenize.NAME and tokens[i\n ].value == function_name and tokens[i + 1\n ].typenum == tokenize.OP and tokens[i + 1].value == '(':\n in_function = True\n continue\n elif in_function and tokens[i].typenum == tokenize.NAME and tokens[i\n ].value == argument_name and tokens[i + 1\n ].typenum == tokenize.OP and tokens[i + 1].value == '=':\n if resolve_varname and tokens[i + 2].typenum == 1:\n is_var = True\n argument_name = tokens[i + 2].value\n break\n j = 3\n while True:\n if tokens[i + j].value in (',', ')') or tokens[i + j\n ].typenum == 58:\n break\n j += 1\n return ''.join([t.value for t in tokens[i + 2:i + j]]).strip()\n if is_var:\n for i in range(len(tokens)):\n if tokens[i].typenum == tokenize.NAME and tokens[i\n ].value == argument_name and tokens[i + 1\n ].typenum == tokenize.OP and tokens[i + 1].value == '=':\n return tokens[i + 2].value.strip()\n return None\n",
"role": "user"
},
{
"content": "value_of_named_argument_in_function",
"role": "assistant"
}
] |
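value_of_named_argument_in_function walks the raw token stream rather than parsing an AST, which lets it read keyword arguments out of arbitrary source fragments. The core scan, reduced to its essentials (a toy version, without the row's variable-resolution branch):

```python
import tokenize
from io import StringIO

src = "result = resize(image, width=640, height=480)"
tokens = list(tokenize.generate_tokens(StringIO(src).readline))
for i, tok in enumerate(tokens):
    # NAME followed by '=' inside the call marks a keyword argument
    if tok.type == tokenize.NAME and tok.string == "width" \
            and tokens[i + 1].string == "=":
        print(tokens[i + 2].string)   # -> 640
```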
def x(regex, filepath, return_match=False):
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content)
|
regex_in_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(regex, filepath, return_match=False):\n file_content = get_file_content(filepath)\n re_method = funcy.re_find if return_match else funcy.re_test\n return re_method(regex, file_content)\n",
"role": "user"
},
{
"content": "regex_in_file",
"role": "assistant"
}
] |
def x(regex, filename, package_name, return_match=False):
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match)
|
regex_in_package_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(regex, filename, package_name, return_match=False):\n filepath = package_file_path(filename, package_name)\n return regex_in_file(regex, filepath, return_match=return_match)\n",
"role": "user"
},
{
"content": "regex_in_package_file",
"role": "assistant"
}
] |
def x(test_str):
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != ''
|
string_is_url
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(test_str):\n parsed = urlparse.urlparse(test_str)\n return parsed.scheme is not None and parsed.scheme != ''\n",
"role": "user"
},
{
"content": "string_is_url",
"role": "assistant"
}
] |
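string_is_url accepts anything urlparse gives a non-empty scheme, so "mailto:..." or "ftp:..." pass too; in practice the scheme is never None, making the first half of the check redundant. A quick demonstration (Python 3 import shown; the row uses the older urlparse module name):

```python
from urllib.parse import urlparse

for s in ("https://example.com", "mailto:x@y.z", "not a url"):
    print(s, bool(urlparse(s).scheme))   # True, True, False
```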
def x(self, item) ->Transaction:
items = self.__build_transaction_items(item)
transaction = Transaction(self, item, items)
self.__transactions.append(transaction)
return transaction
|
item_transaction
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, item) ->Transaction:\n items = self.__build_transaction_items(item)\n transaction = Transaction(self, item, items)\n self.__transactions.append(transaction)\n return transaction\n",
"role": "user"
},
{
"content": "item_transaction",
"role": "assistant"
}
] |
def x(self, before_index, data_item, auto_display: bool=True) ->None:
assert data_item is not None
assert data_item not in self.data_items
assert before_index <= len(self.data_items) and before_index >= 0
assert data_item.uuid not in self.__uuid_to_data_item
data_item.session_id = self.session_id
self.__insert_data_item(before_index, data_item, do_write=True)
if auto_display:
display_item = DisplayItem.DisplayItem(data_item=data_item)
self.append_display_item(display_item)
|
insert_data_item
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, before_index, data_item, auto_display: bool=True) ->None:\n assert data_item is not None\n assert data_item not in self.data_items\n assert before_index <= len(self.data_items) and before_index >= 0\n assert data_item.uuid not in self.__uuid_to_data_item\n data_item.session_id = self.session_id\n self.__insert_data_item(before_index, data_item, do_write=True)\n if auto_display:\n display_item = DisplayItem.DisplayItem(data_item=data_item)\n self.append_display_item(display_item)\n",
"role": "user"
},
{
"content": "insert_data_item",
"role": "assistant"
}
] |
def x(self, data_item: DataItem.DataItem, *, safe: bool=False
) ->typing.Optional[typing.Sequence]:
return self.__cascade_delete(data_item, safe=safe)
|
remove_data_item
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_item: DataItem.DataItem, *, safe: bool=False\n ) ->typing.Optional[typing.Sequence]:\n return self.__cascade_delete(data_item, safe=safe)\n",
"role": "user"
},
{
"content": "remove_data_item",
"role": "assistant"
}
] |
def x(self, master_item, safe: bool=False) ->typing.Optional[typing.Sequence]:
if self.__computation_changed_delay_list is None:
computation_changed_delay_list = list()
self.__computation_changed_delay_list = computation_changed_delay_list
else:
computation_changed_delay_list = None
undelete_log = list()
try:
items = list()
dependencies = list()
self.__build_cascade(master_item, items, dependencies)
cascaded = True
while cascaded:
cascaded = False
items_set = set(items)
for computation in copy.copy(self.computations):
output_deleted = master_item in computation._outputs
computation._inputs -= items_set
computation._outputs -= items_set
if (computation not in items and computation != self.
__current_computation):
if output_deleted or all(input in items for input in
computation._inputs):
self.__build_cascade(computation, items, dependencies)
cascaded = True
for source, target in reversed(dependencies):
self.__remove_dependency(source, target)
for item in reversed(items):
for computation in self.computations:
new_entries = computation.list_item_removed(item)
undelete_log.extend(new_entries)
container = item.container
if isinstance(item, DataItem.DataItem):
name = 'data_items'
elif isinstance(item, DisplayItem.DisplayItem):
name = 'display_items'
elif isinstance(item, Graphics.Graphic):
name = 'graphics'
elif isinstance(item, DataStructure.DataStructure):
name = 'data_structures'
elif isinstance(item, Symbolic.Computation):
name = 'computations'
elif isinstance(item, Connection.Connection):
name = 'connections'
elif isinstance(item, DisplayItem.DisplayDataChannel):
name = 'display_data_channels'
else:
name = None
assert False, 'Unable to cascade delete type ' + str(type(item)
)
assert name
if container is self and name == 'data_items':
index = getattr(container, name).index(item)
item_dict = item.write_to_dict()
undelete_log.extend(self.__remove_data_item(item, safe=safe))
undelete_log.append({'type': name, 'index': index,
'properties': item_dict})
elif container is self and name == 'display_items':
index = getattr(container, name).index(item)
item_dict = item.write_to_dict()
undelete_log.extend(self.__remove_display_item(item, safe=safe)
)
undelete_log.append({'type': name, 'index': index,
'properties': item_dict})
elif container:
container_ref = str(container.uuid)
index = getattr(container, name).index(item)
item_dict = item.write_to_dict()
container_properties = container.save_properties() if hasattr(
container, 'save_properties') else dict()
undelete_log.append({'type': name, 'container':
container_ref, 'index': index, 'properties': item_dict,
'container_properties': container_properties})
container.remove_item(name, item)
if container == self and name in ('data_structures',
'computations'):
self.notify_remove_item(name, item, index)
except Exception as e:
import sys, traceback
traceback.print_exc()
traceback.format_exception(*sys.exc_info())
finally:
if computation_changed_delay_list is not None:
self.__finish_computation_changed()
return undelete_log
|
cascade_delete_inner
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, master_item, safe: bool=False) ->typing.Optional[typing.Sequence]:\n if self.__computation_changed_delay_list is None:\n computation_changed_delay_list = list()\n self.__computation_changed_delay_list = computation_changed_delay_list\n else:\n computation_changed_delay_list = None\n undelete_log = list()\n try:\n items = list()\n dependencies = list()\n self.__build_cascade(master_item, items, dependencies)\n cascaded = True\n while cascaded:\n cascaded = False\n items_set = set(items)\n for computation in copy.copy(self.computations):\n output_deleted = master_item in computation._outputs\n computation._inputs -= items_set\n computation._outputs -= items_set\n if (computation not in items and computation != self.\n __current_computation):\n if output_deleted or all(input in items for input in\n computation._inputs):\n self.__build_cascade(computation, items, dependencies)\n cascaded = True\n for source, target in reversed(dependencies):\n self.__remove_dependency(source, target)\n for item in reversed(items):\n for computation in self.computations:\n new_entries = computation.list_item_removed(item)\n undelete_log.extend(new_entries)\n container = item.container\n if isinstance(item, DataItem.DataItem):\n name = 'data_items'\n elif isinstance(item, DisplayItem.DisplayItem):\n name = 'display_items'\n elif isinstance(item, Graphics.Graphic):\n name = 'graphics'\n elif isinstance(item, DataStructure.DataStructure):\n name = 'data_structures'\n elif isinstance(item, Symbolic.Computation):\n name = 'computations'\n elif isinstance(item, Connection.Connection):\n name = 'connections'\n elif isinstance(item, DisplayItem.DisplayDataChannel):\n name = 'display_data_channels'\n else:\n name = None\n assert False, 'Unable to cascade delete type ' + str(type(item)\n )\n assert name\n if container is self and name == 'data_items':\n index = getattr(container, name).index(item)\n item_dict = item.write_to_dict()\n undelete_log.extend(self.__remove_data_item(item, safe=safe))\n undelete_log.append({'type': name, 'index': index,\n 'properties': item_dict})\n elif container is self and name == 'display_items':\n index = getattr(container, name).index(item)\n item_dict = item.write_to_dict()\n undelete_log.extend(self.__remove_display_item(item, safe=safe)\n )\n undelete_log.append({'type': name, 'index': index,\n 'properties': item_dict})\n elif container:\n container_ref = str(container.uuid)\n index = getattr(container, name).index(item)\n item_dict = item.write_to_dict()\n container_properties = container.save_properties() if hasattr(\n container, 'save_properties') else dict()\n undelete_log.append({'type': name, 'container':\n container_ref, 'index': index, 'properties': item_dict,\n 'container_properties': container_properties})\n container.remove_item(name, item)\n if container == self and name in ('data_structures',\n 'computations'):\n self.notify_remove_item(name, item, index)\n except Exception as e:\n import sys, traceback\n traceback.print_exc()\n traceback.format_exception(*sys.exc_info())\n finally:\n if computation_changed_delay_list is not None:\n self.__finish_computation_changed()\n return undelete_log\n",
"role": "user"
},
{
"content": "cascade_delete_inner",
"role": "assistant"
}
] |
def x(self, item) ->typing.List:
with self.__dependency_tree_lock:
return copy.copy(self.__dependency_tree_source_to_target_map.get(
weakref.ref(item), list()))
|
get_dependent_items
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, item) ->typing.List:\n with self.__dependency_tree_lock:\n return copy.copy(self.__dependency_tree_source_to_target_map.get(\n weakref.ref(item), list()))\n",
"role": "user"
},
{
"content": "get_dependent_items",
"role": "assistant"
}
] |
def x(self, item, item_set) ->None:
if not item in item_set:
item_set.add(item)
with self.__dependency_tree_lock:
for dependent in self.get_dependent_items(item):
self.__get_deep_dependent_item_set(dependent, item_set)
|
get_deep_dependent_item_set
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, item, item_set) ->None:\n if not item in item_set:\n item_set.add(item)\n with self.__dependency_tree_lock:\n for dependent in self.get_dependent_items(item):\n self.__get_deep_dependent_item_set(dependent, item_set)\n",
"role": "user"
},
{
"content": "get_deep_dependent_item_set",
"role": "assistant"
}
] |
def x(self, data_item: DataItem.DataItem) ->typing.List[DataItem.DataItem]:
with self.__dependency_tree_lock:
return [data_item for data_item in self.
__dependency_tree_source_to_target_map.get(weakref.ref(
data_item), list()) if isinstance(data_item, DataItem.DataItem)]
|
get_dependent_data_items
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_item: DataItem.DataItem) ->typing.List[DataItem.DataItem]:\n with self.__dependency_tree_lock:\n return [data_item for data_item in self.\n __dependency_tree_source_to_target_map.get(weakref.ref(\n data_item), list()) if isinstance(data_item, DataItem.DataItem)]\n",
"role": "user"
},
{
"content": "get_dependent_data_items",
"role": "assistant"
}
] |
def x(self):
class DocumentModelTransaction:
def x(self, document_model):
self.__document_model = document_model
def x(self):
self.__document_model.persistent_object_context.enter_write_delay(
self.__document_model)
return self
def x(self, type, value, traceback):
self.__document_model.persistent_object_context.exit_write_delay(
self.__document_model)
self.__document_model.persistent_object_context.rewrite_item(self
.__document_model)
return DocumentModelTransaction(self)
|
transaction_context
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n\n\n class DocumentModelTransaction:\n\n def x(self, document_model):\n self.__document_model = document_model\n\n def x(self):\n self.__document_model.persistent_object_context.enter_write_delay(\n self.__document_model)\n return self\n\n def x(self, type, value, traceback):\n self.__document_model.persistent_object_context.exit_write_delay(\n self.__document_model)\n self.__document_model.persistent_object_context.rewrite_item(self\n .__document_model)\n return DocumentModelTransaction(self)\n",
"role": "user"
},
{
"content": "transaction_context",
"role": "assistant"
}
] |
def x(self, data_item):
class LiveContextManager:
def x(self, manager, object):
self.__manager = manager
self.__object = object
def x(self):
self.__manager.begin_data_item_live(self.__object)
return self
def x(self, type, value, traceback):
self.__manager.end_data_item_live(self.__object)
return LiveContextManager(self, data_item)
|
data_item_live
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_item):\n\n\n class LiveContextManager:\n\n def x(self, manager, object):\n self.__manager = manager\n self.__object = object\n\n def x(self):\n self.__manager.begin_data_item_live(self.__object)\n return self\n\n def x(self, type, value, traceback):\n self.__manager.end_data_item_live(self.__object)\n return LiveContextManager(self, data_item)\n",
"role": "user"
},
{
"content": "data_item_live",
"role": "assistant"
}
] |
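transaction_context and data_item_live both return small hand-rolled context managers whose enter/exit methods bracket a begin/end pair. The same pairing can be sketched with contextlib (Manager and its methods are stand-ins, not the project's classes):

```python
import contextlib

class Manager:
    def begin(self, obj): print("begin", obj)
    def end(self, obj): print("end", obj)

    @contextlib.contextmanager
    def live(self, obj):
        self.begin(obj)             # like begin_data_item_live
        try:
            yield obj
        finally:
            self.end(obj)           # like end_data_item_live, even on error

with Manager().live("data_item"):
    print("inside with-block")
```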
def x(self, data_item):
with self.__live_data_items_lock:
old_live_count = self.__live_data_items.get(data_item.uuid, 0)
self.__live_data_items[data_item.uuid] = old_live_count + 1
if old_live_count == 0:
data_item._enter_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.begin_data_item_live(dependent_data_item)
|
begin_data_item_live
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_item):\n with self.__live_data_items_lock:\n old_live_count = self.__live_data_items.get(data_item.uuid, 0)\n self.__live_data_items[data_item.uuid] = old_live_count + 1\n if old_live_count == 0:\n data_item._enter_live_state()\n for dependent_data_item in self.get_dependent_data_items(data_item):\n self.begin_data_item_live(dependent_data_item)\n",
"role": "user"
},
{
"content": "begin_data_item_live",
"role": "assistant"
}
] |
def x(self, data_item):
with self.__live_data_items_lock:
live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
assert live_count >= 0
self.__live_data_items[data_item.uuid] = live_count
if live_count == 0:
data_item._exit_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.end_data_item_live(dependent_data_item)
|
end_data_item_live
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_item):\n with self.__live_data_items_lock:\n live_count = self.__live_data_items.get(data_item.uuid, 0) - 1\n assert live_count >= 0\n self.__live_data_items[data_item.uuid] = live_count\n if live_count == 0:\n data_item._exit_live_state()\n for dependent_data_item in self.get_dependent_data_items(data_item):\n self.end_data_item_live(dependent_data_item)\n",
"role": "user"
},
{
"content": "end_data_item_live",
"role": "assistant"
}
] |
def x(self, hardware_source: HardwareSource.HardwareSource, data_channel:
HardwareSource.DataChannel):
session_id = self.session_id
key = self.make_data_item_reference_key(hardware_source.
hardware_source_id, data_channel.channel_id)
data_item_reference = self.get_data_item_reference(key)
with data_item_reference.mutex:
data_item = data_item_reference.data_item
if data_item is None:
data_item = DataItem.DataItem()
data_item.ensure_data_source()
data_item.title = '%s (%s)' % (hardware_source.display_name,
data_channel.name
) if data_channel.name else hardware_source.display_name
data_item.category = 'temporary'
data_item_reference.data_item = data_item
def x():
self.append_data_item(data_item)
self._update_data_item_reference(key, data_item)
self.__call_soon(append_data_item)
def x():
if data_item.session_id != session_id:
data_item.session_id = session_id
session_metadata = ApplicationData.get_session_metadata_dict()
if data_item.session_metadata != session_metadata:
data_item.session_metadata = session_metadata
if data_channel.processor:
src_data_channel = hardware_source.data_channels[data_channel
.src_channel_index]
src_data_item_reference = self.get_data_item_reference(self
.make_data_item_reference_key(hardware_source.
hardware_source_id, src_data_channel.channel_id))
data_channel.processor.connect_data_item_reference(
src_data_item_reference)
self.__call_soon(update_session)
return data_item_reference
|
construct_data_item_reference
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, hardware_source: HardwareSource.HardwareSource, data_channel:\n HardwareSource.DataChannel):\n session_id = self.session_id\n key = self.make_data_item_reference_key(hardware_source.\n hardware_source_id, data_channel.channel_id)\n data_item_reference = self.get_data_item_reference(key)\n with data_item_reference.mutex:\n data_item = data_item_reference.data_item\n if data_item is None:\n data_item = DataItem.DataItem()\n data_item.ensure_data_source()\n data_item.title = '%s (%s)' % (hardware_source.display_name,\n data_channel.name\n ) if data_channel.name else hardware_source.display_name\n data_item.category = 'temporary'\n data_item_reference.data_item = data_item\n\n def x():\n self.append_data_item(data_item)\n self._update_data_item_reference(key, data_item)\n self.__call_soon(append_data_item)\n\n def x():\n if data_item.session_id != session_id:\n data_item.session_id = session_id\n session_metadata = ApplicationData.get_session_metadata_dict()\n if data_item.session_metadata != session_metadata:\n data_item.session_metadata = session_metadata\n if data_channel.processor:\n src_data_channel = hardware_source.data_channels[data_channel\n .src_channel_index]\n src_data_item_reference = self.get_data_item_reference(self\n .make_data_item_reference_key(hardware_source.\n hardware_source_id, src_data_channel.channel_id))\n data_channel.processor.connect_data_item_reference(\n src_data_item_reference)\n self.__call_soon(update_session)\n return data_item_reference\n",
"role": "user"
},
{
"content": "construct_data_item_reference",
"role": "assistant"
}
] |
def x(array: numpy.ndarray, x: int) ->numpy.ndarray:
out_array = []
for i in range(x):
if i % (x / (len(array) - 1)) == 0:
index = i / (x / (len(array) - 1))
out_array.append(array[int(index)])
else:
start_marker = array[math.floor(i / (x / (len(array) - 1)))]
stop_marker = array[math.ceil(i / (x / (len(array) - 1)))]
interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1))
interp_color = numpy.rint(start_marker + (stop_marker -
start_marker) * interp_amount)
out_array.append(interp_color)
out_array[-1] = array[-1]
return numpy.array(out_array).astype(numpy.uint8)
|
interpolate_colors
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(array: numpy.ndarray, x: int) ->numpy.ndarray:\n out_array = []\n for i in range(x):\n if i % (x / (len(array) - 1)) == 0:\n index = i / (x / (len(array) - 1))\n out_array.append(array[int(index)])\n else:\n start_marker = array[math.floor(i / (x / (len(array) - 1)))]\n stop_marker = array[math.ceil(i / (x / (len(array) - 1)))]\n interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1))\n interp_color = numpy.rint(start_marker + (stop_marker -\n start_marker) * interp_amount)\n out_array.append(interp_color)\n out_array[-1] = array[-1]\n return numpy.array(out_array).astype(numpy.uint8)\n",
"role": "user"
},
{
"content": "interpolate_colors",
"role": "assistant"
}
] |
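interpolate_colors stretches a short list of RGB stops into x rows by linearly blending between neighbouring stops. A simplified check of the blend it performs, using a single two-stop ramp (the row's index arithmetic generalizes this to many stops):

```python
import numpy

ramp = numpy.array([[0, 0, 0], [255, 255, 255]])    # two RGB stops
x = 5
rows = [numpy.rint(ramp[0] + (ramp[1] - ramp[0]) * i / (x - 1))
        for i in range(x)]
print(numpy.array(rows).astype(numpy.uint8)[:, 0])  # [  0  64 128 191 255]
```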
def x(self, **args):
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception(
"Either provide authenticated user's Unambigious Gistname or any unique Gistid to be starred"
)
r = requests.put('%s' % BASE_URL + '/gists/%s/star' % self.gist_id,
headers=self.gist.header)
if r.status_code == 204:
response = {'id': self.gist_id}
return response
raise Exception("Gist can't be starred")
|
star
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **args):\n if 'name' in args:\n self.gist_name = args['name']\n self.gist_id = self.getMyID(self.gist_name)\n elif 'id' in args:\n self.gist_id = args['id']\n else:\n raise Exception(\n \"Either provide authenticated user's Unambigious Gistname or any unique Gistid to be starred\"\n )\n r = requests.put('%s' % BASE_URL + '/gists/%s/star' % self.gist_id,\n headers=self.gist.header)\n if r.status_code == 204:\n response = {'id': self.gist_id}\n return response\n raise Exception(\"Gist can't be starred\")\n",
"role": "user"
},
{
"content": "star",
"role": "assistant"
}
] |
def x(self, **args):
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception(
"Either provide authenticated user's Unambigious Gistname or any unique Gistid to be forked"
)
r = requests.post('%s' % BASE_URL + '/gists/%s/forks' % self.gist_id,
headers=self.gist.header)
if r.status_code == 201:
response = {'id': self.gist_id, 'description': r.json()[
'description'], 'public': r.json()['public'], 'comments': r.
json()['comments']}
return response
raise Exception("Gist can't be forked")
|
fork
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **args):\n if 'name' in args:\n self.gist_name = args['name']\n self.gist_id = self.getMyID(self.gist_name)\n elif 'id' in args:\n self.gist_id = args['id']\n else:\n raise Exception(\n \"Either provide authenticated user's Unambigious Gistname or any unique Gistid to be forked\"\n )\n r = requests.post('%s' % BASE_URL + '/gists/%s/forks' % self.gist_id,\n headers=self.gist.header)\n if r.status_code == 201:\n response = {'id': self.gist_id, 'description': r.json()[\n 'description'], 'public': r.json()['public'], 'comments': r.\n json()['comments']}\n return response\n raise Exception(\"Gist can't be forked\")\n",
"role": "user"
},
{
"content": "fork",
"role": "assistant"
}
] |
def x(self, **args):
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception(
"Either provide authenticated user's Unambigious Gistname or any unique Gistid to be checked for star"
)
r = requests.get('%s' % BASE_URL + '/gists/%s/star' % self.gist_id,
headers=self.gist.header)
if r.status_code == 204:
response = {'starred': 'True', 'id': self.gist_id}
else:
response = {'starred': 'False'}
return response
|
checkifstar
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **args):\n if 'name' in args:\n self.gist_name = args['name']\n self.gist_id = self.getMyID(self.gist_name)\n elif 'id' in args:\n self.gist_id = args['id']\n else:\n raise Exception(\n \"Either provide authenticated user's Unambigious Gistname or any unique Gistid to be checked for star\"\n )\n r = requests.get('%s' % BASE_URL + '/gists/%s/star' % self.gist_id,\n headers=self.gist.header)\n if r.status_code == 204:\n response = {'starred': 'True', 'id': self.gist_id}\n else:\n response = {'starred': 'False'}\n return response\n",
"role": "user"
},
{
"content": "checkifstar",
"role": "assistant"
}
] |
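star, fork, and checkifstar all follow one request shape against the GitHub Gist API: resolve a name to an id, hit a single endpoint, and branch on the status code (204 for a successful star check, 201 for a created fork). A minimal stand-alone version of the star check (token and id are placeholders):

```python
import requests

BASE_URL = "https://api.github.com"
headers = {"Authorization": "token <YOUR_TOKEN>"}   # placeholder token
gist_id = "<GIST_ID>"                               # placeholder id

r = requests.get(f"{BASE_URL}/gists/{gist_id}/star", headers=headers)
print({"starred": r.status_code == 204})            # 204 starred, 404 not
```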
def x(self, destino=None, prefix='tmp', suffix='-sat.log'):
if destino:
if os.path.exists(destino):
raise IOError((errno.EEXIST, 'File exists', destino))
destino = os.path.abspath(destino)
fd = os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY)
else:
fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.write(fd, self.conteudo())
os.fsync(fd)
os.close(fd)
return os.path.abspath(destino)
|
salvar
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, destino=None, prefix='tmp', suffix='-sat.log'):\n if destino:\n if os.path.exists(destino):\n raise IOError((errno.EEXIST, 'File exists', destino))\n destino = os.path.abspath(destino)\n fd = os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY)\n else:\n fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)\n os.write(fd, self.conteudo())\n os.fsync(fd)\n os.close(fd)\n return os.path.abspath(destino)\n",
"role": "user"
},
{
"content": "salvar",
"role": "assistant"
}
] |
def x(retorno):
resposta = analisar_retorno(forcar_unicode(retorno), funcao=
'ExtrairLogs', classe_resposta=RespostaExtrairLogs, campos=
RespostaSAT.CAMPOS + (('arquivoLog', unicode),),
campos_alternativos=[RespostaSAT.CAMPOS])
if resposta.EEEEE not in ('15000',):
raise ExcecaoRespostaSAT(resposta)
return resposta
|
analisar
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(retorno):\n resposta = analisar_retorno(forcar_unicode(retorno), funcao=\n 'ExtrairLogs', classe_resposta=RespostaExtrairLogs, campos=\n RespostaSAT.CAMPOS + (('arquivoLog', unicode),),\n campos_alternativos=[RespostaSAT.CAMPOS])\n if resposta.EEEEE not in ('15000',):\n raise ExcecaoRespostaSAT(resposta)\n return resposta\n",
"role": "user"
},
{
"content": "analisar",
"role": "assistant"
}
] |
def x(self):
units = ''
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.
forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], 'units'):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, '_') in file_object.variables.keys(
):
grid_shape = file_object.variables[self.variable.ljust(
6, '_')].shape
else:
print('{0} not found'.format(self.variable))
raise KeyError
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, '_') in file_object.variables.keys(
):
var_name = self.variable.ljust(6, '_')
else:
print('{0} not found'.format(self.variable))
raise KeyError
data[f] = file_object.variables[var_name][0]
if units == '' and hasattr(file_object.variables[var_name],
'units'):
units = file_object.variables[var_name].units
else:
data = None
return data, units
|
load_data_old
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n units = ''\n if len(self.file_objects) == 1 and self.file_objects[0] is not None:\n data = self.file_objects[0].variables[self.variable][self.\n forecast_hours]\n if hasattr(self.file_objects[0].variables[self.variable], 'units'):\n units = self.file_objects[0].variables[self.variable].units\n elif len(self.file_objects) > 1:\n grid_shape = [len(self.file_objects), 1, 1]\n for file_object in self.file_objects:\n if file_object is not None:\n if self.variable in file_object.variables.keys():\n grid_shape = file_object.variables[self.variable].shape\n elif self.variable.ljust(6, '_') in file_object.variables.keys(\n ):\n grid_shape = file_object.variables[self.variable.ljust(\n 6, '_')].shape\n else:\n print('{0} not found'.format(self.variable))\n raise KeyError\n break\n data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))\n for f, file_object in enumerate(self.file_objects):\n if file_object is not None:\n if self.variable in file_object.variables.keys():\n var_name = self.variable\n elif self.variable.ljust(6, '_') in file_object.variables.keys(\n ):\n var_name = self.variable.ljust(6, '_')\n else:\n print('{0} not found'.format(self.variable))\n raise KeyError\n data[f] = file_object.variables[var_name][0]\n if units == '' and hasattr(file_object.variables[var_name],\n 'units'):\n units = file_object.variables[var_name].units\n else:\n data = None\n return data, units\n",
"role": "user"
},
{
"content": "load_data_old",
"role": "assistant"
}
] |
def x(self):
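    # Resolve the variable name (and optional z index) once, then read either
    # all requested times from one file or one time step per file. Returns
    # (data, units) with data as float32.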
units = ''
if self.file_objects[0] is None:
raise IOError()
var_name, z_index = self.format_var_name(self.variable, list(self.
file_objects[0].variables.keys()))
ntimes = 0
if 'time' in self.file_objects[0].variables[var_name].dimensions:
ntimes = len(self.file_objects[0].dimensions['time'])
if ntimes > 1:
if z_index is None:
            data = self.file_objects[0].variables[var_name][
                self.forecast_hours].astype(np.float32)
        else:
            data = self.file_objects[0].variables[var_name][
                self.forecast_hours, z_index].astype(np.float32)
else:
y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:]
data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32
)
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if z_index is None:
data[f] = file_object.variables[var_name][0]
else:
data[f] = file_object.variables[var_name][0, z_index]
if hasattr(self.file_objects[0].variables[var_name], 'units'):
units = self.file_objects[0].variables[var_name].units
return data, units
|
load_data
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n units = ''\n if self.file_objects[0] is None:\n raise IOError()\n var_name, z_index = self.format_var_name(self.variable, list(self.\n file_objects[0].variables.keys()))\n ntimes = 0\n if 'time' in self.file_objects[0].variables[var_name].dimensions:\n ntimes = len(self.file_objects[0].dimensions['time'])\n if ntimes > 1:\n if z_index is None:\n data = self.file_objects[0].variables[var_name][self.forecast_hours\n ].astype(np.float32)\n else:\n data = self.file_objects[0].variables[var_name][self.\n forecast_hours, z_index].astype(np.float32)\n else:\n y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:]\n data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32\n )\n for f, file_object in enumerate(self.file_objects):\n if file_object is not None:\n if z_index is None:\n data[f] = file_object.variables[var_name][0]\n else:\n data[f] = file_object.variables[var_name][0, z_index]\n if hasattr(self.file_objects[0].variables[var_name], 'units'):\n units = self.file_objects[0].variables[var_name].units\n return data, units\n",
"role": "user"
},
{
"content": "load_data",
"role": "assistant"
}
] |
def x(variable, var_list):
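    # Resolve a requested variable against the names available in a file:
    # exact match first, then the 6-character underscore-padded form, then a
    # composite name whose underscore-separated parts supply a z index.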
z_index = None
if variable in var_list:
var_name = variable
elif variable.ljust(6, '_') in var_list:
var_name = variable.ljust(6, '_')
elif any([(variable in v_sub.split('_')) for v_sub in var_list]):
var_name = var_list[[(variable in v_sub.split('_')) for v_sub in
var_list].index(True)]
z_index = var_name.split('_').index(variable)
else:
raise KeyError('{0} not found in {1}'.format(variable, var_list))
return var_name, z_index
|
format_var_name
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(variable, var_list):\n z_index = None\n if variable in var_list:\n var_name = variable\n elif variable.ljust(6, '_') in var_list:\n var_name = variable.ljust(6, '_')\n elif any([(variable in v_sub.split('_')) for v_sub in var_list]):\n var_name = var_list[[(variable in v_sub.split('_')) for v_sub in\n var_list].index(True)]\n z_index = var_name.split('_').index(variable)\n else:\n raise KeyError('{0} not found in {1}'.format(variable, var_list))\n return var_name, z_index\n",
"role": "user"
},
{
"content": "format_var_name",
"role": "assistant"
}
] |
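A minimal illustration of how the resolver above behaves, assuming the function is bound to the name format_var_name shown for this row; the variable lists are invented for the example:

# Hypothetical inputs: 'uh____' is a 6-character padded name, and
# 'uh_min_max' is a composite name whose underscore parts index a z level.
format_var_name('uh', ['uh____', 'cqgmax'])   # -> ('uh____', None)
format_var_name('max', ['uh_min_max'])        # -> ('uh_min_max', 2)
format_var_name('refl', ['uh____'])           # raises KeyError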
def x(self, mode='train', format='csv'):
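    # Read the total- and step-track files whose file dates fall inside the
    # configured date range for this mode, clean NaN/inf values, and build
    # the merged 'combo' and 'total_group' tables used by the fit methods.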
if mode in self.data.keys():
        run_dates = pd.date_range(start=self.start_dates[mode],
            end=self.end_dates[mode], freq='1D')
run_date_str = [d.strftime('%Y%m%d-%H%M') for d in run_dates.date]
print(run_date_str)
all_total_track_files = sorted(glob(getattr(self, mode +
'_data_path') + '*total_' + self.ensemble_name + '*.' + format))
all_step_track_files = sorted(glob(getattr(self, mode +
'_data_path') + '*step_' + self.ensemble_name + '*.' + format))
total_track_files = []
for track_file in all_total_track_files:
file_date = track_file.split('_')[-1][:-4]
if file_date in run_date_str:
total_track_files.append(track_file)
step_track_files = []
for step_file in all_step_track_files:
file_date = step_file.split('_')[-1][:-4]
if file_date in run_date_str:
step_track_files.append(step_file)
self.data[mode]['total'] = pd.concat(map(pd.read_csv,
total_track_files), ignore_index=True)
self.data[mode]['total'] = self.data[mode]['total'].fillna(value=0)
self.data[mode]['total'] = self.data[mode]['total'].replace([np.inf,
-np.inf], 0)
self.data[mode]['step'] = pd.concat(map(pd.read_csv,
step_track_files), ignore_index=True)
self.data[mode]['step'] = self.data[mode]['step'].fillna(value=0)
self.data[mode]['step'] = self.data[mode]['step'].replace([np.inf,
-np.inf], 0)
if mode == 'forecast':
self.data[mode]['step'] = self.data[mode]['step'].drop_duplicates(
'Step_ID')
self.data[mode]['member'] = pd.read_csv(self.member_files[mode])
self.data[mode]['combo'] = pd.merge(self.data[mode]['step'], self.
data[mode]['total'], on=['Track_ID', 'Ensemble_Name',
'Ensemble_Member', 'Run_Date'])
self.data[mode]['combo'] = pd.merge(self.data[mode]['combo'], self.
data[mode]['member'], on='Ensemble_Member')
self.data[mode]['total_group'] = pd.merge(self.data[mode]['total'],
self.data[mode]['member'], on='Ensemble_Member')
|
load_data
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, mode='train', format='csv'):\n if mode in self.data.keys():\n run_dates = pd.DatetimeIndex(start=self.start_dates[mode], end=self\n .end_dates[mode], freq='1D')\n run_date_str = [d.strftime('%Y%m%d-%H%M') for d in run_dates.date]\n print(run_date_str)\n all_total_track_files = sorted(glob(getattr(self, mode +\n '_data_path') + '*total_' + self.ensemble_name + '*.' + format))\n all_step_track_files = sorted(glob(getattr(self, mode +\n '_data_path') + '*step_' + self.ensemble_name + '*.' + format))\n total_track_files = []\n for track_file in all_total_track_files:\n file_date = track_file.split('_')[-1][:-4]\n if file_date in run_date_str:\n total_track_files.append(track_file)\n step_track_files = []\n for step_file in all_step_track_files:\n file_date = step_file.split('_')[-1][:-4]\n if file_date in run_date_str:\n step_track_files.append(step_file)\n self.data[mode]['total'] = pd.concat(map(pd.read_csv,\n total_track_files), ignore_index=True)\n self.data[mode]['total'] = self.data[mode]['total'].fillna(value=0)\n self.data[mode]['total'] = self.data[mode]['total'].replace([np.inf,\n -np.inf], 0)\n self.data[mode]['step'] = pd.concat(map(pd.read_csv,\n step_track_files), ignore_index=True)\n self.data[mode]['step'] = self.data[mode]['step'].fillna(value=0)\n self.data[mode]['step'] = self.data[mode]['step'].replace([np.inf, \n -np.inf], 0)\n if mode == 'forecast':\n self.data[mode]['step'] = self.data[mode]['step'].drop_duplicates(\n 'Step_ID')\n self.data[mode]['member'] = pd.read_csv(self.member_files[mode])\n self.data[mode]['combo'] = pd.merge(self.data[mode]['step'], self.\n data[mode]['total'], on=['Track_ID', 'Ensemble_Name',\n 'Ensemble_Member', 'Run_Date'])\n self.data[mode]['combo'] = pd.merge(self.data[mode]['combo'], self.\n data[mode]['member'], on='Ensemble_Member')\n self.data[mode]['total_group'] = pd.merge(self.data[mode]['total'],\n self.data[mode]['member'], on='Ensemble_Member')\n",
"role": "user"
},
{
"content": "load_data",
"role": "assistant"
}
] |
def x(self, output_file, model_names=('start-time', 'translation-x',
'translation-y'), label_columns=('Start_Time_Error',
'Translation_Error_X', 'Translation_Error_Y')):
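    # Estimate the mean vector and covariance matrix of the track error
    # labels for each ensemble group and pickle the copula parameters to
    # output_file.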
if len(self.data['train']) == 0:
self.load_data()
groups = self.data['train']['member'][self.group_col].unique()
copulas = {}
label_columns = list(label_columns)
for group in groups:
print(group)
group_data = self.data['train']['total_group'].loc[self.data[
'train']['total_group'][self.group_col] == group]
group_data = group_data.dropna()
group_data.reset_index(drop=True, inplace=True)
copulas[group] = {}
copulas[group]['mean'] = group_data[label_columns].mean(axis=0).values
copulas[group]['cov'] = np.cov(group_data[label_columns].values.T)
copulas[group]['model_names'] = list(model_names)
del group_data
    with open(output_file, 'wb') as output_file_obj:
        pickle.dump(copulas, output_file_obj, pickle.HIGHEST_PROTOCOL)
|
calc_copulas
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, output_file, model_names=('start-time', 'translation-x',\n 'translation-y'), label_columns=('Start_Time_Error',\n 'Translation_Error_X', 'Translation_Error_Y')):\n if len(self.data['train']) == 0:\n self.load_data()\n groups = self.data['train']['member'][self.group_col].unique()\n copulas = {}\n label_columns = list(label_columns)\n for group in groups:\n print(group)\n group_data = self.data['train']['total_group'].loc[self.data[\n 'train']['total_group'][self.group_col] == group]\n group_data = group_data.dropna()\n group_data.reset_index(drop=True, inplace=True)\n copulas[group] = {}\n copulas[group]['mean'] = group_data[label_columns].mean(axis=0).values\n copulas[group]['cov'] = np.cov(group_data[label_columns].values.T)\n copulas[group]['model_names'] = list(model_names)\n del group_data\n pickle.dump(copulas, open(output_file, 'w'), pickle.HIGHEST_PROTOCOL)\n",
"role": "user"
},
{
"content": "calc_copulas",
"role": "assistant"
}
] |
def x(self, model_names, model_objs, input_columns, output_column='Matched',
output_threshold=0.0):
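    # Fit one classifier per (group, model) pair to predict whether an
    # object is matched, down-weighting objects outside the sector bounds
    # when a sector is configured.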
print('Fitting condition models')
groups = self.data['train']['member'][self.group_col].unique()
weights = None
for group in groups:
print(group)
group_data = self.data['train']['combo'].loc[self.data['train'][
'combo'][self.group_col] == group]
if self.sector:
            lon_obj = group_data.loc[:, 'Centroid_Lon']
            lat_obj = group_data.loc[:, 'Centroid_Lat']
left_lon, right_lon = self.grid_dict['sw_lon'], self.grid_dict[
'ne_lon']
lower_lat, upper_lat = self.grid_dict['sw_lat'], self.grid_dict[
'ne_lat']
weights = np.where((left_lon <= lon_obj) & (right_lon >=
lon_obj) & (lower_lat <= lat_obj) & (upper_lat >= lat_obj),
1, 0.3)
output_data = np.where(group_data[output_column] > output_threshold,
1, 0)
print('Ones: ', np.count_nonzero(output_data > 0), 'Zeros: ', np.
count_nonzero(output_data == 0))
self.condition_models[group] = {}
for m, model_name in enumerate(model_names):
print(model_name)
self.condition_models[group][model_name] = deepcopy(model_objs[m])
try:
self.condition_models[group][model_name].fit(group_data[
input_columns], output_data, sample_weight=weights)
            except TypeError:  # estimator does not support sample_weight
self.condition_models[group][model_name].fit(group_data[
input_columns], output_data)
if hasattr(self.condition_models[group][model_name],
'best_estimator_'):
print(self.condition_models[group][model_name].best_estimator_)
print(self.condition_models[group][model_name].best_score_)
|
fit_condition_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, model_objs, input_columns, output_column='Matched',\n output_threshold=0.0):\n print('Fitting condition models')\n groups = self.data['train']['member'][self.group_col].unique()\n weights = None\n for group in groups:\n print(group)\n group_data = self.data['train']['combo'].loc[self.data['train'][\n 'combo'][self.group_col] == group]\n if self.sector:\n lon_obj = data.loc[:, 'Centroid_Lon']\n lat_obj = data.loc[:, 'Centroid_Lat']\n left_lon, right_lon = self.grid_dict['sw_lon'], self.grid_dict[\n 'ne_lon']\n lower_lat, upper_lat = self.grid_dict['sw_lat'], self.grid_dict[\n 'ne_lat']\n weights = np.where((left_lon <= lon_obj) & (right_lon >=\n lon_obj) & (lower_lat <= lat_obj) & (upper_lat >= lat_obj),\n 1, 0.3)\n output_data = np.where(group_data[output_column] > output_threshold,\n 1, 0)\n print('Ones: ', np.count_nonzero(output_data > 0), 'Zeros: ', np.\n count_nonzero(output_data == 0))\n self.condition_models[group] = {}\n for m, model_name in enumerate(model_names):\n print(model_name)\n self.condition_models[group][model_name] = deepcopy(model_objs[m])\n try:\n self.condition_models[group][model_name].fit(group_data[\n input_columns], output_data, sample_weight=weights)\n except:\n self.condition_models[group][model_name].fit(group_data[\n input_columns], output_data)\n if hasattr(self.condition_models[group][model_name],\n 'best_estimator_'):\n print(self.condition_models[group][model_name].best_estimator_)\n print(self.condition_models[group][model_name].best_score_)\n",
"role": "user"
},
{
"content": "fit_condition_models",
"role": "assistant"
}
] |
def x(self, model_names, model_objs, input_columns, output_column='Matched',
output_threshold=0.5, num_folds=5, threshold_score='ets'):
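    # Cross-validate each condition classifier with K-fold splits to find
    # the probability threshold that maximizes the chosen score (ETS by
    # default), then refit the classifier on the full group data.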
print('Fitting condition models')
groups = self.data['train']['member'][self.group_col].unique()
weights = None
for group in groups:
print(group)
group_data = self.data['train']['combo'].iloc[np.where(self.data[
'train']['combo'][self.group_col] == group)[0]]
if self.sector:
lon_obj = group_data.loc[:, 'Centroid_Lon']
lat_obj = group_data.loc[:, 'Centroid_Lat']
conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.
values.ravel())
center_lon, center_lat = self.proj_dict['lon_0'], self.proj_dict[
'lat_0']
distances = np.array([np.sqrt((x - center_lon) ** 2 + (y -
center_lat) ** 2) for x, y in conus_lat_lon_points])
min_dist, max_minus_min = min(distances), max(distances) - min(
distances)
distance_0_1 = [(1.0 - (d - min_dist) / max_minus_min) for d in
distances]
weights = np.array(distance_0_1)
output_data = np.where(group_data.loc[:, output_column] >
output_threshold, 1, 0)
ones = np.count_nonzero(output_data > 0)
print('Ones: ', ones, 'Zeros: ', np.count_nonzero(output_data == 0))
self.condition_models[group] = {}
num_elements = group_data[input_columns].shape[0]
for m, model_name in enumerate(model_names):
print(model_name)
roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.01))
self.condition_models[group][model_name] = deepcopy(model_objs[m])
try:
kf = KFold(n_splits=num_folds)
for train_index, test_index in kf.split(group_data[
input_columns].values):
if np.count_nonzero(output_data[train_index]) > 0:
try:
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index], sample_weight=
weights[train_index])
                        except TypeError:  # no sample_weight support
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index])
cv_preds = self.condition_models[group][model_name
].predict_proba(group_data.iloc[test_index][
input_columns])[:, 1]
roc.update(cv_preds, output_data[test_index])
else:
continue
except TypeError:
kf = KFold(num_elements, n_folds=num_folds)
for train_index, test_index in kf:
if np.count_nonzero(output_data[train_index]) > 0:
try:
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index], sample_weight=
weights[train_index])
                        except TypeError:  # no sample_weight support
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index])
cv_preds = self.condition_models[group][model_name
].predict_proba(group_data.iloc[test_index][
input_columns])[:, 1]
roc.update(cv_preds, output_data[test_index])
else:
continue
self.condition_models[group][model_name + '_condition_threshold'
], _ = roc.max_threshold_score(threshold_score)
print(model_name + ' condition threshold: {0:0.3f}'.format(self
.condition_models[group][model_name + '_condition_threshold']))
self.condition_models[group][model_name].fit(group_data[
input_columns], output_data)
|
fit_condition_threshold_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, model_objs, input_columns, output_column='Matched',\n output_threshold=0.5, num_folds=5, threshold_score='ets'):\n print('Fitting condition models')\n groups = self.data['train']['member'][self.group_col].unique()\n weights = None\n for group in groups:\n print(group)\n group_data = self.data['train']['combo'].iloc[np.where(self.data[\n 'train']['combo'][self.group_col] == group)[0]]\n if self.sector:\n lon_obj = group_data.loc[:, 'Centroid_Lon']\n lat_obj = group_data.loc[:, 'Centroid_Lat']\n conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.\n values.ravel())\n center_lon, center_lat = self.proj_dict['lon_0'], self.proj_dict[\n 'lat_0']\n distances = np.array([np.sqrt((x - center_lon) ** 2 + (y -\n center_lat) ** 2) for x, y in conus_lat_lon_points])\n min_dist, max_minus_min = min(distances), max(distances) - min(\n distances)\n distance_0_1 = [(1.0 - (d - min_dist) / max_minus_min) for d in\n distances]\n weights = np.array(distance_0_1)\n output_data = np.where(group_data.loc[:, output_column] >\n output_threshold, 1, 0)\n ones = np.count_nonzero(output_data > 0)\n print('Ones: ', ones, 'Zeros: ', np.count_nonzero(output_data == 0))\n self.condition_models[group] = {}\n num_elements = group_data[input_columns].shape[0]\n for m, model_name in enumerate(model_names):\n print(model_name)\n roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.01))\n self.condition_models[group][model_name] = deepcopy(model_objs[m])\n try:\n kf = KFold(n_splits=num_folds)\n for train_index, test_index in kf.split(group_data[\n input_columns].values):\n if np.count_nonzero(output_data[train_index]) > 0:\n try:\n self.condition_models[group][model_name].fit(\n group_data.iloc[train_index][input_columns],\n output_data[train_index], sample_weight=\n weights[train_index])\n except:\n self.condition_models[group][model_name].fit(\n group_data.iloc[train_index][input_columns],\n output_data[train_index])\n cv_preds = self.condition_models[group][model_name\n ].predict_proba(group_data.iloc[test_index][\n input_columns])[:, 1]\n roc.update(cv_preds, output_data[test_index])\n else:\n continue\n except TypeError:\n kf = KFold(num_elements, n_folds=num_folds)\n for train_index, test_index in kf:\n if np.count_nonzero(output_data[train_index]) > 0:\n try:\n self.condition_models[group][model_name].fit(\n group_data.iloc[train_index][input_columns],\n output_data[train_index], sample_weight=\n weights[train_index])\n except:\n self.condition_models[group][model_name].fit(\n group_data.iloc[train_index][input_columns],\n output_data[train_index])\n cv_preds = self.condition_models[group][model_name\n ].predict_proba(group_data.iloc[test_index][\n input_columns])[:, 1]\n roc.update(cv_preds, output_data[test_index])\n else:\n continue\n self.condition_models[group][model_name + '_condition_threshold'\n ], _ = roc.max_threshold_score(threshold_score)\n print(model_name + ' condition threshold: {0:0.3f}'.format(self\n .condition_models[group][model_name + '_condition_threshold']))\n self.condition_models[group][model_name].fit(group_data[\n input_columns], output_data)\n",
"role": "user"
},
{
"content": "fit_condition_threshold_models",
"role": "assistant"
}
] |
def x(self, model_names, input_columns, metadata_cols, data_mode='forecast'):
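    # Apply the fitted condition classifiers to each group, recording both
    # the raw matched probability and the thresholded binary decision.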
groups = self.condition_models.keys()
predictions = pd.DataFrame(self.data[data_mode]['combo'][metadata_cols])
for group in groups:
print(group)
print(self.condition_models[group])
g_idxs = self.data[data_mode]['combo'][self.group_col] == group
group_count = np.count_nonzero(g_idxs)
if group_count > 0:
for m, model_name in enumerate(model_names):
mn = model_name.replace(' ', '-')
predictions.loc[g_idxs, mn + '_conditionprob'
] = self.condition_models[group][model_name].predict_proba(
self.data[data_mode]['combo'].loc[g_idxs, input_columns])[
:, 1]
predictions.loc[g_idxs, mn + '_conditionthresh'] = np.where(
predictions.loc[g_idxs, mn + '_conditionprob'] >= self.
condition_models[group][model_name +
'_condition_threshold'], 1, 0)
return predictions
|
predict_condition_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, input_columns, metadata_cols, data_mode='forecast'):\n groups = self.condition_models.keys()\n predictions = pd.DataFrame(self.data[data_mode]['combo'][metadata_cols])\n for group in groups:\n print(group)\n print(self.condition_models[group])\n g_idxs = self.data[data_mode]['combo'][self.group_col] == group\n group_count = np.count_nonzero(g_idxs)\n if group_count > 0:\n for m, model_name in enumerate(model_names):\n mn = model_name.replace(' ', '-')\n predictions.loc[g_idxs, mn + '_conditionprob'\n ] = self.condition_models[group][model_name].predict_proba(\n self.data[data_mode]['combo'].loc[g_idxs, input_columns])[\n :, 1]\n predictions.loc[g_idxs, mn + '_conditionthresh'] = np.where(\n predictions.loc[g_idxs, mn + '_conditionprob'] >= self.\n condition_models[group][model_name +\n '_condition_threshold'], 1, 0)\n return predictions\n",
"role": "user"
},
{
"content": "predict_condition_models",
"role": "assistant"
}
] |
def x(self, model_names, model_objs, input_columns, output_columns=None,
calibrate=False):
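    # Fit multi-output regressors to the standardized log of the size
    # distribution parameters, with optional linear recalibration of the
    # shape and scale predictions.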
if output_columns is None:
output_columns = ['Shape', 'Location', 'Scale']
groups = np.unique(self.data['train']['member'][self.group_col])
weights = None
for group in groups:
group_data = self.data['train']['combo'].loc[self.data['train'][
'combo'][self.group_col] == group]
group_data = group_data.dropna()
group_data = group_data[group_data[output_columns[-1]] > 0]
if self.sector:
lon_obj = group_data.loc[:, 'Centroid_Lon']
lat_obj = group_data.loc[:, 'Centroid_Lat']
conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.
values.ravel())
center_lon, center_lat = self.proj_dict['lon_0'], self.proj_dict[
'lat_0']
distances = np.array([np.sqrt((x - center_lon) ** 2 + (y -
center_lat) ** 2) for x, y in conus_lat_lon_points])
min_dist, max_minus_min = min(distances), max(distances) - min(
distances)
distance_0_1 = [(1.0 - (d - min_dist) / max_minus_min) for d in
distances]
weights = np.array(distance_0_1)
self.size_distribution_models[group] = {'multi': {}, 'lognorm': {}}
if calibrate:
self.size_distribution_models[group]['calshape'] = {}
self.size_distribution_models[group]['calscale'] = {}
log_labels = np.log(group_data[output_columns].values)
log_means = log_labels.mean(axis=0)
log_sds = log_labels.std(axis=0)
self.size_distribution_models[group]['lognorm']['mean'] = log_means
self.size_distribution_models[group]['lognorm']['sd'] = log_sds
for m, model_name in enumerate(model_names):
print(group, model_name)
self.size_distribution_models[group]['multi'][model_name
] = deepcopy(model_objs[m])
try:
self.size_distribution_models[group]['multi'][model_name].fit(
group_data[input_columns], (log_labels - log_means) /
log_sds, sample_weight=weights)
            except TypeError:  # estimator does not support sample_weight
self.size_distribution_models[group]['multi'][model_name].fit(
group_data[input_columns], (log_labels - log_means) /
log_sds)
if calibrate:
training_predictions = self.size_distribution_models[group][
'multi'][model_name].predict(group_data[input_columns])
self.size_distribution_models[group]['calshape'][model_name
] = LinearRegression()
self.size_distribution_models[group]['calshape'][model_name
].fit(training_predictions[:, 0:1], (log_labels[:, 0] -
log_means[0]) / log_sds[0], sample_weight=weights)
self.size_distribution_models[group]['calscale'][model_name
] = LinearRegression()
self.size_distribution_models[group]['calscale'][model_name
].fit(training_predictions[:, 1:], (log_labels[:, 1] -
log_means[1]) / log_sds[1], sample_weight=weights)
|
fit_size_distribution_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, model_objs, input_columns, output_columns=None,\n calibrate=False):\n if output_columns is None:\n output_columns = ['Shape', 'Location', 'Scale']\n groups = np.unique(self.data['train']['member'][self.group_col])\n weights = None\n for group in groups:\n group_data = self.data['train']['combo'].loc[self.data['train'][\n 'combo'][self.group_col] == group]\n group_data = group_data.dropna()\n group_data = group_data[group_data[output_columns[-1]] > 0]\n if self.sector:\n lon_obj = group_data.loc[:, 'Centroid_Lon']\n lat_obj = group_data.loc[:, 'Centroid_Lat']\n conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.\n values.ravel())\n center_lon, center_lat = self.proj_dict['lon_0'], self.proj_dict[\n 'lat_0']\n distances = np.array([np.sqrt((x - center_lon) ** 2 + (y -\n center_lat) ** 2) for x, y in conus_lat_lon_points])\n min_dist, max_minus_min = min(distances), max(distances) - min(\n distances)\n distance_0_1 = [(1.0 - (d - min_dist) / max_minus_min) for d in\n distances]\n weights = np.array(distance_0_1)\n self.size_distribution_models[group] = {'multi': {}, 'lognorm': {}}\n if calibrate:\n self.size_distribution_models[group]['calshape'] = {}\n self.size_distribution_models[group]['calscale'] = {}\n log_labels = np.log(group_data[output_columns].values)\n log_means = log_labels.mean(axis=0)\n log_sds = log_labels.std(axis=0)\n self.size_distribution_models[group]['lognorm']['mean'] = log_means\n self.size_distribution_models[group]['lognorm']['sd'] = log_sds\n for m, model_name in enumerate(model_names):\n print(group, model_name)\n self.size_distribution_models[group]['multi'][model_name\n ] = deepcopy(model_objs[m])\n try:\n self.size_distribution_models[group]['multi'][model_name].fit(\n group_data[input_columns], (log_labels - log_means) /\n log_sds, sample_weight=weights)\n except:\n self.size_distribution_models[group]['multi'][model_name].fit(\n group_data[input_columns], (log_labels - log_means) /\n log_sds)\n if calibrate:\n training_predictions = self.size_distribution_models[group][\n 'multi'][model_name].predict(group_data[input_columns])\n self.size_distribution_models[group]['calshape'][model_name\n ] = LinearRegression()\n self.size_distribution_models[group]['calshape'][model_name\n ].fit(training_predictions[:, 0:1], (log_labels[:, 0] -\n log_means[0]) / log_sds[0], sample_weight=weights)\n self.size_distribution_models[group]['calscale'][model_name\n ] = LinearRegression()\n self.size_distribution_models[group]['calscale'][model_name\n ].fit(training_predictions[:, 1:], (log_labels[:, 1] -\n log_means[1]) / log_sds[1], sample_weight=weights)\n",
"role": "user"
},
{
"content": "fit_size_distribution_models",
"role": "assistant"
}
] |
def x(self, model_names, model_objs, input_columns, output_columns):
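    # Variant of the multi-output fit: regress each principal component of
    # the standardized log parameters separately (the shape column is
    # negated before the PCA).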
groups = np.unique(self.data['train']['member'][self.group_col])
weights = None
for group in groups:
print(group)
group_data = self.data['train']['combo'].loc[self.data['train'][
'combo'][self.group_col] == group]
group_data = group_data.dropna()
group_data = group_data.loc[group_data[output_columns[-1]] > 0]
if self.sector:
lon_obj = group_data.loc[:, 'Centroid_Lon']
lat_obj = group_data.loc[:, 'Centroid_Lat']
conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.
values.ravel())
center_lon, center_lat = self.proj_dict['lon_0'], self.proj_dict[
'lat_0']
distances = np.array([np.sqrt((x - center_lon) ** 2 + (y -
center_lat) ** 2) for x, y in conus_lat_lon_points])
min_dist, max_minus_min = min(distances), max(distances) - min(
distances)
distance_0_1 = [(1.0 - (d - min_dist) / max_minus_min) for d in
distances]
weights = np.array(distance_0_1)
self.size_distribution_models[group] = {'lognorm': {}}
self.size_distribution_models[group]['lognorm']['pca'] = PCA(
n_components=len(output_columns))
log_labels = np.log(group_data[output_columns].values)
        log_labels[:, np.where(np.asarray(output_columns) == 'Shape')[0]] *= -1
log_means = log_labels.mean(axis=0)
log_sds = log_labels.std(axis=0)
log_norm_labels = (log_labels - log_means) / log_sds
out_pc_labels = self.size_distribution_models[group]['lognorm']['pca'
].fit_transform(log_norm_labels)
self.size_distribution_models[group]['lognorm']['mean'] = log_means
self.size_distribution_models[group]['lognorm']['sd'] = log_sds
for comp in range(len(output_columns)):
self.size_distribution_models[group]['pc_{0:d}'.format(comp)
] = dict()
for m, model_name in enumerate(model_names):
print(model_name, comp)
self.size_distribution_models[group]['pc_{0:d}'.format(comp)][
model_name] = deepcopy(model_objs[m])
try:
self.size_distribution_models[group]['pc_{0:d}'.format(
comp)][model_name].fit(group_data[input_columns],
out_pc_labels[:, comp], sample_weight=weights)
                except TypeError:  # estimator does not support sample_weight
self.size_distribution_models[group]['pc_{0:d}'.format(
comp)][model_name].fit(group_data[input_columns],
out_pc_labels[:, comp])
return
|
fit_size_distribution_component_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, model_objs, input_columns, output_columns):\n groups = np.unique(self.data['train']['member'][self.group_col])\n weights = None\n for group in groups:\n print(group)\n group_data = self.data['train']['combo'].loc[self.data['train'][\n 'combo'][self.group_col] == group]\n group_data = group_data.dropna()\n group_data = group_data.loc[group_data[output_columns[-1]] > 0]\n if self.sector:\n lon_obj = group_data.loc[:, 'Centroid_Lon']\n lat_obj = group_data.loc[:, 'Centroid_Lat']\n conus_lat_lon_points = zip(lon_obj.values.ravel(), lat_obj.\n values.ravel())\n center_lon, center_lat = self.proj_dict['lon_0'], self.proj_dict[\n 'lat_0']\n distances = np.array([np.sqrt((x - center_lon) ** 2 + (y -\n center_lat) ** 2) for x, y in conus_lat_lon_points])\n min_dist, max_minus_min = min(distances), max(distances) - min(\n distances)\n distance_0_1 = [(1.0 - (d - min_dist) / max_minus_min) for d in\n distances]\n weights = np.array(distance_0_1)\n self.size_distribution_models[group] = {'lognorm': {}}\n self.size_distribution_models[group]['lognorm']['pca'] = PCA(\n n_components=len(output_columns))\n log_labels = np.log(group_data[output_columns].values)\n log_labels[:, np.where(output_columns == 'Shape')[0]] *= -1\n log_means = log_labels.mean(axis=0)\n log_sds = log_labels.std(axis=0)\n log_norm_labels = (log_labels - log_means) / log_sds\n out_pc_labels = self.size_distribution_models[group]['lognorm']['pca'\n ].fit_transform(log_norm_labels)\n self.size_distribution_models[group]['lognorm']['mean'] = log_means\n self.size_distribution_models[group]['lognorm']['sd'] = log_sds\n for comp in range(len(output_columns)):\n self.size_distribution_models[group]['pc_{0:d}'.format(comp)\n ] = dict()\n for m, model_name in enumerate(model_names):\n print(model_name, comp)\n self.size_distribution_models[group]['pc_{0:d}'.format(comp)][\n model_name] = deepcopy(model_objs[m])\n try:\n self.size_distribution_models[group]['pc_{0:d}'.format(\n comp)][model_name].fit(group_data[input_columns],\n out_pc_labels[:, comp], sample_weight=weights)\n except:\n self.size_distribution_models[group]['pc_{0:d}'.format(\n comp)][model_name].fit(group_data[input_columns],\n out_pc_labels[:, comp])\n return\n",
"role": "user"
},
{
"content": "fit_size_distribution_component_models",
"role": "assistant"
}
] |
def x(self, model_names, input_columns, metadata_cols, data_mode='forecast',
location=6, calibrate=False):
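    # Predict distribution parameters with the multi-output models, invert
    # the log standardization, and insert the fixed location value when a
    # model predicts only shape and scale.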
groups = self.size_distribution_models.keys()
predictions = {}
for group in groups:
group_data = self.data[data_mode]['combo'].loc[self.data[data_mode]
['combo'][self.group_col] == group]
predictions[group] = group_data[metadata_cols]
if group_data.shape[0] > 0:
log_mean = self.size_distribution_models[group]['lognorm']['mean']
log_sd = self.size_distribution_models[group]['lognorm']['sd']
for m, model_name in enumerate(model_names):
multi_predictions = self.size_distribution_models[group][
'multi'][model_name].predict(group_data[input_columns])
if calibrate:
multi_predictions[:, 0] = self.size_distribution_models[
group]['calshape'][model_name].predict(
multi_predictions[:, 0:1])
multi_predictions[:, 1] = self.size_distribution_models[
group]['calscale'][model_name].predict(
multi_predictions[:, 1:])
multi_predictions = np.exp(multi_predictions * log_sd +
log_mean)
if multi_predictions.shape[1] == 2:
multi_predictions_temp = np.zeros((multi_predictions.
shape[0], 3))
multi_predictions_temp[:, 0] = multi_predictions[:, 0]
multi_predictions_temp[:, 1] = location
multi_predictions_temp[:, 2] = multi_predictions[:, 1]
multi_predictions = multi_predictions_temp
for p, pred_col in enumerate(['shape', 'location', 'scale']):
                    predictions[group].loc[:, model_name.replace(' ', '-') +
                        '_' + pred_col] = multi_predictions[:, p]
return predictions
|
predict_size_distribution_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, input_columns, metadata_cols, data_mode='forecast',\n location=6, calibrate=False):\n groups = self.size_distribution_models.keys()\n predictions = {}\n for group in groups:\n group_data = self.data[data_mode]['combo'].loc[self.data[data_mode]\n ['combo'][self.group_col] == group]\n predictions[group] = group_data[metadata_cols]\n if group_data.shape[0] > 0:\n log_mean = self.size_distribution_models[group]['lognorm']['mean']\n log_sd = self.size_distribution_models[group]['lognorm']['sd']\n for m, model_name in enumerate(model_names):\n multi_predictions = self.size_distribution_models[group][\n 'multi'][model_name].predict(group_data[input_columns])\n if calibrate:\n multi_predictions[:, 0] = self.size_distribution_models[\n group]['calshape'][model_name].predict(\n multi_predictions[:, 0:1])\n multi_predictions[:, 1] = self.size_distribution_models[\n group]['calscale'][model_name].predict(\n multi_predictions[:, 1:])\n multi_predictions = np.exp(multi_predictions * log_sd +\n log_mean)\n if multi_predictions.shape[1] == 2:\n multi_predictions_temp = np.zeros((multi_predictions.\n shape[0], 3))\n multi_predictions_temp[:, 0] = multi_predictions[:, 0]\n multi_predictions_temp[:, 1] = location\n multi_predictions_temp[:, 2] = multi_predictions[:, 1]\n multi_predictions = multi_predictions_temp\n for p, pred_col in enumerate(['shape', 'location', 'scale']):\n predictions[group][model_name].loc[:, model_name.\n replace(' ', '-') + '_' + pred_col\n ] = multi_predictions[:, p]\n return predictions\n",
"role": "user"
},
{
"content": "predict_size_distribution_models",
"role": "assistant"
}
] |
def x(self, model_names, input_columns, output_columns, metadata_cols,
data_mode='forecast', location=6):
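    # Predict each principal component, invert the PCA and the log
    # standardization, and assemble per-model shape/location/scale columns.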
groups = self.size_distribution_models.keys()
predictions = pd.DataFrame(self.data[data_mode]['combo'][metadata_cols])
for group in groups:
group_idxs = self.data[data_mode]['combo'][self.group_col] == group
group_count = np.count_nonzero(group_idxs)
print(self.size_distribution_models[group])
if group_count > 0:
log_mean = self.size_distribution_models[group]['lognorm']['mean']
log_sd = self.size_distribution_models[group]['lognorm']['sd']
for m, model_name in enumerate(model_names):
raw_preds = np.zeros((group_count, len(output_columns)))
for c in range(len(output_columns)):
raw_preds[:, c] = self.size_distribution_models[group][
'pc_{0:d}'.format(c)][model_name].predict(self.data
[data_mode]['combo'].loc[group_idxs, input_columns])
log_norm_preds = self.size_distribution_models[group]['lognorm'
]['pca'].inverse_transform(raw_preds)
log_norm_preds[:, 0] *= -1
multi_predictions = np.exp(log_norm_preds * log_sd + log_mean)
if multi_predictions.shape[1] == 2:
multi_predictions_temp = np.zeros((multi_predictions.
shape[0], 3))
multi_predictions_temp[:, 0] = multi_predictions[:, 0]
multi_predictions_temp[:, 1] = location
multi_predictions_temp[:, 2] = multi_predictions[:, 1]
multi_predictions = multi_predictions_temp
for p, pred_col in enumerate(['shape', 'location', 'scale']):
predictions.loc[group_idxs, model_name.replace(' ', '-'
) + '_' + pred_col] = multi_predictions[:, p]
return predictions
|
predict_size_distribution_component_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, input_columns, output_columns, metadata_cols,\n data_mode='forecast', location=6):\n groups = self.size_distribution_models.keys()\n predictions = pd.DataFrame(self.data[data_mode]['combo'][metadata_cols])\n for group in groups:\n group_idxs = self.data[data_mode]['combo'][self.group_col] == group\n group_count = np.count_nonzero(group_idxs)\n print(self.size_distribution_models[group])\n if group_count > 0:\n log_mean = self.size_distribution_models[group]['lognorm']['mean']\n log_sd = self.size_distribution_models[group]['lognorm']['sd']\n for m, model_name in enumerate(model_names):\n raw_preds = np.zeros((group_count, len(output_columns)))\n for c in range(len(output_columns)):\n raw_preds[:, c] = self.size_distribution_models[group][\n 'pc_{0:d}'.format(c)][model_name].predict(self.data\n [data_mode]['combo'].loc[group_idxs, input_columns])\n log_norm_preds = self.size_distribution_models[group]['lognorm'\n ]['pca'].inverse_transform(raw_preds)\n log_norm_preds[:, 0] *= -1\n multi_predictions = np.exp(log_norm_preds * log_sd + log_mean)\n if multi_predictions.shape[1] == 2:\n multi_predictions_temp = np.zeros((multi_predictions.\n shape[0], 3))\n multi_predictions_temp[:, 0] = multi_predictions[:, 0]\n multi_predictions_temp[:, 1] = location\n multi_predictions_temp[:, 2] = multi_predictions[:, 1]\n multi_predictions = multi_predictions_temp\n for p, pred_col in enumerate(['shape', 'location', 'scale']):\n predictions.loc[group_idxs, model_name.replace(' ', '-'\n ) + '_' + pred_col] = multi_predictions[:, p]\n return predictions\n",
"role": "user"
},
{
"content": "predict_size_distribution_component_models",
"role": "assistant"
}
] |
def x(self, model_names, model_objs, input_columns, output_column=
'Hail_Size', output_start=5, output_step=5, output_stop=100):
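    # Discretize hail sizes into fixed-width bins between output_start and
    # output_stop and fit one classifier per (group, model) pair on the
    # binned values.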
print('Fitting size models')
groups = self.data['train']['member'][self.group_col].unique()
output_start = int(output_start)
output_step = int(output_step)
output_stop = int(output_stop)
for group in groups:
group_data = self.data['train']['combo'].loc[self.data['train'][
'combo'][self.group_col] == group]
        group_data = group_data.dropna()
group_data = group_data[group_data[output_column] >= output_start]
output_data = group_data[output_column].values.astype(int)
output_data[output_data > output_stop] = output_stop
discrete_data = (output_data - output_start
) // output_step * output_step + output_start
self.size_models[group] = {}
self.size_models[group]['outputvalues'] = np.arange(output_start,
output_stop + output_step, output_step, dtype=int)
for m, model_name in enumerate(model_names):
print('{0} {1}'.format(group, model_name))
self.size_models[group][model_name] = deepcopy(model_objs[m])
self.size_models[group][model_name].fit(group_data[
input_columns], discrete_data)
|
fit_size_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, model_objs, input_columns, output_column=\n 'Hail_Size', output_start=5, output_step=5, output_stop=100):\n print('Fitting size models')\n groups = self.data['train']['member'][self.group_col].unique()\n output_start = int(output_start)\n output_step = int(output_step)\n output_stop = int(output_stop)\n for group in groups:\n group_data = self.data['train']['combo'].loc[self.data['train'][\n 'combo'][self.group_col] == group]\n group_data.dropna(inplace=True)\n group_data = group_data[group_data[output_column] >= output_start]\n output_data = group_data[output_column].values.astype(int)\n output_data[output_data > output_stop] = output_stop\n discrete_data = (output_data - output_start\n ) // output_step * output_step + output_start\n self.size_models[group] = {}\n self.size_models[group]['outputvalues'] = np.arange(output_start, \n output_stop + output_step, output_step, dtype=int)\n for m, model_name in enumerate(model_names):\n print('{0} {1}'.format(group, model_name))\n self.size_models[group][model_name] = deepcopy(model_objs[m])\n self.size_models[group][model_name].fit(group_data[\n input_columns], discrete_data)\n",
"role": "user"
},
{
"content": "fit_size_models",
"role": "assistant"
}
] |
def x(self, model_names, input_columns, metadata_cols, data_mode='forecast'):
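    # Expand each size classifier's class probabilities into a PDF over the
    # full set of output bins, leaving zeros for bins with no training class.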
groups = self.size_models.keys()
predictions = {}
for group in groups:
group_data = self.data[data_mode]['combo'].loc[self.data[data_mode]
['combo'][self.group_col] == group]
if group_data.shape[0] > 0:
predictions[group] = {}
output_values = self.size_models[group]['outputvalues'].astype(int)
for m, model_name in enumerate(model_names):
print('{0} {1}'.format(group, model_name))
pred_col_names = [(model_name.replace(' ', '-') +
'_{0:02d}'.format(p)) for p in output_values]
predictions[group][model_name] = group_data[metadata_cols]
pred_vals = self.size_models[group][model_name].predict_proba(
group_data[input_columns])
pred_classes = self.size_models[group][model_name].classes_
pred_pdf = np.zeros((pred_vals.shape[0], output_values.size))
for pcv, pc in enumerate(pred_classes):
idx = np.where(output_values == pc)[0][0]
pred_pdf[:, idx] = pred_vals[:, pcv]
for pcn, pred_col_name in enumerate(pred_col_names):
predictions[group][model_name].loc[:, pred_col_name
] = pred_pdf[:, pcn]
return predictions
|
predict_size_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, input_columns, metadata_cols, data_mode='forecast'):\n groups = self.size_models.keys()\n predictions = {}\n for group in groups:\n group_data = self.data[data_mode]['combo'].loc[self.data[data_mode]\n ['combo'][self.group_col] == group]\n if group_data.shape[0] > 0:\n predictions[group] = {}\n output_values = self.size_models[group]['outputvalues'].astype(int)\n for m, model_name in enumerate(model_names):\n print('{0} {1}'.format(group, model_name))\n pred_col_names = [(model_name.replace(' ', '-') +\n '_{0:02d}'.format(p)) for p in output_values]\n predictions[group][model_name] = group_data[metadata_cols]\n pred_vals = self.size_models[group][model_name].predict_proba(\n group_data[input_columns])\n pred_classes = self.size_models[group][model_name].classes_\n pred_pdf = np.zeros((pred_vals.shape[0], output_values.size))\n for pcv, pc in enumerate(pred_classes):\n idx = np.where(output_values == pc)[0][0]\n pred_pdf[:, idx] = pred_vals[:, pcv]\n for pcn, pred_col_name in enumerate(pred_col_names):\n predictions[group][model_name].loc[:, pred_col_name\n ] = pred_pdf[:, pcn]\n return predictions\n",
"role": "user"
},
{
"content": "predict_size_models",
"role": "assistant"
}
] |
def x(self, model_names, model_objs, input_columns, output_columns,
output_ranges):
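    # Fit classifiers for each track model type on output values clipped to
    # the configured ranges and discretized, using only first-duration steps.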
print('Fitting track models')
groups = self.data['train']['member'][self.group_col].unique()
for group in groups:
group_data = self.data['train']['combo'].loc[self.data['train'][
'combo'][self.group_col] == group]
group_data = group_data.dropna()
group_data = group_data.loc[group_data['Duration_Step'] == 1]
for model_type, model_dict in self.track_models.items():
model_dict[group] = {}
output_data = group_data[output_columns[model_type]].values.astype(
int)
output_data[output_data < output_ranges[model_type][0]
] = output_ranges[model_type][0]
output_data[output_data > output_ranges[model_type][1]
] = output_ranges[model_type][1]
discrete_data = (output_data - output_ranges[model_type][0]
) // output_ranges[model_type][2] * output_ranges[model_type][2
] + output_ranges[model_type][0]
model_dict[group]['outputvalues'] = np.arange(output_ranges[
model_type][0], output_ranges[model_type][1] +
output_ranges[model_type][2], output_ranges[model_type][2])
for m, model_name in enumerate(model_names):
print('{0} {1} {2}'.format(group, model_type, model_name))
model_dict[group][model_name] = deepcopy(model_objs[m])
model_dict[group][model_name].fit(group_data[input_columns],
discrete_data)
|
fit_track_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_names, model_objs, input_columns, output_columns,\n output_ranges):\n print('Fitting track models')\n groups = self.data['train']['member'][self.group_col].unique()\n for group in groups:\n group_data = self.data['train']['combo'].loc[self.data['train'][\n 'combo'][self.group_col] == group]\n group_data = group_data.dropna()\n group_data = group_data.loc[group_data['Duration_Step'] == 1]\n for model_type, model_dict in self.track_models.items():\n model_dict[group] = {}\n output_data = group_data[output_columns[model_type]].values.astype(\n int)\n output_data[output_data < output_ranges[model_type][0]\n ] = output_ranges[model_type][0]\n output_data[output_data > output_ranges[model_type][1]\n ] = output_ranges[model_type][1]\n discrete_data = (output_data - output_ranges[model_type][0]\n ) // output_ranges[model_type][2] * output_ranges[model_type][2\n ] + output_ranges[model_type][0]\n model_dict[group]['outputvalues'] = np.arange(output_ranges[\n model_type][0], output_ranges[model_type][1] +\n output_ranges[model_type][2], output_ranges[model_type][2])\n for m, model_name in enumerate(model_names):\n print('{0} {1} {2}'.format(group, model_type, model_name))\n model_dict[group][model_name] = deepcopy(model_objs[m])\n model_dict[group][model_name].fit(group_data[input_columns],\n discrete_data)\n",
"role": "user"
},
{
"content": "fit_track_models",
"role": "assistant"
}
] |
def x(self, model_path):
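    # Pickle every fitted condition, size, size distribution, and track
    # model under model_path with a group/name/type file naming scheme.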
for group, condition_model_set in self.condition_models.items():
for model_name, model_obj in condition_model_set.items():
out_filename = model_path + '{0}_{1}_condition.pkl'.format(group,
model_name.replace(' ', '-'))
with open(out_filename, 'wb') as pickle_file:
pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)
for group, size_model_set in self.size_models.items():
for model_name, model_obj in size_model_set.items():
out_filename = model_path + '{0}_{1}_size.pkl'.format(group,
model_name.replace(' ', '-'))
with open(out_filename, 'wb') as pickle_file:
pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)
for group, dist_model_set in self.size_distribution_models.items():
for model_type, model_objs in dist_model_set.items():
for model_name, model_obj in model_objs.items():
out_filename = model_path + '{0}_{1}_{2}_sizedist.pkl'.format(
group, model_name.replace(' ', '-'), model_type)
with open(out_filename, 'wb') as pickle_file:
pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL
)
for model_type, track_type_models in self.track_models.items():
for group, track_model_set in track_type_models.items():
for model_name, model_obj in track_model_set.items():
out_filename = model_path + '{0}_{1}_{2}_track.pkl'.format(
group, model_name.replace(' ', '-'), model_type)
with open(out_filename, 'wb') as pickle_file:
pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL
)
return
|
save_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_path):\n for group, condition_model_set in self.condition_models.items():\n for model_name, model_obj in condition_model_set.items():\n out_filename = model_path + '{0}_{1}_condition.pkl'.format(group,\n model_name.replace(' ', '-'))\n with open(out_filename, 'wb') as pickle_file:\n pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)\n for group, size_model_set in self.size_models.items():\n for model_name, model_obj in size_model_set.items():\n out_filename = model_path + '{0}_{1}_size.pkl'.format(group,\n model_name.replace(' ', '-'))\n with open(out_filename, 'wb') as pickle_file:\n pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)\n for group, dist_model_set in self.size_distribution_models.items():\n for model_type, model_objs in dist_model_set.items():\n for model_name, model_obj in model_objs.items():\n out_filename = model_path + '{0}_{1}_{2}_sizedist.pkl'.format(\n group, model_name.replace(' ', '-'), model_type)\n with open(out_filename, 'wb') as pickle_file:\n pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL\n )\n for model_type, track_type_models in self.track_models.items():\n for group, track_model_set in track_type_models.items():\n for model_name, model_obj in track_model_set.items():\n out_filename = model_path + '{0}_{1}_{2}_track.pkl'.format(\n group, model_name.replace(' ', '-'), model_type)\n with open(out_filename, 'wb') as pickle_file:\n pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL\n )\n return\n",
"role": "user"
},
{
"content": "save_models",
"role": "assistant"
}
] |
def x(self, model_path):
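    # Rebuild the model dictionaries from pickled files, parsing the group,
    # model name, and model type back out of each file name.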
condition_model_files = sorted(glob(model_path + '*_condition.pkl'))
if len(condition_model_files) > 0:
for condition_model_file in condition_model_files:
model_comps = condition_model_file.split('/')[-1][:-4].split('_')
if model_comps[0] not in self.condition_models.keys():
self.condition_models[model_comps[0]] = {}
model_name = model_comps[1].replace('-', ' ')
with open(condition_model_file, 'rb') as cmf:
if 'condition_threshold' in condition_model_file:
self.condition_models[model_comps[0]][model_name +
'_condition_threshold'] = pickle.load(cmf)
else:
self.condition_models[model_comps[0]][model_name
] = pickle.load(cmf)
size_model_files = sorted(glob(model_path + '*_size.pkl'))
if len(size_model_files) > 0:
for size_model_file in size_model_files:
model_comps = size_model_file.split('/')[-1][:-4].split('_')
if model_comps[0] not in self.size_models.keys():
self.size_models[model_comps[0]] = {}
model_name = model_comps[1].replace('-', ' ')
with open(size_model_file, 'rb') as smf:
self.size_models[model_comps[0]][model_name] = pickle.load(smf)
size_dist_model_files = sorted(glob(model_path + '*_sizedist.pkl'))
if len(size_dist_model_files) > 0:
for dist_model_file in size_dist_model_files:
model_comps = dist_model_file.split('/')[-1][:-4].split('_')
if model_comps[0] not in self.size_distribution_models.keys():
self.size_distribution_models[model_comps[0]] = {}
if '_'.join(model_comps[2:-1]
) not in self.size_distribution_models[model_comps[0]].keys():
self.size_distribution_models[model_comps[0]]['_'.join(
model_comps[2:-1])] = {}
model_name = model_comps[1].replace('-', ' ')
with open(dist_model_file, 'rb') as dmf:
self.size_distribution_models[model_comps[0]]['_'.join(
model_comps[2:-1])][model_name] = pickle.load(dmf)
track_model_files = sorted(glob(model_path + '*_track.pkl'))
if len(track_model_files) > 0:
for track_model_file in track_model_files:
model_comps = track_model_file.split('/')[-1][:-4].split('_')
group = model_comps[0]
model_name = model_comps[1].replace('-', ' ')
model_type = model_comps[2]
if model_type not in self.track_models.keys():
self.track_models[model_type] = {}
if group not in self.track_models[model_type].keys():
self.track_models[model_type][group] = {}
with open(track_model_file, 'rb') as tmf:
self.track_models[model_type][group][model_name] = pickle.load(
tmf)
|
load_models
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_path):\n condition_model_files = sorted(glob(model_path + '*_condition.pkl'))\n if len(condition_model_files) > 0:\n for condition_model_file in condition_model_files:\n model_comps = condition_model_file.split('/')[-1][:-4].split('_')\n if model_comps[0] not in self.condition_models.keys():\n self.condition_models[model_comps[0]] = {}\n model_name = model_comps[1].replace('-', ' ')\n with open(condition_model_file, 'rb') as cmf:\n if 'condition_threshold' in condition_model_file:\n self.condition_models[model_comps[0]][model_name +\n '_condition_threshold'] = pickle.load(cmf)\n else:\n self.condition_models[model_comps[0]][model_name\n ] = pickle.load(cmf)\n size_model_files = sorted(glob(model_path + '*_size.pkl'))\n if len(size_model_files) > 0:\n for size_model_file in size_model_files:\n model_comps = size_model_file.split('/')[-1][:-4].split('_')\n if model_comps[0] not in self.size_models.keys():\n self.size_models[model_comps[0]] = {}\n model_name = model_comps[1].replace('-', ' ')\n with open(size_model_file, 'rb') as smf:\n self.size_models[model_comps[0]][model_name] = pickle.load(smf)\n size_dist_model_files = sorted(glob(model_path + '*_sizedist.pkl'))\n if len(size_dist_model_files) > 0:\n for dist_model_file in size_dist_model_files:\n model_comps = dist_model_file.split('/')[-1][:-4].split('_')\n if model_comps[0] not in self.size_distribution_models.keys():\n self.size_distribution_models[model_comps[0]] = {}\n if '_'.join(model_comps[2:-1]\n ) not in self.size_distribution_models[model_comps[0]].keys():\n self.size_distribution_models[model_comps[0]]['_'.join(\n model_comps[2:-1])] = {}\n model_name = model_comps[1].replace('-', ' ')\n with open(dist_model_file, 'rb') as dmf:\n self.size_distribution_models[model_comps[0]]['_'.join(\n model_comps[2:-1])][model_name] = pickle.load(dmf)\n track_model_files = sorted(glob(model_path + '*_track.pkl'))\n if len(track_model_files) > 0:\n for track_model_file in track_model_files:\n model_comps = track_model_file.split('/')[-1][:-4].split('_')\n group = model_comps[0]\n model_name = model_comps[1].replace('-', ' ')\n model_type = model_comps[2]\n if model_type not in self.track_models.keys():\n self.track_models[model_type] = {}\n if group not in self.track_models[model_type].keys():\n self.track_models[model_type][group] = {}\n with open(track_model_file, 'rb') as tmf:\n self.track_models[model_type][group][model_name] = pickle.load(\n tmf)\n",
"role": "user"
},
{
"content": "load_models",
"role": "assistant"
}
] |
def x(self, forecasts, condition_model_names, size_model_names,
dist_model_names, track_model_names, json_data_path, out_path):
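    # Attach the per-step machine learning forecasts to each track's
    # GeoJSON-style features and write the augmented files under out_path in
    # a run_date/member directory layout.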
total_tracks = self.data['forecast']['total']
for r in np.arange(total_tracks.shape[0]):
track_id = total_tracks.loc[r, 'Track_ID']
print(track_id)
track_num = track_id.split('_')[-1]
ensemble_name = total_tracks.loc[r, 'Ensemble_Name']
member = total_tracks.loc[r, 'Ensemble_Member']
group = self.data['forecast']['member'].loc[self.data['forecast'][
'member']['Ensemble_Member'] == member, self.group_col].values[0]
run_date = track_id.split('_')[-4][:8]
step_forecasts = {}
for ml_model in condition_model_names:
step_forecasts['condition_' + ml_model.replace(' ', '-')
] = forecasts['condition'][group].loc[forecasts['condition'
][group]['Track_ID'] == track_id, ml_model]
for ml_model in size_model_names:
step_forecasts['size_' + ml_model.replace(' ', '-')] = forecasts[
'size'][group][ml_model].loc[forecasts['size'][group][
ml_model]['Track_ID'] == track_id]
for ml_model in dist_model_names:
step_forecasts['dist_' + ml_model.replace(' ', '-')] = forecasts[
'dist'][group][ml_model].loc[forecasts['dist'][group][
ml_model]['Track_ID'] == track_id]
for model_type in forecasts['track'].keys():
for ml_model in track_model_names:
mframe = forecasts['track'][model_type][group][ml_model]
step_forecasts[model_type + '_' + ml_model.replace(' ', '-')
] = mframe.loc[mframe['Track_ID'] == track_id]
json_file_name = '{0}_{1}_{2}_model_track_{3}.json'.format(
ensemble_name, run_date, member, track_num)
full_json_path = json_data_path + '/'.join([run_date, member]
) + '/' + json_file_name
        try:
            with open(full_json_path) as json_file_obj:
                track_obj = json.load(json_file_obj)
        except FileNotFoundError:
            print(full_json_path + ' not found')
            continue
for f, feature in enumerate(track_obj['features']):
del feature['properties']['attributes']
for model_name, fdata in step_forecasts.items():
ml_model_name = model_name.split('_')[1]
if 'condition' in model_name:
feature['properties'][model_name] = fdata.values[f]
else:
predcols = []
for col in fdata.columns:
if ml_model_name in col:
predcols.append(col)
feature['properties'][model_name] = fdata.loc[:, predcols
].values[f].tolist()
full_path = []
for part in [run_date, member]:
full_path.append(part)
if not os.access(out_path + '/'.join(full_path), os.R_OK):
try:
os.mkdir(out_path + '/'.join(full_path))
except OSError:
print('directory already created')
out_json_filename = out_path + '/'.join(full_path
) + '/' + json_file_name
with open(out_json_filename, 'w') as out_json_obj:
json.dump(track_obj, out_json_obj, indent=1, sort_keys=True)
return
|
output_forecasts_json
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, forecasts, condition_model_names, size_model_names,\n dist_model_names, track_model_names, json_data_path, out_path):\n total_tracks = self.data['forecast']['total']\n for r in np.arange(total_tracks.shape[0]):\n track_id = total_tracks.loc[r, 'Track_ID']\n print(track_id)\n track_num = track_id.split('_')[-1]\n ensemble_name = total_tracks.loc[r, 'Ensemble_Name']\n member = total_tracks.loc[r, 'Ensemble_Member']\n group = self.data['forecast']['member'].loc[self.data['forecast'][\n 'member']['Ensemble_Member'] == member, self.group_col].values[0]\n run_date = track_id.split('_')[-4][:8]\n step_forecasts = {}\n for ml_model in condition_model_names:\n step_forecasts['condition_' + ml_model.replace(' ', '-')\n ] = forecasts['condition'][group].loc[forecasts['condition'\n ][group]['Track_ID'] == track_id, ml_model]\n for ml_model in size_model_names:\n step_forecasts['size_' + ml_model.replace(' ', '-')] = forecasts[\n 'size'][group][ml_model].loc[forecasts['size'][group][\n ml_model]['Track_ID'] == track_id]\n for ml_model in dist_model_names:\n step_forecasts['dist_' + ml_model.replace(' ', '-')] = forecasts[\n 'dist'][group][ml_model].loc[forecasts['dist'][group][\n ml_model]['Track_ID'] == track_id]\n for model_type in forecasts['track'].keys():\n for ml_model in track_model_names:\n mframe = forecasts['track'][model_type][group][ml_model]\n step_forecasts[model_type + '_' + ml_model.replace(' ', '-')\n ] = mframe.loc[mframe['Track_ID'] == track_id]\n json_file_name = '{0}_{1}_{2}_model_track_{3}.json'.format(\n ensemble_name, run_date, member, track_num)\n full_json_path = json_data_path + '/'.join([run_date, member]\n ) + '/' + json_file_name\n with open(full_json_path) as json_file_obj:\n try:\n track_obj = json.load(json_file_obj)\n except FileNotFoundError:\n print(full_json_path + ' not found')\n continue\n for f, feature in enumerate(track_obj['features']):\n del feature['properties']['attributes']\n for model_name, fdata in step_forecasts.items():\n ml_model_name = model_name.split('_')[1]\n if 'condition' in model_name:\n feature['properties'][model_name] = fdata.values[f]\n else:\n predcols = []\n for col in fdata.columns:\n if ml_model_name in col:\n predcols.append(col)\n feature['properties'][model_name] = fdata.loc[:, predcols\n ].values[f].tolist()\n full_path = []\n for part in [run_date, member]:\n full_path.append(part)\n if not os.access(out_path + '/'.join(full_path), os.R_OK):\n try:\n os.mkdir(out_path + '/'.join(full_path))\n except OSError:\n print('directory already created')\n out_json_filename = out_path + '/'.join(full_path\n ) + '/' + json_file_name\n with open(out_json_filename, 'w') as out_json_obj:\n json.dump(track_obj, out_json_obj, indent=1, sort_keys=True)\n return\n",
"role": "user"
},
{
"content": "output_forecasts_json",
"role": "assistant"
}
] |
def x(self, forecasts, mode, csv_path, run_date_format='%Y%m%d-%H%M'):
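    # Merge the condition and distribution forecasts on their step/track keys,
    # then write one hail-forecast CSV per (ensemble member, run date) pair.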
merged_forecasts = pd.merge(forecasts['condition'], forecasts['dist'],
on=['Step_ID', 'Track_ID', 'Ensemble_Member', 'Forecast_Hour'])
all_members = self.data[mode]['combo']['Ensemble_Member']
members = np.unique(all_members)
all_run_dates = pd.DatetimeIndex(self.data[mode]['combo']['Run_Date'])
run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
print(run_dates)
for member in members:
for run_date in run_dates:
mem_run_index = (all_run_dates == run_date) & (all_members ==
member)
member_forecast = merged_forecasts.loc[mem_run_index]
member_forecast.to_csv(join(csv_path,
'hail_forecasts_{0}_{1}_{2}.csv'.format(self.ensemble_name,
member, run_date.strftime(run_date_format))))
return
|
output_forecasts_csv
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, forecasts, mode, csv_path, run_date_format='%Y%m%d-%H%M'):\n merged_forecasts = pd.merge(forecasts['condition'], forecasts['dist'],\n on=['Step_ID', 'Track_ID', 'Ensemble_Member', 'Forecast_Hour'])\n all_members = self.data[mode]['combo']['Ensemble_Member']\n members = np.unique(all_members)\n all_run_dates = pd.DatetimeIndex(self.data[mode]['combo']['Run_Date'])\n run_dates = pd.DatetimeIndex(np.unique(all_run_dates))\n print(run_dates)\n for member in members:\n for run_date in run_dates:\n mem_run_index = (all_run_dates == run_date) & (all_members ==\n member)\n member_forecast = merged_forecasts.loc[mem_run_index]\n member_forecast.to_csv(join(csv_path,\n 'hail_forecasts_{0}_{1}_{2}.csv'.format(self.ensemble_name,\n member, run_date.strftime(run_date_format))))\n return\n",
"role": "user"
},
{
"content": "output_forecasts_csv",
"role": "assistant"
}
] |
def x(self):
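    # Infer the calling convention from the library extension (.dll implies stdcall)
    # when it is unset, then load the library with the matching ctypes loader.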
if self._convencao is None:
if self._caminho.endswith(('.DLL', '.dll')):
self._convencao = constantes.WINDOWS_STDCALL
else:
self._convencao = constantes.STANDARD_C
if self._convencao == constantes.STANDARD_C:
loader = ctypes.CDLL
elif self._convencao == constantes.WINDOWS_STDCALL:
loader = ctypes.WinDLL
else:
raise ValueError('Convencao de chamada desconhecida: {!r}'.format(
self._convencao))
self._libsat = loader(self._caminho)
|
carregar
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self._convencao is None:\n if self._caminho.endswith(('.DLL', '.dll')):\n self._convencao = constantes.WINDOWS_STDCALL\n else:\n self._convencao = constantes.STANDARD_C\n if self._convencao == constantes.STANDARD_C:\n loader = ctypes.CDLL\n elif self._convencao == constantes.WINDOWS_STDCALL:\n loader = ctypes.WinDLL\n else:\n raise ValueError('Convencao de chamada desconhecida: {!r}'.format(\n self._convencao))\n self._libsat = loader(self._caminho)\n",
"role": "user"
},
{
"content": "carregar",
"role": "assistant"
}
] |
def x(self, tipo_certificado, cnpj, codigo_uf):
return self.invocar__AtivarSAT(self.gerar_numero_sessao(),
tipo_certificado, self._codigo_ativacao, cnpj, codigo_uf)
|
ativar_sat
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tipo_certificado, cnpj, codigo_uf):\n return self.invocar__AtivarSAT(self.gerar_numero_sessao(),\n tipo_certificado, self._codigo_ativacao, cnpj, codigo_uf)\n",
"role": "user"
},
{
"content": "ativar_sat",
"role": "assistant"
}
] |
def x(self, certificado):
return self.invocar__ComunicarCertificadoICPBRASIL(self.
gerar_numero_sessao(), self._codigo_ativacao, certificado)
|
comunicar_certificado_icpbrasil
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, certificado):\n return self.invocar__ComunicarCertificadoICPBRASIL(self.\n gerar_numero_sessao(), self._codigo_ativacao, certificado)\n",
"role": "user"
},
{
"content": "comunicar_certificado_icpbrasil",
"role": "assistant"
}
] |
def x(self, dados_venda):
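    # Accept either a raw CF-e XML string or a document object exposing .documento().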
cfe_venda = dados_venda if isinstance(dados_venda, basestring
) else dados_venda.documento()
return self.invocar__EnviarDadosVenda(self.gerar_numero_sessao(), self.
_codigo_ativacao, cfe_venda)
|
enviar_dados_venda
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dados_venda):\n cfe_venda = dados_venda if isinstance(dados_venda, basestring\n ) else dados_venda.documento()\n return self.invocar__EnviarDadosVenda(self.gerar_numero_sessao(), self.\n _codigo_ativacao, cfe_venda)\n",
"role": "user"
},
{
"content": "enviar_dados_venda",
"role": "assistant"
}
] |
def x(self, chave_cfe, dados_cancelamento):
cfe_canc = dados_cancelamento if isinstance(dados_cancelamento, basestring
) else dados_cancelamento.documento()
return self.invocar__CancelarUltimaVenda(self.gerar_numero_sessao(),
self._codigo_ativacao, chave_cfe, cfe_canc)
|
cancelar_ultima_venda
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, chave_cfe, dados_cancelamento):\n cfe_canc = dados_cancelamento if isinstance(dados_cancelamento, basestring\n ) else dados_cancelamento.documento()\n return self.invocar__CancelarUltimaVenda(self.gerar_numero_sessao(),\n self._codigo_ativacao, chave_cfe, cfe_canc)\n",
"role": "user"
},
{
"content": "cancelar_ultima_venda",
"role": "assistant"
}
] |
def x(self, numero_sessao):
return self.invocar__ConsultarNumeroSessao(self.gerar_numero_sessao(),
self._codigo_ativacao, numero_sessao)
|
consultar_numero_sessao
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, numero_sessao):\n return self.invocar__ConsultarNumeroSessao(self.gerar_numero_sessao(),\n self._codigo_ativacao, numero_sessao)\n",
"role": "user"
},
{
"content": "consultar_numero_sessao",
"role": "assistant"
}
] |
def x(self, configuracao):
conf_xml = configuracao if isinstance(configuracao, basestring
) else configuracao.documento()
return self.invocar__ConfigurarInterfaceDeRede(self.gerar_numero_sessao
(), self._codigo_ativacao, conf_xml)
|
configurar_interface_de_rede
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, configuracao):\n conf_xml = configuracao if isinstance(configuracao, basestring\n ) else configuracao.documento()\n return self.invocar__ConfigurarInterfaceDeRede(self.gerar_numero_sessao\n (), self._codigo_ativacao, conf_xml)\n",
"role": "user"
},
{
"content": "configurar_interface_de_rede",
"role": "assistant"
}
] |
def x(self, sequencia_cnpj, assinatura_ac):
return self.invocar__AssociarAssinatura(self.gerar_numero_sessao(),
self._codigo_ativacao, sequencia_cnpj, assinatura_ac)
|
associar_assinatura
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, sequencia_cnpj, assinatura_ac):\n return self.invocar__AssociarAssinatura(self.gerar_numero_sessao(),\n self._codigo_ativacao, sequencia_cnpj, assinatura_ac)\n",
"role": "user"
},
{
"content": "associar_assinatura",
"role": "assistant"
}
] |
def x(self, novo_codigo_ativacao, opcao=constantes.CODIGO_ATIVACAO_REGULAR,
codigo_emergencia=None):
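    # Use the emergency code as the current activation code when the emergency
    # option is selected; the new activation code is passed twice to the underlying call.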
if not novo_codigo_ativacao:
raise ValueError('Novo codigo de ativacao invalido: {!r}'.format(
novo_codigo_ativacao))
codigo_ativacao = self._codigo_ativacao
if opcao == constantes.CODIGO_ATIVACAO_EMERGENCIA:
if codigo_emergencia:
codigo_ativacao = codigo_emergencia
else:
raise ValueError(
'Codigo de ativacao de emergencia invalido: {!r} (opcao={!r})'
.format(codigo_emergencia, opcao))
return self.invocar__TrocarCodigoDeAtivacao(self.gerar_numero_sessao(),
codigo_ativacao, opcao, novo_codigo_ativacao, novo_codigo_ativacao)
|
trocar_codigo_de_ativacao
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, novo_codigo_ativacao, opcao=constantes.CODIGO_ATIVACAO_REGULAR,\n codigo_emergencia=None):\n if not novo_codigo_ativacao:\n raise ValueError('Novo codigo de ativacao invalido: {!r}'.format(\n novo_codigo_ativacao))\n codigo_ativacao = self._codigo_ativacao\n if opcao == constantes.CODIGO_ATIVACAO_EMERGENCIA:\n if codigo_emergencia:\n codigo_ativacao = codigo_emergencia\n else:\n raise ValueError(\n 'Codigo de ativacao de emergencia invalido: {!r} (opcao={!r})'\n .format(codigo_emergencia, opcao))\n return self.invocar__TrocarCodigoDeAtivacao(self.gerar_numero_sessao(),\n codigo_ativacao, opcao, novo_codigo_ativacao, novo_codigo_ativacao)\n",
"role": "user"
},
{
"content": "trocar_codigo_de_ativacao",
"role": "assistant"
}
] |
def x(self):
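    # Walk this run's per-member track JSON files, appending one forecast row
    # per track step and ML model to self.forecasts.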
forecast_path = self.forecast_json_path + '/{0}/{1}/'.format(self.
run_date.strftime('%Y%m%d'), self.ensemble_member)
forecast_files = sorted(glob(forecast_path + '*.json'))
for forecast_file in forecast_files:
file_obj = open(forecast_file)
json_obj = json.load(file_obj)
file_obj.close()
track_id = json_obj['properties']['id']
obs_track_id = json_obj['properties']['obs_track_id']
forecast_hours = json_obj['properties']['times']
duration = json_obj['properties']['duration']
for f, feature in enumerate(json_obj['features']):
area = np.sum(feature['properties']['masks'])
step_id = track_id + '_{0:02d}'.format(f)
for model_type in self.model_types:
for model_name in self.model_names[model_type]:
prediction = feature['properties'][model_type + '_' +
model_name.replace(' ', '-')]
if model_type == 'condition':
prediction = [prediction]
row = [track_id, obs_track_id, self.ensemble_name, self
.ensemble_member, forecast_hours[f], f + 1,
duration, area] + prediction
self.forecasts[model_type][model_name].loc[step_id] = row
|
load_forecasts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n forecast_path = self.forecast_json_path + '/{0}/{1}/'.format(self.\n run_date.strftime('%Y%m%d'), self.ensemble_member)\n forecast_files = sorted(glob(forecast_path + '*.json'))\n for forecast_file in forecast_files:\n file_obj = open(forecast_file)\n json_obj = json.load(file_obj)\n file_obj.close()\n track_id = json_obj['properties']['id']\n obs_track_id = json_obj['properties']['obs_track_id']\n forecast_hours = json_obj['properties']['times']\n duration = json_obj['properties']['duration']\n for f, feature in enumerate(json_obj['features']):\n area = np.sum(feature['properties']['masks'])\n step_id = track_id + '_{0:02d}'.format(f)\n for model_type in self.model_types:\n for model_name in self.model_names[model_type]:\n prediction = feature['properties'][model_type + '_' +\n model_name.replace(' ', '-')]\n if model_type == 'condition':\n prediction = [prediction]\n row = [track_id, obs_track_id, self.ensemble_name, self\n .ensemble_member, forecast_hours[f], f + 1,\n duration, area] + prediction\n self.forecasts[model_type][model_name].loc[step_id] = row\n",
"role": "user"
},
{
"content": "load_forecasts",
"role": "assistant"
}
] |
def x(self):
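    # Left-join step-level hail observations with track-level error metrics on Track_ID.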
track_total_file = (self.track_data_csv_path +
'track_total_{0}_{1}_{2}.csv'.format(self.ensemble_name, self.
ensemble_member, self.run_date.strftime('%Y%m%d')))
track_step_file = (self.track_data_csv_path +
'track_step_{0}_{1}_{2}.csv'.format(self.ensemble_name, self.
ensemble_member, self.run_date.strftime('%Y%m%d')))
track_total_cols = ['Track_ID', 'Translation_Error_X',
'Translation_Error_Y', 'Start_Time_Error']
track_step_cols = ['Step_ID', 'Track_ID', 'Hail_Size', 'Shape',
'Location', 'Scale']
track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols)
track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols)
obs_data = pd.merge(track_step_data, track_total_data, on='Track_ID',
how='left')
self.obs = obs_data
|
load_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n track_total_file = (self.track_data_csv_path +\n 'track_total_{0}_{1}_{2}.csv'.format(self.ensemble_name, self.\n ensemble_member, self.run_date.strftime('%Y%m%d')))\n track_step_file = (self.track_data_csv_path +\n 'track_step_{0}_{1}_{2}.csv'.format(self.ensemble_name, self.\n ensemble_member, self.run_date.strftime('%Y%m%d')))\n track_total_cols = ['Track_ID', 'Translation_Error_X',\n 'Translation_Error_Y', 'Start_Time_Error']\n track_step_cols = ['Step_ID', 'Track_ID', 'Hail_Size', 'Shape',\n 'Location', 'Scale']\n track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols)\n track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols)\n obs_data = pd.merge(track_step_data, track_total_data, on='Track_ID',\n how='left')\n self.obs = obs_data\n",
"role": "user"
},
{
"content": "load_obs",
"role": "assistant"
}
] |
def x(self):
for model_type in self.model_types:
self.matched_forecasts[model_type] = {}
for model_name in self.model_names[model_type]:
self.matched_forecasts[model_type][model_name] = pd.merge(self.
forecasts[model_type][model_name], self.obs, right_on=
'Step_ID', how='left', left_index=True)
|
merge_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n for model_type in self.model_types:\n self.matched_forecasts[model_type] = {}\n for model_name in self.model_names[model_type]:\n self.matched_forecasts[model_type][model_name] = pd.merge(self.\n forecasts[model_type][model_name], self.obs, right_on=\n 'Step_ID', how='left', left_index=True)\n",
"role": "user"
},
{
"content": "merge_obs",
"role": "assistant"
}
] |
def x(self, model_type, model_name, condition_model_name,
condition_threshold, query=None):
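    # Accumulate a distributed CRPS over the matched forecasts; for the gamma 'dist'
    # model the parameters are zeroed once the condition probability reaches the
    # threshold, which the helper below turns into an all-ones (no-exceedance) CDF.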
    def gamma_cdf(x, a, loc, b):
if a == 0 or b == 0:
cdf = np.ones(x.shape)
else:
cdf = gamma.cdf(x, a, loc, b)
return cdf
crps_obj = DistributedCRPS(self.dist_thresholds)
if query is not None:
sub_forecasts = self.matched_forecasts[model_type][model_name].query(
query)
sub_forecasts = sub_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts['condition'][
condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
sub_forecasts = self.matched_forecasts[model_type][model_name]
condition_forecasts = self.matched_forecasts['condition'][
condition_model_name]
if sub_forecasts.shape[0] > 0:
if model_type == 'dist':
forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.
dist_thresholds.size))
for f in range(sub_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.
forecast_bins['condition'][0]]
if condition_prob >= condition_threshold:
f_params = [0, 0, 0]
else:
f_params = sub_forecasts[self.forecast_bins[model_type]
].values[f]
forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params
[0], f_params[1], f_params[2])
obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params) for
params in sub_forecasts[self.type_cols[model_type]].values])
crps_obj.update(forecast_cdfs, obs_cdfs)
else:
crps_obj.update(sub_forecasts[self.forecast_bins[model_type].
astype(str)].values, sub_forecasts[self.type_cols[
model_type]].values)
return crps_obj
|
crps
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_type, model_name, condition_model_name,\n condition_threshold, query=None):\n\n def x(x, a, loc, b):\n if a == 0 or b == 0:\n cdf = np.ones(x.shape)\n else:\n cdf = gamma.cdf(x, a, loc, b)\n return cdf\n crps_obj = DistributedCRPS(self.dist_thresholds)\n if query is not None:\n sub_forecasts = self.matched_forecasts[model_type][model_name].query(\n query)\n sub_forecasts = sub_forecasts.reset_index(drop=True)\n condition_forecasts = self.matched_forecasts['condition'][\n condition_model_name].query(query)\n condition_forecasts = condition_forecasts.reset_index(drop=True)\n else:\n sub_forecasts = self.matched_forecasts[model_type][model_name]\n condition_forecasts = self.matched_forecasts['condition'][\n condition_model_name]\n if sub_forecasts.shape[0] > 0:\n if model_type == 'dist':\n forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.\n dist_thresholds.size))\n for f in range(sub_forecasts.shape[0]):\n condition_prob = condition_forecasts.loc[f, self.\n forecast_bins['condition'][0]]\n if condition_prob >= condition_threshold:\n f_params = [0, 0, 0]\n else:\n f_params = sub_forecasts[self.forecast_bins[model_type]\n ].values[f]\n forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params\n [0], f_params[1], f_params[2])\n obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params) for\n params in sub_forecasts[self.type_cols[model_type]].values])\n crps_obj.update(forecast_cdfs, obs_cdfs)\n else:\n crps_obj.update(sub_forecasts[self.forecast_bins[model_type].\n astype(str)].values, sub_forecasts[self.type_cols[\n model_type]].values)\n return crps_obj\n",
"role": "user"
},
{
"content": "crps",
"role": "assistant"
}
] |
def x(self, model_type, model_name, intensity_threshold, prob_thresholds,
query=None):
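    # Convert each model type's output into a probability of exceeding the intensity
    # threshold, binarize the matched observations, and feed both into a DistributedROC.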
roc_obj = DistributedROC(prob_thresholds, 0.5)
if query is not None:
sub_forecasts = self.matched_forecasts[model_type][model_name].query(
query)
sub_forecasts = sub_forecasts.reset_index(drop=True)
else:
sub_forecasts = self.matched_forecasts[model_type][model_name]
obs_values = np.zeros(sub_forecasts.shape[0])
if sub_forecasts.shape[0] > 0:
if model_type == 'dist':
forecast_values = np.array([gamma_sf(intensity_threshold, *
params) for params in sub_forecasts[self.forecast_bins[
model_type]].values])
obs_probs = np.array([gamma_sf(intensity_threshold, *params) for
params in sub_forecasts[self.type_cols[model_type]].values])
obs_values[obs_probs >= 0.01] = 1
elif len(self.forecast_bins[model_type]) > 1:
fbin = np.argmin(np.abs(self.forecast_bins[model_type] -
intensity_threshold))
forecast_values = 1 - sub_forecasts[self.forecast_bins[
model_type].astype(str)].values.cumsum(axis=1)[:, fbin]
obs_values[sub_forecasts[self.type_cols[model_type]].values >=
intensity_threshold] = 1
else:
forecast_values = sub_forecasts[self.forecast_bins[model_type].
astype(str)[0]].values
obs_values[sub_forecasts[self.type_cols[model_type]].values >=
intensity_threshold] = 1
roc_obj.update(forecast_values, obs_values)
return roc_obj
|
roc
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, model_type, model_name, intensity_threshold, prob_thresholds,\n query=None):\n roc_obj = DistributedROC(prob_thresholds, 0.5)\n if query is not None:\n sub_forecasts = self.matched_forecasts[model_type][model_name].query(\n query)\n sub_forecasts = sub_forecasts.reset_index(drop=True)\n else:\n sub_forecasts = self.matched_forecasts[model_type][model_name]\n obs_values = np.zeros(sub_forecasts.shape[0])\n if sub_forecasts.shape[0] > 0:\n if model_type == 'dist':\n forecast_values = np.array([gamma_sf(intensity_threshold, *\n params) for params in sub_forecasts[self.forecast_bins[\n model_type]].values])\n obs_probs = np.array([gamma_sf(intensity_threshold, *params) for\n params in sub_forecasts[self.type_cols[model_type]].values])\n obs_values[obs_probs >= 0.01] = 1\n elif len(self.forecast_bins[model_type]) > 1:\n fbin = np.argmin(np.abs(self.forecast_bins[model_type] -\n intensity_threshold))\n forecast_values = 1 - sub_forecasts[self.forecast_bins[\n model_type].astype(str)].values.cumsum(axis=1)[:, fbin]\n obs_values[sub_forecasts[self.type_cols[model_type]].values >=\n intensity_threshold] = 1\n else:\n forecast_values = sub_forecasts[self.forecast_bins[model_type].\n astype(str)[0]].values\n obs_values[sub_forecasts[self.type_cols[model_type]].values >=\n intensity_threshold] = 1\n roc_obj.update(forecast_values, obs_values)\n return roc_obj\n",
"role": "user"
},
{
"content": "roc",
"role": "assistant"
}
] |
def x(self, dist_model_name, condition_model_name, num_samples,
condition_threshold=0.5, query=None):
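    # For each object whose condition probability reaches the threshold, draw gamma
    # samples across its area and keep the sorted per-sample maxima; other rows stay zero.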
if query is not None:
dist_forecasts = self.matched_forecasts['dist'][dist_model_name].query(
query)
dist_forecasts = dist_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts['condition'][
condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
dist_forecasts = self.matched_forecasts['dist'][dist_model_name]
condition_forecasts = self.matched_forecasts['condition'][
condition_model_name]
max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
areas = dist_forecasts['Area'].values
for f in np.arange(dist_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.forecast_bins[
'condition'][0]]
if condition_prob >= condition_threshold:
max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f,
self.forecast_bins['dist']].values, size=(num_samples,
areas[f])).max(axis=1))
return max_hail_samples
|
sample_forecast_max_hail
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dist_model_name, condition_model_name, num_samples,\n condition_threshold=0.5, query=None):\n if query is not None:\n dist_forecasts = self.matched_forecasts['dist'][dist_model_name].query(\n query)\n dist_forecasts = dist_forecasts.reset_index(drop=True)\n condition_forecasts = self.matched_forecasts['condition'][\n condition_model_name].query(query)\n condition_forecasts = condition_forecasts.reset_index(drop=True)\n else:\n dist_forecasts = self.matched_forecasts['dist'][dist_model_name]\n condition_forecasts = self.matched_forecasts['condition'][\n condition_model_name]\n max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))\n areas = dist_forecasts['Area'].values\n for f in np.arange(dist_forecasts.shape[0]):\n condition_prob = condition_forecasts.loc[f, self.forecast_bins[\n 'condition'][0]]\n if condition_prob >= condition_threshold:\n max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f,\n self.forecast_bins['dist']].values, size=(num_samples,\n areas[f])).max(axis=1))\n return max_hail_samples\n",
"role": "user"
},
{
"content": "sample_forecast_max_hail",
"role": "assistant"
}
] |
def x(self):
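    # Assemble the widget request parameters for the goods API (a single product,
    # with optional trial handling) or the cart API, then append the signature.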
params = {'key': self.get_app_key(), 'uid': self.user_id, 'widget':
self.widget_code}
products_number = len(self.products)
if self.get_api_type() == self.API_GOODS:
if isinstance(self.products, list):
if products_number == 1:
product = self.products[0]
if isinstance(product, Product):
post_trial_product = None
if isinstance(product.get_trial_product(), Product):
post_trial_product = product
product = product.get_trial_product()
params['amount'] = product.get_amount()
params['currencyCode'] = product.get_currency_code()
params['ag_name'] = product.get_name()
params['ag_external_id'] = product.get_id()
params['ag_type'] = product.get_type()
if product.get_type() == Product.TYPE_SUBSCRIPTION:
params['ag_period_length'] = product.get_period_length(
)
params['ag_period_type'] = product.get_period_type()
if product.is_recurring():
params['ag_recurring'] = 1 if product.is_recurring(
) else 0
if post_trial_product:
params['ag_trial'] = 1
params['ag_post_trial_external_id'
] = post_trial_product.get_id()
params['ag_post_trial_period_length'
] = post_trial_product.get_period_length()
params['ag_post_trial_period_type'
] = post_trial_product.get_period_type()
params['ag_post_trial_name'
] = post_trial_product.get_name()
params['post_trial_amount'
] = post_trial_product.get_amount()
params['post_trial_currencyCode'
] = post_trial_product.get_currency_code()
else:
self.append_to_errors('Not a Product instance')
else:
self.append_to_errors('Only 1 product is allowed')
elif self.get_api_type() == self.API_CART:
index = 0
for product in self.products:
params['external_ids[' + str(index) + ']'] = product.get_id()
if product.get_amount() > 0:
params['prices[' + str(index) + ']'] = product.get_amount()
if product.get_currency_code() != '' and product.get_currency_code(
) is not None:
params['currencies[' + str(index) + ']'
] = product.get_currency_code()
index += 1
params['sign_version'] = signature_version = str(self.
get_default_widget_signature())
if not self.is_empty(self.extra_params, 'sign_version'):
signature_version = params['sign_version'] = str(self.extra_params[
'sign_version'])
params = self.array_merge(params, self.extra_params)
params['sign'] = self.calculate_signature(params, self.get_secret_key(),
int(signature_version))
return params
|
get_params
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n params = {'key': self.get_app_key(), 'uid': self.user_id, 'widget':\n self.widget_code}\n products_number = len(self.products)\n if self.get_api_type() == self.API_GOODS:\n if isinstance(self.products, list):\n if products_number == 1:\n product = self.products[0]\n if isinstance(product, Product):\n post_trial_product = None\n if isinstance(product.get_trial_product(), Product):\n post_trial_product = product\n product = product.get_trial_product()\n params['amount'] = product.get_amount()\n params['currencyCode'] = product.get_currency_code()\n params['ag_name'] = product.get_name()\n params['ag_external_id'] = product.get_id()\n params['ag_type'] = product.get_type()\n if product.get_type() == Product.TYPE_SUBSCRIPTION:\n params['ag_period_length'] = product.get_period_length(\n )\n params['ag_period_type'] = product.get_period_type()\n if product.is_recurring():\n params['ag_recurring'] = 1 if product.is_recurring(\n ) else 0\n if post_trial_product:\n params['ag_trial'] = 1\n params['ag_post_trial_external_id'\n ] = post_trial_product.get_id()\n params['ag_post_trial_period_length'\n ] = post_trial_product.get_period_length()\n params['ag_post_trial_period_type'\n ] = post_trial_product.get_period_type()\n params['ag_post_trial_name'\n ] = post_trial_product.get_name()\n params['post_trial_amount'\n ] = post_trial_product.get_amount()\n params['post_trial_currencyCode'\n ] = post_trial_product.get_currency_code()\n else:\n self.append_to_errors('Not a Product instance')\n else:\n self.append_to_errors('Only 1 product is allowed')\n elif self.get_api_type() == self.API_CART:\n index = 0\n for product in self.products:\n params['external_ids[' + str(index) + ']'] = product.get_id()\n if product.get_amount() > 0:\n params['prices[' + str(index) + ']'] = product.get_amount()\n if product.get_currency_code() != '' and product.get_currency_code(\n ) is not None:\n params['currencies[' + str(index) + ']'\n ] = product.get_currency_code()\n index += 1\n params['sign_version'] = signature_version = str(self.\n get_default_widget_signature())\n if not self.is_empty(self.extra_params, 'sign_version'):\n signature_version = params['sign_version'] = str(self.extra_params[\n 'sign_version'])\n params = self.array_merge(params, self.extra_params)\n params['sign'] = self.calculate_signature(params, self.get_secret_key(),\n int(signature_version))\n return params\n",
"role": "user"
},
{
"content": "get_params",
"role": "assistant"
}
] |
def x(segundos):
    h = segundos // 3600
    m = (segundos - 3600 * h) // 60
s = segundos - 3600 * h - m * 60
return h, m, s
|
hms
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(segundos):\n h = segundos / 3600\n m = (segundos - 3600 * h) / 60\n s = segundos - 3600 * h - m * 60\n return h, m, s\n",
"role": "user"
},
{
"content": "hms",
"role": "assistant"
}
] |
def x(segundos):
p = lambda n, s, p: p if n > 1 else s
h, m, s = hms(segundos)
tokens = ['' if h == 0 else '{:d} {}'.format(h, p(h, 'hora', 'horas')),
'' if m == 0 else '{:d} {}'.format(m, p(m, 'minuto', 'minutos')),
'' if s == 0 else '{:d} {}'.format(s, p(s, 'segundo', 'segundos'))]
tokens = [token for token in tokens if token]
if len(tokens) == 1:
return tokens[0]
if len(tokens) > 1:
return '{} e {}'.format(', '.join(tokens[:-1]), tokens[-1])
return 'zero segundos'
|
hms_humanizado
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(segundos):\n p = lambda n, s, p: p if n > 1 else s\n h, m, s = hms(segundos)\n tokens = ['' if h == 0 else '{:d} {}'.format(h, p(h, 'hora', 'horas')),\n '' if m == 0 else '{:d} {}'.format(m, p(m, 'minuto', 'minutos')), \n '' if s == 0 else '{:d} {}'.format(s, p(s, 'segundo', 'segundos'))]\n tokens = [token for token in tokens if token]\n if len(tokens) == 1:\n return tokens[0]\n if len(tokens) > 1:\n return '{} e {}'.format(', '.join(tokens[:-1]), tokens[-1])\n return 'zero segundos'\n",
"role": "user"
},
{
"content": "hms_humanizado",
"role": "assistant"
}
] |
def x(self, selected_variable):
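    # Reverse-lookup a GRIB parameter number and its units by variable name;
    # assumes the name is present in unknown_names, otherwise Id and u are never bound.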
names = self.unknown_names
units = self.unknown_units
for key, value in names.items():
if selected_variable == value:
Id = key
u = units[key]
return Id, u
|
format_grib_name
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, selected_variable):\n names = self.unknown_names\n units = self.unknown_units\n for key, value in names.items():\n if selected_variable == value:\n Id = key\n u = units[key]\n return Id, u\n",
"role": "user"
},
{
"content": "format_grib_name",
"role": "assistant"
}
] |
def x(self):
file_objects = self.file_objects
var = self.variable
valid_date = self.valid_dates
data = self.data
unknown_names = self.unknown_names
unknown_units = self.unknown_units
member = self.member
lat = self.lat
lon = self.lon
if self.sector_ind_path:
inds_file = pd.read_csv(self.sector_ind_path +
'sector_data_indices.csv')
inds = inds_file.loc[:, 'indices']
out_x = self.mapping_data['x']
if not file_objects:
print()
print('No {0} model runs on {1}'.format(member, self.run_date))
print()
units = None
return self.data, units
for f, file in enumerate(file_objects):
grib = pygrib.open(file)
if type(var) is int:
data_values = grib[var].values
if grib[var].units == 'unknown':
Id = grib[var].parameterNumber
units = self.unknown_units[Id]
else:
units = grib[var].units
elif type(var) is str:
if '_' in var:
variable = var.split('_')[0]
level = int(var.split('_')[1])
if variable in unknown_names.values():
Id, units = self.format_grib_name(variable)
data_values = grib.select(parameterNumber=Id, level=level)[
0].values
else:
data_values = grib.select(name=variable, level=level)[0
].values
units = grib.select(name=variable, level=level)[0].units
elif var in unknown_names.values():
Id, units = self.format_grib_name(var)
data_values = grib.select(parameterNumber=Id)[0].values
elif len(grib.select(name=var)) > 1:
raise NameError(
"Multiple '{0}' records found. Rename with level:'{0}_level'"
.format(var))
else:
data_values = grib.select(name=var)[0].values
units = grib.select(name=var)[0].units
if data is None:
data = np.empty((len(valid_date), out_x.shape[0], out_x.shape[1
]), dtype=float)
if self.sector_ind_path:
data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)
else:
data[f] = data_values[:]
elif self.sector_ind_path:
data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)
else:
data[f] = data_values[:]
return data, units
|
load_data
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n file_objects = self.file_objects\n var = self.variable\n valid_date = self.valid_dates\n data = self.data\n unknown_names = self.unknown_names\n unknown_units = self.unknown_units\n member = self.member\n lat = self.lat\n lon = self.lon\n if self.sector_ind_path:\n inds_file = pd.read_csv(self.sector_ind_path +\n 'sector_data_indices.csv')\n inds = inds_file.loc[:, 'indices']\n out_x = self.mapping_data['x']\n if not file_objects:\n print()\n print('No {0} model runs on {1}'.format(member, self.run_date))\n print()\n units = None\n return self.data, units\n for f, file in enumerate(file_objects):\n grib = pygrib.open(file)\n if type(var) is int:\n data_values = grib[var].values\n if grib[var].units == 'unknown':\n Id = grib[var].parameterNumber\n units = self.unknown_units[Id]\n else:\n units = grib[var].units\n elif type(var) is str:\n if '_' in var:\n variable = var.split('_')[0]\n level = int(var.split('_')[1])\n if variable in unknown_names.values():\n Id, units = self.format_grib_name(variable)\n data_values = grib.select(parameterNumber=Id, level=level)[\n 0].values\n else:\n data_values = grib.select(name=variable, level=level)[0\n ].values\n units = grib.select(name=variable, level=level)[0].units\n elif var in unknown_names.values():\n Id, units = self.format_grib_name(var)\n data_values = grib.select(parameterNumber=Id)[0].values\n elif len(grib.select(name=var)) > 1:\n raise NameError(\n \"Multiple '{0}' records found. Rename with level:'{0}_level'\"\n .format(var))\n else:\n data_values = grib.select(name=var)[0].values\n units = grib.select(name=var)[0].units\n if data is None:\n data = np.empty((len(valid_date), out_x.shape[0], out_x.shape[1\n ]), dtype=float)\n if self.sector_ind_path:\n data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)\n else:\n data[f] = data_values[:]\n elif self.sector_ind_path:\n data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)\n else:\n data[f] = data_values[:]\n return data, units\n",
"role": "user"
},
{
"content": "load_data",
"role": "assistant"
}
] |
def x(self):
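    # Open each model's hail-probability netCDF for this run date and member,
    # keeping only forecast hours within [start_hour, end_hour].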
run_date_str = self.run_date.strftime('%Y%m%d')
for model_name in self.model_names:
self.raw_forecasts[model_name] = {}
forecast_file = (self.forecast_path + run_date_str + '/' +
model_name.replace(' ', '-') + '_hailprobs_{0}_{1}.nc'.format(
self.ensemble_member, run_date_str))
forecast_obj = Dataset(forecast_file)
forecast_hours = forecast_obj.variables['forecast_hour'][:]
valid_hour_indices = np.where((self.start_hour <= forecast_hours) &
(forecast_hours <= self.end_hour))[0]
for size_threshold in self.size_thresholds:
self.raw_forecasts[model_name][size_threshold
] = forecast_obj.variables['prob_hail_{0:02d}_mm'.format(
size_threshold)][valid_hour_indices]
forecast_obj.close()
|
load_forecasts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n run_date_str = self.run_date.strftime('%Y%m%d')\n for model_name in self.model_names:\n self.raw_forecasts[model_name] = {}\n forecast_file = (self.forecast_path + run_date_str + '/' +\n model_name.replace(' ', '-') + '_hailprobs_{0}_{1}.nc'.format(\n self.ensemble_member, run_date_str))\n forecast_obj = Dataset(forecast_file)\n forecast_hours = forecast_obj.variables['forecast_hour'][:]\n valid_hour_indices = np.where((self.start_hour <= forecast_hours) &\n (forecast_hours <= self.end_hour))[0]\n for size_threshold in self.size_thresholds:\n self.raw_forecasts[model_name][size_threshold\n ] = forecast_obj.variables['prob_hail_{0:02d}_mm'.format(\n size_threshold)][valid_hour_indices]\n forecast_obj.close()\n",
"role": "user"
},
{
"content": "load_forecasts",
"role": "assistant"
}
] |
def x(self):
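    # Sum the hourly probability fields over each hour window for every model and size threshold.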
for model_name in self.model_names:
self.window_forecasts[model_name] = {}
for size_threshold in self.size_thresholds:
self.window_forecasts[model_name][size_threshold] = np.array([
self.raw_forecasts[model_name][size_threshold][sl].sum(axis
=0) for sl in self.hour_windows])
|
get_window_forecasts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n for model_name in self.model_names:\n self.window_forecasts[model_name] = {}\n for size_threshold in self.size_thresholds:\n self.window_forecasts[model_name][size_threshold] = np.array([\n self.raw_forecasts[model_name][size_threshold][sl].sum(axis\n =0) for sl in self.hour_windows])\n",
"role": "user"
},
{
"content": "get_window_forecasts",
"role": "assistant"
}
] |
def x(self, mask_threshold=0.5):
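    # Load MRMS observations for the forecast window, capping values at 100, and keep
    # the maximum over each hour window; optionally build a binary mask field the same way.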
start_date = self.run_date + timedelta(hours=self.start_hour)
end_date = self.run_date + timedelta(hours=self.end_hour)
mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.
mrms_path)
mrms_grid.load_data()
if len(mrms_grid.data) > 0:
self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100,
100, mrms_grid.data)
self.window_obs[self.mrms_variable] = np.array([self.raw_obs[self.
mrms_variable][sl].max(axis=0) for sl in self.hour_windows])
if self.obs_mask:
mask_grid = MRMSGrid(start_date, end_date, self.mask_variable,
self.mrms_path)
mask_grid.load_data()
self.raw_obs[self.mask_variable] = np.where(mask_grid.data >=
mask_threshold, 1, 0)
self.window_obs[self.mask_variable] = np.array([self.raw_obs[
self.mask_variable][sl].max(axis=0) for sl in self.
hour_windows])
|
load_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, mask_threshold=0.5):\n start_date = self.run_date + timedelta(hours=self.start_hour)\n end_date = self.run_date + timedelta(hours=self.end_hour)\n mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.\n mrms_path)\n mrms_grid.load_data()\n if len(mrms_grid.data) > 0:\n self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, \n 100, mrms_grid.data)\n self.window_obs[self.mrms_variable] = np.array([self.raw_obs[self.\n mrms_variable][sl].max(axis=0) for sl in self.hour_windows])\n if self.obs_mask:\n mask_grid = MRMSGrid(start_date, end_date, self.mask_variable,\n self.mrms_path)\n mask_grid.load_data()\n self.raw_obs[self.mask_variable] = np.where(mask_grid.data >=\n mask_threshold, 1, 0)\n self.window_obs[self.mask_variable] = np.array([self.raw_obs[\n self.mask_variable][sl].max(axis=0) for sl in self.\n hour_windows])\n",
"role": "user"
},
{
"content": "load_obs",
"role": "assistant"
}
] |
def x(self, dilation_radius):
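    # Binary-dilate each windowed exceedance field by dilation_radius iterations
    # so nearby observed hail counts as a hit.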
for s in self.size_thresholds:
self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].
shape)
for t in range(self.dilated_obs[s].shape[0]):
self.dilated_obs[s][t][binary_dilation(self.window_obs[self.
mrms_variable][t] >= s, iterations=dilation_radius)] = 1
|
dilate_obs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, dilation_radius):\n for s in self.size_thresholds:\n self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].\n shape)\n for t in range(self.dilated_obs[s].shape[0]):\n self.dilated_obs[s][t][binary_dilation(self.window_obs[self.\n mrms_variable][t] >= s, iterations=dilation_radius)] = 1\n",
"role": "user"
},
{
"content": "dilate_obs",
"role": "assistant"
}
] |
def x(self, prob_thresholds):
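    # Build one DistributedROC per (model, size threshold, hour window), optionally
    # restricted to points where the observation mask is set.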
all_roc_curves = {}
for model_name in self.model_names:
all_roc_curves[model_name] = {}
for size_threshold in self.size_thresholds:
all_roc_curves[model_name][size_threshold] = {}
for h, hour_window in enumerate(self.hour_windows):
hour_range = hour_window.start, hour_window.stop
all_roc_curves[model_name][size_threshold][hour_range
] = DistributedROC(prob_thresholds, 1)
if self.obs_mask:
all_roc_curves[model_name][size_threshold][hour_range
].update(self.window_forecasts[model_name][
size_threshold][h][self.window_obs[self.
mask_variable][h] > 0], self.dilated_obs[
size_threshold][h][self.window_obs[self.
mask_variable][h] > 0])
else:
all_roc_curves[model_name][size_threshold][hour_range
].update(self.window_forecasts[model_name][
size_threshold][h], self.dilated_obs[size_threshold][h]
)
return all_roc_curves
|
roc_curves
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, prob_thresholds):\n all_roc_curves = {}\n for model_name in self.model_names:\n all_roc_curves[model_name] = {}\n for size_threshold in self.size_thresholds:\n all_roc_curves[model_name][size_threshold] = {}\n for h, hour_window in enumerate(self.hour_windows):\n hour_range = hour_window.start, hour_window.stop\n all_roc_curves[model_name][size_threshold][hour_range\n ] = DistributedROC(prob_thresholds, 1)\n if self.obs_mask:\n all_roc_curves[model_name][size_threshold][hour_range\n ].update(self.window_forecasts[model_name][\n size_threshold][h][self.window_obs[self.\n mask_variable][h] > 0], self.dilated_obs[\n size_threshold][h][self.window_obs[self.\n mask_variable][h] > 0])\n else:\n all_roc_curves[model_name][size_threshold][hour_range\n ].update(self.window_forecasts[model_name][\n size_threshold][h], self.dilated_obs[size_threshold][h]\n )\n return all_roc_curves\n",
"role": "user"
},
{
"content": "roc_curves",
"role": "assistant"
}
] |
def x(self, prob_thresholds):
all_rel_curves = {}
for model_name in self.model_names:
all_rel_curves[model_name] = {}
for size_threshold in self.size_thresholds:
all_rel_curves[model_name][size_threshold] = {}
for h, hour_window in enumerate(self.hour_windows):
hour_range = hour_window.start, hour_window.stop
all_rel_curves[model_name][size_threshold][hour_range
] = DistributedReliability(prob_thresholds, 1)
if self.obs_mask:
all_rel_curves[model_name][size_threshold][hour_range
].update(self.window_forecasts[model_name][
size_threshold][h][self.window_obs[self.
mask_variable][h] > 0], self.dilated_obs[
size_threshold][h][self.window_obs[self.
mask_variable][h] > 0])
else:
all_rel_curves[model_name][size_threshold][hour_range
].update(self.window_forecasts[model_name][
size_threshold][h], self.dilated_obs[size_threshold][h]
)
return all_rel_curves
|
reliability_curves
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, prob_thresholds):\n all_rel_curves = {}\n for model_name in self.model_names:\n all_rel_curves[model_name] = {}\n for size_threshold in self.size_thresholds:\n all_rel_curves[model_name][size_threshold] = {}\n for h, hour_window in enumerate(self.hour_windows):\n hour_range = hour_window.start, hour_window.stop\n all_rel_curves[model_name][size_threshold][hour_range\n ] = DistributedReliability(prob_thresholds, 1)\n if self.obs_mask:\n all_rel_curves[model_name][size_threshold][hour_range\n ].update(self.window_forecasts[model_name][\n size_threshold][h][self.window_obs[self.\n mask_variable][h] > 0], self.dilated_obs[\n size_threshold][h][self.window_obs[self.\n mask_variable][h] > 0])\n else:\n all_rel_curves[model_name][size_threshold][hour_range\n ].update(self.window_forecasts[model_name][\n size_threshold][h], self.dilated_obs[size_threshold][h]\n )\n return all_rel_curves\n",
"role": "user"
},
{
"content": "reliability_curves",
"role": "assistant"
}
] |
def x(map_file):
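    # Read map longitude/latitude either from a pickled dict or from a netCDF file
    # (plain lon/lat variables or WRF-style XLONG/XLAT).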
if map_file[-4:] == '.pkl':
        map_data = pickle.load(open(map_file, 'rb'))
lon = map_data['lon']
lat = map_data['lat']
else:
map_data = Dataset(map_file)
if 'lon' in map_data.variables.keys():
lon = map_data.variables['lon'][:]
lat = map_data.variables['lat'][:]
else:
lon = map_data.variables['XLONG'][0]
lat = map_data.variables['XLAT'][0]
return lon, lat
|
load_map_coordinates
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(map_file):\n if map_file[-4:] == '.pkl':\n map_data = pickle.load(open(map_file))\n lon = map_data['lon']\n lat = map_data['lat']\n else:\n map_data = Dataset(map_file)\n if 'lon' in map_data.variables.keys():\n lon = map_data.variables['lon'][:]\n lat = map_data.variables['lat'][:]\n else:\n lon = map_data.variables['XLONG'][0]\n lat = map_data.variables['XLAT'][0]\n return lon, lat\n",
"role": "user"
},
{
"content": "load_map_coordinates",
"role": "assistant"
}
] |
def x(start_date, variable, interp_type, mrms_path, map_filename, out_path):
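    # Regrid one day of MRMS data for a variable onto the target map (ARPS .map,
    # NCAR .txt, or a coordinate file) and write the result to netCDF.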
try:
print(start_date, variable)
end_date = start_date + timedelta(hours=23)
mrms = MRMSGrid(start_date, end_date, variable, mrms_path)
if mrms.data is not None:
if map_filename[-3:] == 'map':
mapping_data = make_proj_grids(*read_arps_map_file(
map_filename))
mrms.interpolate_to_netcdf(mapping_data['lon'],
mapping_data['lat'], out_path, interp_type=interp_type)
elif map_filename[-3:] == 'txt':
mapping_data = make_proj_grids(*read_ncar_map_file(
map_filename))
mrms.interpolate_to_netcdf(mapping_data['lon'],
mapping_data['lat'], out_path, interp_type=interp_type)
else:
lon, lat = load_map_coordinates(map_filename)
mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=
interp_type)
except Exception as e:
print(traceback.format_exc())
raise e
|
interpolate_mrms_day
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(start_date, variable, interp_type, mrms_path, map_filename, out_path):\n try:\n print(start_date, variable)\n end_date = start_date + timedelta(hours=23)\n mrms = MRMSGrid(start_date, end_date, variable, mrms_path)\n if mrms.data is not None:\n if map_filename[-3:] == 'map':\n mapping_data = make_proj_grids(*read_arps_map_file(\n map_filename))\n mrms.interpolate_to_netcdf(mapping_data['lon'],\n mapping_data['lat'], out_path, interp_type=interp_type)\n elif map_filename[-3:] == 'txt':\n mapping_data = make_proj_grids(*read_ncar_map_file(\n map_filename))\n mrms.interpolate_to_netcdf(mapping_data['lon'],\n mapping_data['lat'], out_path, interp_type=interp_type)\n else:\n lon, lat = load_map_coordinates(map_filename)\n mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=\n interp_type)\n except Exception as e:\n print(traceback.format_exc())\n raise e\n",
"role": "user"
},
{
"content": "interpolate_mrms_day",
"role": "assistant"
}
] |
def x(self):
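    # For each expected timestamp, gunzip (if needed), read, and re-gzip the matching
    # MRMS file; timestamps with no file are filled with -9999.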
data = []
loaded_dates = []
loaded_indices = []
for t, timestamp in enumerate(self.all_dates):
date_str = timestamp.date().strftime('%Y%m%d')
full_path = self.path_start + date_str + '/'
if self.variable in os.listdir(full_path):
full_path += self.variable + '/'
data_files = sorted(os.listdir(full_path))
file_dates = pd.to_datetime([d.split('_')[-1][0:13] for d in
data_files])
if timestamp in file_dates:
data_file = data_files[np.where(timestamp == file_dates)[0][0]]
print(full_path + data_file)
if data_file[-2:] == 'gz':
subprocess.call(['gunzip', full_path + data_file])
file_obj = Nio.open_file(full_path + data_file[:-3])
else:
file_obj = Nio.open_file(full_path + data_file)
var_name = sorted(file_obj.variables.keys())[0]
data.append(file_obj.variables[var_name][:])
if self.lon is None:
self.lon = file_obj.variables['lon_0'][:]
if np.count_nonzero(self.lon > 180) > 0:
self.lon -= 360
self.lat = file_obj.variables['lat_0'][:]
file_obj.close()
if data_file[-2:] == 'gz':
subprocess.call(['gzip', full_path + data_file[:-3]])
else:
subprocess.call(['gzip', full_path + data_file])
loaded_dates.append(timestamp)
loaded_indices.append(t)
if len(loaded_dates) > 0:
self.loaded_dates = pd.DatetimeIndex(loaded_dates)
self.data = np.ones((self.all_dates.shape[0], data[0].shape[0],
data[0].shape[1])) * -9999
self.data[loaded_indices] = np.array(data)
|
load_data
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n data = []\n loaded_dates = []\n loaded_indices = []\n for t, timestamp in enumerate(self.all_dates):\n date_str = timestamp.date().strftime('%Y%m%d')\n full_path = self.path_start + date_str + '/'\n if self.variable in os.listdir(full_path):\n full_path += self.variable + '/'\n data_files = sorted(os.listdir(full_path))\n file_dates = pd.to_datetime([d.split('_')[-1][0:13] for d in\n data_files])\n if timestamp in file_dates:\n data_file = data_files[np.where(timestamp == file_dates)[0][0]]\n print(full_path + data_file)\n if data_file[-2:] == 'gz':\n subprocess.call(['gunzip', full_path + data_file])\n file_obj = Nio.open_file(full_path + data_file[:-3])\n else:\n file_obj = Nio.open_file(full_path + data_file)\n var_name = sorted(file_obj.variables.keys())[0]\n data.append(file_obj.variables[var_name][:])\n if self.lon is None:\n self.lon = file_obj.variables['lon_0'][:]\n if np.count_nonzero(self.lon > 180) > 0:\n self.lon -= 360\n self.lat = file_obj.variables['lat_0'][:]\n file_obj.close()\n if data_file[-2:] == 'gz':\n subprocess.call(['gzip', full_path + data_file[:-3]])\n else:\n subprocess.call(['gzip', full_path + data_file])\n loaded_dates.append(timestamp)\n loaded_indices.append(t)\n if len(loaded_dates) > 0:\n self.loaded_dates = pd.DatetimeIndex(loaded_dates)\n self.data = np.ones((self.all_dates.shape[0], data[0].shape[0],\n data[0].shape[1])) * -9999\n self.data[loaded_indices] = np.array(data)\n",
"role": "user"
},
{
"content": "load_data",
"role": "assistant"
}
] |
def x(self, in_lon, in_lat):
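    # Fit a cubic RectBivariateSpline to each non-missing time step (flipping latitude
    # when it is stored north-to-south) and evaluate it at the target points.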
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
for d in range(self.data.shape[0]):
print('Loading ', d, self.variable, self.start_date)
if self.data[d].max() > -999:
step = self.data[d]
step[step < 0] = 0
if self.lat[-1] < self.lat[0]:
spline = RectBivariateSpline(self.lat[::-1], self.lon, step
[::-1], kx=3, ky=3)
else:
spline = RectBivariateSpline(self.lat, self.lon, step, kx=3,
ky=3)
print('Evaluating', d, self.variable, self.start_date)
flat_data = spline.ev(in_lat.ravel(), in_lon.ravel())
out_data[d] = flat_data.reshape(in_lon.shape)
del spline
else:
print(d, ' is missing')
out_data[d] = -9999
return out_data
|
interpolate_grid
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, in_lon, in_lat):\n out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))\n for d in range(self.data.shape[0]):\n print('Loading ', d, self.variable, self.start_date)\n if self.data[d].max() > -999:\n step = self.data[d]\n step[step < 0] = 0\n if self.lat[-1] < self.lat[0]:\n spline = RectBivariateSpline(self.lat[::-1], self.lon, step\n [::-1], kx=3, ky=3)\n else:\n spline = RectBivariateSpline(self.lat, self.lon, step, kx=3,\n ky=3)\n print('Evaluating', d, self.variable, self.start_date)\n flat_data = spline.ev(in_lat.ravel(), in_lon.ravel())\n out_data[d] = flat_data.reshape(in_lon.shape)\n del spline\n else:\n print(d, ' is missing')\n out_data[d] = -9999\n return out_data\n",
"role": "user"
},
{
"content": "interpolate_grid",
"role": "assistant"
}
] |
def x(self, in_lon, in_lat, radius=0.05):
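    # Assign each output point the largest nonzero input value within radius: nonzero
    # points are processed in ascending value order so larger values overwrite smaller ones.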
out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)
out_indices = np.indices(out_data.shape[1:])
out_rows = out_indices[0].ravel()
out_cols = out_indices[1].ravel()
for d in range(self.data.shape[0]):
nz_points = np.where(self.data[d] > 0)
if len(nz_points[0]) > 0:
nz_vals = self.data[d][nz_points]
nz_rank = np.argsort(nz_vals)
original_points = cKDTree(np.vstack((self.lat[nz_points[0][
nz_rank]], self.lon[nz_points[1][nz_rank]])).T)
all_neighbors = original_points.query_ball_tree(in_tree, radius,
p=2, eps=0)
for n, neighbors in enumerate(all_neighbors):
if len(neighbors) > 0:
out_data[d, out_rows[neighbors], out_cols[neighbors]
] = nz_vals[nz_rank][n]
return out_data
|
max_neighbor
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, in_lon, in_lat, radius=0.05):\n out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))\n in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)\n out_indices = np.indices(out_data.shape[1:])\n out_rows = out_indices[0].ravel()\n out_cols = out_indices[1].ravel()\n for d in range(self.data.shape[0]):\n nz_points = np.where(self.data[d] > 0)\n if len(nz_points[0]) > 0:\n nz_vals = self.data[d][nz_points]\n nz_rank = np.argsort(nz_vals)\n original_points = cKDTree(np.vstack((self.lat[nz_points[0][\n nz_rank]], self.lon[nz_points[1][nz_rank]])).T)\n all_neighbors = original_points.query_ball_tree(in_tree, radius,\n p=2, eps=0)\n for n, neighbors in enumerate(all_neighbors):\n if len(neighbors) > 0:\n out_data[d, out_rows[neighbors], out_cols[neighbors]\n ] = nz_vals[nz_rank][n]\n return out_data\n",
"role": "user"
},
{
"content": "max_neighbor",
"role": "assistant"
}
] |
def x(self, in_lon, in_lat, out_path, date_unit=
'seconds since 1970-01-01T00:00', interp_type='spline'):
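    # Interpolate onto the target grid and write a CF-1.6 netCDF file,
    # inferring units from the variable name.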
if interp_type == 'spline':
out_data = self.interpolate_grid(in_lon, in_lat)
else:
out_data = self.max_neighbor(in_lon, in_lat)
if not os.access(out_path + self.variable, os.R_OK):
try:
os.mkdir(out_path + self.variable)
except OSError:
print(out_path + self.variable + ' already created')
out_file = out_path + self.variable + '/' + '{0}_{1}_{2}.nc'.format(self
.variable, self.start_date.strftime('%Y%m%d-%H:%M'), self.end_date.
strftime('%Y%m%d-%H:%M'))
out_obj = Dataset(out_file, 'w')
out_obj.createDimension('time', out_data.shape[0])
out_obj.createDimension('y', out_data.shape[1])
out_obj.createDimension('x', out_data.shape[2])
data_var = out_obj.createVariable(self.variable, 'f4', ('time', 'y',
'x'), zlib=True, fill_value=-9999.0, least_significant_digit=3)
data_var[:] = out_data
data_var.long_name = self.variable
data_var.coordinates = 'latitude longitude'
if 'MESH' in self.variable or 'QPE' in self.variable:
data_var.units = 'mm'
elif 'Reflectivity' in self.variable:
data_var.units = 'dBZ'
elif 'Rotation' in self.variable:
data_var.units = 's-1'
else:
data_var.units = ''
out_lon = out_obj.createVariable('longitude', 'f4', ('y', 'x'), zlib=True)
out_lon[:] = in_lon
out_lon.units = 'degrees_east'
out_lat = out_obj.createVariable('latitude', 'f4', ('y', 'x'), zlib=True)
out_lat[:] = in_lat
out_lat.units = 'degrees_north'
dates = out_obj.createVariable('time', 'i8', ('time',), zlib=True)
dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)
).astype(np.int64)
dates.long_name = 'Valid date'
dates.units = date_unit
out_obj.Conventions = 'CF-1.6'
out_obj.close()
return
|
interpolate_to_netcdf
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, in_lon, in_lat, out_path, date_unit=\n 'seconds since 1970-01-01T00:00', interp_type='spline'):\n if interp_type == 'spline':\n out_data = self.interpolate_grid(in_lon, in_lat)\n else:\n out_data = self.max_neighbor(in_lon, in_lat)\n if not os.access(out_path + self.variable, os.R_OK):\n try:\n os.mkdir(out_path + self.variable)\n except OSError:\n print(out_path + self.variable + ' already created')\n out_file = out_path + self.variable + '/' + '{0}_{1}_{2}.nc'.format(self\n .variable, self.start_date.strftime('%Y%m%d-%H:%M'), self.end_date.\n strftime('%Y%m%d-%H:%M'))\n out_obj = Dataset(out_file, 'w')\n out_obj.createDimension('time', out_data.shape[0])\n out_obj.createDimension('y', out_data.shape[1])\n out_obj.createDimension('x', out_data.shape[2])\n data_var = out_obj.createVariable(self.variable, 'f4', ('time', 'y',\n 'x'), zlib=True, fill_value=-9999.0, least_significant_digit=3)\n data_var[:] = out_data\n data_var.long_name = self.variable\n data_var.coordinates = 'latitude longitude'\n if 'MESH' in self.variable or 'QPE' in self.variable:\n data_var.units = 'mm'\n elif 'Reflectivity' in self.variable:\n data_var.units = 'dBZ'\n elif 'Rotation' in self.variable:\n data_var.units = 's-1'\n else:\n data_var.units = ''\n out_lon = out_obj.createVariable('longitude', 'f4', ('y', 'x'), zlib=True)\n out_lon[:] = in_lon\n out_lon.units = 'degrees_east'\n out_lat = out_obj.createVariable('latitude', 'f4', ('y', 'x'), zlib=True)\n out_lat[:] = in_lat\n out_lat.units = 'degrees_north'\n dates = out_obj.createVariable('time', 'i8', ('time',), zlib=True)\n dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)\n ).astype(np.int64)\n dates.long_name = 'Valid date'\n dates.units = date_unit\n out_obj.Conventions = 'CF-1.6'\n out_obj.close()\n return\n",
"role": "user"
},
{
"content": "interpolate_to_netcdf",
"role": "assistant"
}
] |
def x(hardware_source_id, sync=True):
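    # Yield a helper that returns a copy of the data from the next completed frame
    # of the hardware source; the sync argument is currently unused.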
hardware_source = HardwareSourceManager(
).get_hardware_source_for_hardware_source_id(hardware_source_id)
    def get_last_data():
return hardware_source.get_next_xdatas_to_finish()[0].data.copy()
yield get_last_data
|
get_data_generator_by_id
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(hardware_source_id, sync=True):\n hardware_source = HardwareSourceManager(\n ).get_hardware_source_for_hardware_source_id(hardware_source_id)\n\n def x():\n return hardware_source.get_next_xdatas_to_finish()[0].data.copy()\n yield get_last_data\n",
"role": "user"
},
{
"content": "get_data_generator_by_id",
"role": "assistant"
}
] |
def x(config_path):
if os.path.exists(config_path):
logging.info('Parsing alias file {:s}'.format(config_path))
try:
config = configparser.ConfigParser()
config.read(config_path)
for section in config.sections():
device = config.get(section, 'device')
hardware_alias = config.get(section, 'hardware_alias')
display_name = config.get(section, 'display_name')
try:
logging.info(
'Adding alias {:s} for device {:s}, display name: {:s} '
.format(hardware_alias, device, display_name))
HardwareSourceManager().make_instrument_alias(device,
hardware_alias, _(display_name))
except Exception as e:
logging.info(
'Error creating hardware alias {:s} for device {:s} '
.format(hardware_alias, device))
logging.info(traceback.format_exc())
except Exception as e:
logging.info('Error reading alias file from: ' + config_path)
logging.info(traceback.format_exc())
return True
return False
|
parse_hardware_aliases_config_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(config_path):\n if os.path.exists(config_path):\n logging.info('Parsing alias file {:s}'.format(config_path))\n try:\n config = configparser.ConfigParser()\n config.read(config_path)\n for section in config.sections():\n device = config.get(section, 'device')\n hardware_alias = config.get(section, 'hardware_alias')\n display_name = config.get(section, 'display_name')\n try:\n logging.info(\n 'Adding alias {:s} for device {:s}, display name: {:s} '\n .format(hardware_alias, device, display_name))\n HardwareSourceManager().make_instrument_alias(device,\n hardware_alias, _(display_name))\n except Exception as e:\n logging.info(\n 'Error creating hardware alias {:s} for device {:s} '\n .format(hardware_alias, device))\n logging.info(traceback.format_exc())\n except Exception as e:\n logging.info('Error reading alias file from: ' + config_path)\n logging.info(traceback.format_exc())\n return True\n return False\n",
"role": "user"
},
{
"content": "parse_hardware_aliases_config_file",
"role": "assistant"
}
] |
def x(self, instrument_id, alias_instrument_id, display_name):
self.__aliases[alias_instrument_id] = instrument_id, display_name
for f in self.aliases_updated:
f()
|
make_instrument_alias
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, instrument_id, alias_instrument_id, display_name):\n self.__aliases[alias_instrument_id] = instrument_id, display_name\n for f in self.aliases_updated:\n f()\n",
"role": "user"
},
{
"content": "make_instrument_alias",
"role": "assistant"
}
] |
def x(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str,
sub_area, view_id) ->None:
self.__state = state
self.__sub_area = sub_area
hardware_source_id = self.__hardware_source.hardware_source_id
channel_index = self.index
channel_id = self.channel_id
channel_name = self.name
metadata = copy.deepcopy(data_and_metadata.metadata)
hardware_source_metadata = dict()
hardware_source_metadata['hardware_source_id'] = hardware_source_id
hardware_source_metadata['channel_index'] = channel_index
if channel_id is not None:
hardware_source_metadata['reference_key'] = '_'.join([
hardware_source_id, channel_id])
hardware_source_metadata['channel_id'] = channel_id
else:
hardware_source_metadata['reference_key'] = hardware_source_id
if channel_name is not None:
hardware_source_metadata['channel_name'] = channel_name
if view_id:
hardware_source_metadata['view_id'] = view_id
metadata.setdefault('hardware_source', dict()).update(
hardware_source_metadata)
data = data_and_metadata.data
master_data = (self.__data_and_metadata.data if self.
__data_and_metadata else None)
data_matches = (master_data is not None and data.shape == master_data.
shape and data.dtype == master_data.dtype)
if data_matches and sub_area is not None:
top = sub_area[0][0]
bottom = sub_area[0][0] + sub_area[1][0]
left = sub_area[0][1]
right = sub_area[0][1] + sub_area[1][1]
if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[
1]:
master_data = numpy.copy(master_data)
master_data[top:bottom, left:right] = data[top:bottom, left:right]
else:
master_data = numpy.copy(data)
else:
master_data = data
data_descriptor = data_and_metadata.data_descriptor
intensity_calibration = (data_and_metadata.intensity_calibration if
data_and_metadata else None)
dimensional_calibrations = (data_and_metadata.dimensional_calibrations if
data_and_metadata else None)
timestamp = data_and_metadata.timestamp
new_extended_data = DataAndMetadata.new_data_and_metadata(master_data,
intensity_calibration=intensity_calibration,
dimensional_calibrations=dimensional_calibrations, metadata=
metadata, timestamp=timestamp, data_descriptor=data_descriptor)
self.__data_and_metadata = new_extended_data
self.data_channel_updated_event.fire(new_extended_data)
self.is_dirty = True
|
update
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str,\n sub_area, view_id) ->None:\n self.__state = state\n self.__sub_area = sub_area\n hardware_source_id = self.__hardware_source.hardware_source_id\n channel_index = self.index\n channel_id = self.channel_id\n channel_name = self.name\n metadata = copy.deepcopy(data_and_metadata.metadata)\n hardware_source_metadata = dict()\n hardware_source_metadata['hardware_source_id'] = hardware_source_id\n hardware_source_metadata['channel_index'] = channel_index\n if channel_id is not None:\n hardware_source_metadata['reference_key'] = '_'.join([\n hardware_source_id, channel_id])\n hardware_source_metadata['channel_id'] = channel_id\n else:\n hardware_source_metadata['reference_key'] = hardware_source_id\n if channel_name is not None:\n hardware_source_metadata['channel_name'] = channel_name\n if view_id:\n hardware_source_metadata['view_id'] = view_id\n metadata.setdefault('hardware_source', dict()).update(\n hardware_source_metadata)\n data = data_and_metadata.data\n master_data = (self.__data_and_metadata.data if self.\n __data_and_metadata else None)\n data_matches = (master_data is not None and data.shape == master_data.\n shape and data.dtype == master_data.dtype)\n if data_matches and sub_area is not None:\n top = sub_area[0][0]\n bottom = sub_area[0][0] + sub_area[1][0]\n left = sub_area[0][1]\n right = sub_area[0][1] + sub_area[1][1]\n if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[\n 1]:\n master_data = numpy.copy(master_data)\n master_data[top:bottom, left:right] = data[top:bottom, left:right]\n else:\n master_data = numpy.copy(data)\n else:\n master_data = data\n data_descriptor = data_and_metadata.data_descriptor\n intensity_calibration = (data_and_metadata.intensity_calibration if\n data_and_metadata else None)\n dimensional_calibrations = (data_and_metadata.dimensional_calibrations if\n data_and_metadata else None)\n timestamp = data_and_metadata.timestamp\n new_extended_data = DataAndMetadata.new_data_and_metadata(master_data,\n intensity_calibration=intensity_calibration,\n dimensional_calibrations=dimensional_calibrations, metadata=\n metadata, timestamp=timestamp, data_descriptor=data_descriptor)\n self.__data_and_metadata = new_extended_data\n self.data_channel_updated_event.fire(new_extended_data)\n self.is_dirty = True\n",
"role": "user"
},
{
"content": "update",
"role": "assistant"
}
] |
def x(self):
old_start_count = self.__start_count
self.__start_count += 1
if old_start_count == 0:
self.data_channel_start_event.fire()
|
start
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n old_start_count = self.__start_count\n self.__start_count += 1\n if old_start_count == 0:\n self.data_channel_start_event.fire()\n",
"role": "user"
},
{
"content": "start",
"role": "assistant"
}
] |
def x(self, data_item_reference):
display_item = data_item_reference.display_item
data_item = display_item.data_item if display_item else None
if data_item and display_item:
self.__connect_display(display_item)
else:
def x():
self.__data_item_reference_changed_event_listener.close()
self.connect_data_item_reference(data_item_reference)
self.__data_item_reference_changed_event_listener = (
data_item_reference.data_item_reference_changed_event.listen(
data_item_reference_changed))
|
connect_data_item_reference
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, data_item_reference):\n display_item = data_item_reference.display_item\n data_item = display_item.data_item if display_item else None\n if data_item and display_item:\n self.__connect_display(display_item)\n else:\n\n def x():\n self.__data_item_reference_changed_event_listener.close()\n self.connect_data_item_reference(data_item_reference)\n self.__data_item_reference_changed_event_listener = (\n data_item_reference.data_item_reference_changed_event.listen(\n data_item_reference_changed))\n",
"role": "user"
},
{
"content": "connect_data_item_reference",
"role": "assistant"
}
] |
def x(self, timeout: float=None) ->typing.List[DataAndMetadata.DataAndMetadata
]:
timeout = timeout if timeout is not None else 10.0
with self.__buffer_lock:
if len(self.__buffer) == 0:
done_event = threading.Event()
self.__done_events.append(done_event)
self.__buffer_lock.release()
done = done_event.wait(timeout)
self.__buffer_lock.acquire()
if not done:
raise Exception('Could not grab latest.')
return self.__buffer.pop(0)
|
grab_earliest
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, timeout: float=None) ->typing.List[DataAndMetadata.DataAndMetadata\n ]:\n timeout = timeout if timeout is not None else 10.0\n with self.__buffer_lock:\n if len(self.__buffer) == 0:\n done_event = threading.Event()\n self.__done_events.append(done_event)\n self.__buffer_lock.release()\n done = done_event.wait(timeout)\n self.__buffer_lock.acquire()\n if not done:\n raise Exception('Could not grab latest.')\n return self.__buffer.pop(0)\n",
"role": "user"
},
{
"content": "grab_earliest",
"role": "assistant"
}
] |
def x(self, timeout: float=None) ->typing.List[DataAndMetadata.DataAndMetadata
]:
with self.__buffer_lock:
self.__buffer = list()
return self.grab_latest(timeout)
|
grab_next
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, timeout: float=None) ->typing.List[DataAndMetadata.DataAndMetadata\n ]:\n with self.__buffer_lock:\n self.__buffer = list()\n return self.grab_latest(timeout)\n",
"role": "user"
},
{
"content": "grab_next",
"role": "assistant"
}
] |
def x(self, timeout: float=None) ->typing.List[DataAndMetadata.DataAndMetadata
]:
self.grab_next(timeout)
return self.grab_next(timeout)
|
grab_following
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, timeout: float=None) ->typing.List[DataAndMetadata.DataAndMetadata\n ]:\n self.grab_next(timeout)\n return self.grab_next(timeout)\n",
"role": "user"
},
{
"content": "grab_following",
"role": "assistant"
}
] |
def x(self) ->None:
with self.__state_lock:
if self.__state == DataChannelBuffer.State.started:
self.__state = DataChannelBuffer.State.paused
|
pause
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self) ->None:\n with self.__state_lock:\n if self.__state == DataChannelBuffer.State.started:\n self.__state = DataChannelBuffer.State.paused\n",
"role": "user"
},
{
"content": "pause",
"role": "assistant"
}
] |
def x(self) ->None:
with self.__state_lock:
if self.__state == DataChannelBuffer.State.paused:
self.__state = DataChannelBuffer.State.started
|
resume
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self) ->None:\n with self.__state_lock:\n if self.__state == DataChannelBuffer.State.paused:\n self.__state = DataChannelBuffer.State.started\n",
"role": "user"
},
{
"content": "resume",
"role": "assistant"
}
] |
def x(n, mapping):
try:
it = mapping.iteritems()
except AttributeError:
it = iter(mapping.items())
pq = minpq()
try:
for i in range(n):
pq.additem(*next(it))
except StopIteration:
pass
try:
while it:
pq.pushpopitem(*next(it))
except StopIteration:
pass
out = list(pq.popkeys())
out.reverse()
return out
|
nlargest
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(n, mapping):\n try:\n it = mapping.iteritems()\n except AttributeError:\n it = iter(mapping.items())\n pq = minpq()\n try:\n for i in range(n):\n pq.additem(*next(it))\n except StopIteration:\n pass\n try:\n while it:\n pq.pushpopitem(*next(it))\n except StopIteration:\n pass\n out = list(pq.popkeys())\n out.reverse()\n return out\n",
"role": "user"
},
{
"content": "nlargest",
"role": "assistant"
}
] |
def x(cls, iterable, value, **kwargs):
return cls(((k, value) for k in iterable), **kwargs)
|
fromkeys
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cls, iterable, value, **kwargs):\n return cls(((k, value) for k in iterable), **kwargs)\n",
"role": "user"
},
{
"content": "fromkeys",
"role": "assistant"
}
] |
def x(self):
return self.__class__(self, key=self._keyfn, precedes=self._precedes)
|
copy
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return self.__class__(self, key=self._keyfn, precedes=self._precedes)\n",
"role": "user"
},
{
"content": "copy",
"role": "assistant"
}
] |
def x(self, key=__marker, default=__marker):
heap = self._heap
position = self._position
if key is self.__marker:
if not heap:
raise KeyError('pqdict is empty')
key = heap[0].key
del self[key]
return key
try:
pos = position.pop(key)
except KeyError:
if default is self.__marker:
raise
return default
else:
node_to_delete = heap[pos]
end = heap.pop()
if end is not node_to_delete:
heap[pos] = end
position[end.key] = pos
self._reheapify(pos)
value = node_to_delete.value
del node_to_delete
return value
|
pop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, key=__marker, default=__marker):\n heap = self._heap\n position = self._position\n if key is self.__marker:\n if not heap:\n raise KeyError('pqdict is empty')\n key = heap[0].key\n del self[key]\n return key\n try:\n pos = position.pop(key)\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n node_to_delete = heap[pos]\n end = heap.pop()\n if end is not node_to_delete:\n heap[pos] = end\n position[end.key] = pos\n self._reheapify(pos)\n value = node_to_delete.value\n del node_to_delete\n return value\n",
"role": "user"
},
{
"content": "pop",
"role": "assistant"
}
] |
def x(self):
heap = self._heap
position = self._position
try:
end = heap.pop(-1)
except IndexError:
raise KeyError('pqdict is empty')
if heap:
node = heap[0]
heap[0] = end
position[end.key] = 0
self._sink(0)
else:
node = end
del position[node.key]
return node.key, node.value
|
popitem
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n heap = self._heap\n position = self._position\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n if heap:\n node = heap[0]\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key]\n return node.key, node.value\n",
"role": "user"
},
{
"content": "popitem",
"role": "assistant"
}
] |
def x(self):
try:
node = self._heap[0]
except IndexError:
raise KeyError('pqdict is empty')
return node.key, node.value
|
topitem
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n try:\n node = self._heap[0]\n except IndexError:\n raise KeyError('pqdict is empty')\n return node.key, node.value\n",
"role": "user"
},
{
"content": "topitem",
"role": "assistant"
}
] |