query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict)
|---|---|---|---|
Updates meta information in meta file. | def update_meta_file(meta: Dict):
print("Info: Updating meta file.")
try:
with open(meta_file_name, "w") as meta_file:
json.dump(meta, meta_file)
except OSError:
sys.exit("Could not open/write meta file: meta.json.") | [
"def update_metadata(self):\n for element in self.elements:\n self.meta_data[element.name] = element.meta_data()",
"def update_metadata(self, metadata):\n if metadata:\n self._metadata.update(metadata)\n self.to_swap_dir()",
"def update_metadata(self, new_control):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the server software version string. | def version_string(self):
return self.server_version + ' ' + self.sys_version | [
"def get_version() -> str:\n return VERSION",
"def get_software_version(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 0F\"))\n temp = self.board_socket.recv(1024)\n return(temp[3:10])",
"def server_version(self):\n ret = getattr(self, \"_SERVER_VERSION\", \"\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert string s at index i. | def insert(self, index, s):
raise NotImplementedError | [
"def insert_string(string: str, index: int, insert: str):\n return string[:index] + insert + string[index:]",
"def insert(self, text, i):\n self.fqn[i] = text;\n self.len = i + 1\n for j in range(self.len, 256): # wipe out stuff at higher indices\n self.fqn[j] = ''",
"def ins... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
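The `insert` document above is an abstract stub (it only raises `NotImplementedError`); for comparison, here is a minimal slice-based sketch of the operation the query describes, along the lines of the first negative. The standalone signature and names are illustrative, not the original class's:

```python
def insert_at(s: str, index: int, text: str) -> str:
    # Strings are immutable, so build and return a new one:
    # everything before `index`, the inserted text, then the remainder.
    return s[:index] + text + s[index:]

assert insert_at("helloworld", 5, ", ") == "hello, world"
```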
Add a single bug to the simulation. | def add_bug(self, index=None, bug=None, cell_iter=None):
bug = bug or Bug()
if bug in self.bugs:
LOG.warn("Bug %s already added" % bug)
return False
if index:
cell = self.grid[index]
else:
cell_iter = cell_iter or self.available_cell_iter
... | [
"def addIssue(self, issue):\r\n # type: (Issue) -> ()\r\n # let's worry about manual indexing later?\r\n self.issues.append(issue)\r\n self.fireTableDataChanged()",
"def foundBug(self):\n pass",
"def bug(*_):\n\n return REPLY(content=None, attachments=[\n ISSUE_NEW,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Rudimentary heat diffusion. Suppose half the lost heat radiates and the other half transmits. | def diffuse(self):
transmission_coeff = 0.3
# allow the grid to cool down
sink_coeff = 0.1
for idx, cell in self.grid.cells():
# how much total heat the cell radiates
emission_loss = cell.heat * transmission_coeff
neighbors = self.grid.neighbors(idx)
... | [
"def heat(self):\r\n return self.m_act * (self.outlet.h - self.cond.h)",
"def _momentum_diffusion(self):\n\n # copy grid ids and other variables\n wet_pwet_h_links = self.wet_pwet_horizontal_links\n wet_pwet_v_links = self.wet_pwet_vertical_links\n link_east = self.link_east\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
run pyuic5 on a single module | def build(module_name):
ui_file = f"{UI_PATH}{module_name}.ui"
py_file = f"{PY_PATH}{module_name}.py"
command = "pyuic5 {} -o {}"
# in the case of failure CPython will print its own error message
if os.system(command.format(ui_file, py_file)) == 0:
print(f"made Ui_{module_name}.py") | [
"def convert_ui(*args):\r\n directory_files = [file for file in listdir(getcwd()) if isfile(file)]\r\n uifiles = [file for file in directory_files if file[-3:] == '.ui']\r\n if len(args) == 0:\r\n for file in uifiles:\r\n system(f'pyuic5 {file} -o {file[:-3] + \".py\"}')\r\n else:\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assembles the residuals vector and the Jacobian matrix. | def assemble_residuals_and_jacobian(number_of_nodes, element_lengths, elasticity_tensor,
centerline, rotation,
increments, second_strain_invariant):
# start with a blank residuals vector
residuals = np.zeros((6*number_of_nodes), dtype=float... | [
"def residual_jacobian(self, x):\n sres = np.zeros((len(self.prior_list), len(x)))\n for iprior, prior in enumerate(self.prior_list):\n sres[iprior, prior['index']] = prior['residual_dx'](\n x[prior['index']]\n )\n\n return sres",
"def prepareJacobian(self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the corresponding keras layer from configurations | def layer_from_config(layer_conf, model_conf, data_conf):
# context = {"class_count": data_conf["class_count"]}
return object_from_conf(layer_conf, scope="layer", context=None) | [
"def get_layer(keras_tensor):\n layer = keras_tensor._keras_history[0]\n return layer",
"def get_layer_by_name(name):\n if name == ConvLayer.__name__:\n return ConvLayer\n elif name == DepthConvLayer.__name__:\n return DepthConvLayer\n elif name == PoolingLayer.__name__:\n retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the value of this node. If it is None, the output of this node has not been computed yet, so it first requires all the parameters needed from the corresponding dependency nodes. | def value(self):
if self._value is not None:
return self._value
# Compute the dependencies
inputs = []
for name, idx in self.param:
if not name == 'None':
output = name_to_nodes[name].value()[idx]
inputs.append(out... | [
"def get_value(self):\n return self.node.value()",
"def _get_its_own_value_from_input(self, input_values, reevaluate):\n if self in input_values:\n value = input_values[self]\n elif self._type == \"Deterministic node\":\n value = self._get_sample(1, input_values=input_va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a keras network from configuration dict | def net_from_config(model_conf, data_conf):
# Get network conf
net_conf = model_conf["net"]
# Input layer
transform_confs = model_conf["dataset"].get("train_transforms", [])
# Get the shape of the dataset, first check whether we have clip-feature layer in the dataset, if not, we
# use th... | [
"def generate_model_configuration(args):\n\n model_config = {\n\n \"dataset_path\": args.dataset_config.output_folder, # Input dataset folder path.\n \"reaction_classes\": args.dataset_config.final_classes, # Final list of reaction classes.\n \"input_configs\": args.descriptor_config.model... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the available machines that jobs can run on | def get_machines(request):
machines = {}
    # for (machine, attrs) in gridutil.GRID_RESOURCE_TABLE.iteritems(): does not work on Python 3
for (machine, attrs) in slurmutil.GRID_RESOURCE_TABLE.items():
if attrs['jobmanagers'] != {}:
machines[machine] = attrs['jobmanagers']
return machines | [
"def machines(self):\n ret = self._get_attr(\"machines\")\n return ret",
"def available_jobs():\n return run_rvt2('base.help.AvailableJobs', background=False)",
"def machines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MachineReferenceWithHintsArgs']]]]:\n return pulumi.get(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up the command line parser for the config subprogram and adds it to sub_parsers. | def setup_parser(sub_parsers):
parser = sub_parsers.add_parser(
"config",
prog="law config",
description="Configuration helper to get, set or remove a value from the law configuration "
"file ({}).".format(_cfg.config_file),
)
parser.add_argument(
"name",
nar... | [
"def _add_subparsers(self):\n runner = self.subparsers.add_parser(\"run\", help=\"Run a Test\")\n runner.add_argument(\"glob\", help=\"A file glob to match config files (default='%(default)s').\",\n metavar=\"<config-file glob>\",\n default=\"*.ini... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Executes the config subprogram with parsed command-line args. | def execute(args):
cfg = Config.instance()
# just print the file location?
if args.location:
print(cfg.config_file)
return
# print sections when none is given
if not args.name:
print("\n".join(cfg.sections()))
return
# print section options when none is given
... | [
"def config_main(args):\n command = args.get(\"command\")\n if command == \"set\":\n set_config(args)\n if command == \"unset\":\n unset_config(args)",
"def main():\n # set up the program to take in arguments from the command line",
"def main():\n args = docopt(__doc__, version='recip... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts that buckets exist. | def test_bucket_exists(self):
self.assertFalse(self.storage.bucket_exists(self.temp_bucket_name))
self.storage.make_bucket(self.temp_bucket_name)
self.assertTrue(self.storage.bucket_exists(self.temp_bucket_name))
self.storage.remove_bucket(self.temp_bucket_name) | [
"def test_buckets_empty(self):\n User.objects.create_user(username='empty', email='user@example.com', password='userexample')\n self.client.login(username='empty', password='userexample')\n\n response = self.client.get(reverse('buckets:list'))\n self.assertContains(response, 'No bucket y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts presigned urls can be generated for put requests. | def test_put_object_presigned_url(self):
url = self.storage.get_presigned_url(
self.bucket_name, self.object_name, method="PUT"
)
self.assertIn(self.object_name, url) | [
"def test_presigned_url(self):\n # Test we can generate presigned urls for GET and POST requests\n for action in ['GET', 'POST']:\n url = DataManager.generate_presigned_s3_url(bucket='hemlock-highway-test',\n key='customer1/data.csv',\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts presigned urls can be generated for get requests. | def test_get_presigned_url(self):
self.assertRaises(
StorageException,
self.storage.get_presigned_url,
self.bucket_name,
self.object_name,
method=HttpMethod.GET,
)
data, size = str_buffer(self.object_data)
self.storage.put_objec... | [
"def test_presigned_url(self):\n # Test we can generate presigned urls for GET and POST requests\n for action in ['GET', 'POST']:\n url = DataManager.generate_presigned_s3_url(bucket='hemlock-highway-test',\n key='customer1/data.csv',\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts an object can be retrieved from the storage implementation. | def test_get_object(self):
data, size = str_buffer(self.object_data)
self.storage.put_object(self.bucket_name, self.object_name, data, size)
data = self.storage.get_object(self.bucket_name, self.object_name)
self.assertEqual(self.object_data, loads(data.read().decode("utf-8"))) | [
"def test_a_storage_type(self):\n self.assertIsNotNone(self.storage.all())",
"def test_storage_2(self):\n with self.assertRaises(TypeError):\n all_objs = storage.all(None)",
"def test_entity_storage_initialization():\n storage = EntityStorage()\n assert isinstance(storage, EntityS... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts an object can be renamed. | def test_rename_object(self):
data, size = str_buffer(self.object_data)
self.storage.put_object(self.bucket_name, self.object_name, data, size)
new_object_name = random_str()
self.storage.rename_object(
self.bucket_name, self.object_name, new_object_name
)
sel... | [
"def test_version_rename_error_bad_new_name(self):\n rv, output = self.execute('version rename 1.0 2.0')\n self.assertEqual(2, rv, output)\n self.assertExpectedResult(output)",
"def test_used_as_name_reifier (self):\n self._test_reifiable(self.create_name())",
"def testFileRename(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asserts it is possible to concat objects. | def test_concat_objects(self):
second_object_name = random_str()
data, size = str_buffer(self.object_data)
self.storage.put_object(self.bucket_name, self.object_name, data, size)
data.seek(0)
self.storage.put_object(
self.bucket_name, second_object_name, data, size
... | [
"def test_concat_fails_empty():\n match = \"No objects to concatenate\"\n with pytest.raises(ValueError, match=match):\n concat([])",
"def test_merge(self):\n ply1 = copy.deepcopy(self.instance)\n ply2 = ply1.merge(self.instance)\n ply1.merge([self.instance], no_copy=True)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the proportion of matches between predicted and actual labels. | def score(predicted: pd.Series, actual: pd.Series) -> float:
return sum(predicted == actual) / len(predicted) | [
"def percentage_pt(y_pred, y_true):\n y_pred_soft = y_pred.exp() / (y_pred.exp().sum(-1)).unsqueeze(-1)\n\n perc = (y_pred_soft.max(dim=1)[1] == y_true.max(dim=1)[1]).sum()\n return perc",
"def percentage_labelled(self,labels):\n\t\tphrases = self.compute_phrases()\n\t\ttotal = len(phrases)\n\t\tlabelled... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert string None to NoneType. | def none_or_str(value):
if value.lower() == 'none':
return None
return value | [
"def _cast_none(x):\n if isinstance(x, six.string_types) and x == data.NONE_MAGIC_VALUE:\n return None\n\n return x",
"def convert_str_or_none(val: Optional[str]) -> Optional[str]:\n return str(val) if val is not None else val",
"def nullify(value: Optional[Any]) -> str:\n return 'null' if va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build the social structure | def buildSocialStructure(self):
self.groupNum = self.groupBase ** (self.groupLength-1)
self.indPos = [0 for x in range(self.totalNum)]
self.posInd = [[] for x in range(self.groupNum)]
for i in range(self.groupNum):
groupCount = 0;
for j in range(i*self.groupSize,... | [
"def build_cfg(self):\n for block in self.basic_blocks:\n if not block.ends_unconditional():\n if block.next:\n block.next.parents.append(block)\n block.children.append(block.next)\n targets = block.get_targets()\n if len(t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the potential position based on the current position and distance probability | def getPosition(groupLength, nowPosition, distanceProb):
potentialPos = []
# Here, the distance is a np.array, like [1].
distance = np.random.choice(groupLength, 1, p=distanceProb)[0] + 1
# print ("distance: ", distance)
if distance == 1:
potentialPos.append(nowPosition)
else:
po... | [
"def get_closest(self, pos):\n if self.empty():\n return\n\n min_idx = 0\n min_dist = euclidean(pos, self.npcs[0].get_pos())\n\n for i in range(1, len(self.npcs)):\n dist = euclidean(pos, self.npcs[i].get_pos())\n if dist < min_dist:\n min_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
pick an individual from positions. | def pickIndividual(positions, posInd):
potentialInd = []
for i in positions:
for j in posInd[i]:
potentialInd.append(j)
indIndex = np.random.choice(potentialInd, 1)[0]
return int(indIndex) | [
"def _pick_move(self, qvalues):\n unif = random.random()\n if unif < self.qlearning.epsilon: # greedy\n cell = self._greedy_pick_cell(qvalues)\n else: # random\n cell = self._random_pick_cell(qvalues)\n return cell",
"def Pick_point(self, loc):\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make sure the point is inside the image; if it is not, move it to the closest border | def limit_point_to_be_inside_image(point):
smallest_x = 0
smallest_y = 0
largest_x = IMG_HEIGHT-1
largest_y = IMG_WIDTH-1
limited_point = np.int0(np.array([
max(smallest_x, min(point[0], largest_x)),
max(smallest_y, min(point[1], largest_y))
]))
return limited_point | [
"def __validate_point(self, point):\n\n if point.x() < 0:\n point.setX(0)\n\n if point.y() < 0:\n point.setY(0)\n\n img_width = self._data.shape[1] - 1\n if point.x() > img_width:\n point.setX(img_width)\n\n img_height = self._data.shape[0] - 1\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that finds the orange in an image, makes a bounding box around it, fits an ellipse in the bounding box and paints everything outside the ellipse in black. Returns the painted image and a boolean stating whether any orange was found. | def get_pixels_inside_orange(hsv):
hsv_inside_orange = hsv.copy()
hsv_orange_mask = get_orange_mask(hsv)
hsv_save_image(hsv_orange_mask, "2b_orange_mask", is_gray=True)
orange_x, orange_y = np.where(hsv_orange_mask==255)
if len(orange_x) == 0:
# If no orange in image: return origin... | [
"def test_ellipse(self):\n self.ia.open(datapath + \"gal.im\")\n reg = rg.fromtextfile(\n datapath + \"testEllipse90deg.crtf\",\n csys = self.ia.coordsys().torecord(),\n shape=self.ia.shape()\n )\n subi = self.ia.subimage(\"\", region=reg)\n self.i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the angle between the line from the centroid to the arrowhead and the negative x-axis. | def calc_angle_centroid_arrowhead(centroid, arrowhead):
v_1 = arrowhead - centroid
dx, dy = v_1[0], v_1[1]
theta = np.degrees(np.arctan2(dy, -dx))
return theta | [
"def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle",
"def get_line_angle_in_degrees(self, *args, **k... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the angle between the line from the centroid to the goal point and the negative x-axis. The goal point is between the two inner corners in the H. | def calc_angle_centroid_goal_point(centroid, goal_point):
v_2 = goal_point - centroid
dx, dy = v_2[0], v_2[1]
alpha = np.degrees(np.arctan2(-dy, -dx))
return alpha | [
"def angle(self):\n act_loc = self.thin_face.parent_thin.parent_lattice.z_line\n myo_loc = self.thick_face.get_axial_location(-1)\n ls = self.parent_lattice.lattice_spacing\n angle = np.arctan2(ls, act_loc-myo_loc)\n return angle",
"def arrival_angle(self, last_but_one_x, last_b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter out the corners that belong to a right-angled corner, i.e. corners with a mean intensity value around 255/4 ≈ 64. | def clip_corners_on_intensity(corners, img, average_filter_size):
value_per_degree = 255.0/360.0
min_degree, max_degree = 60, 120 # +- 30 from 90 degrees
# Since 255 is white and 0 is black, subtract from 255
# to get black intensity instead of white intensity
min_average_intensity = 255 - max_degr... | [
"def highlight_significant_corners(corners, image):\n\n # This line is equivalent to the nested loop below, but much faster.\n image[corners > 0.01 * corners.max()] = [0, 255, 0]\n\n # for rowIndex in range(len(corners)):\n # for pixelIndex in range(len(corners[0])):\n # if corners[rowInd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test whether storing distance weights works. | def test_store_weights():
abc = MockABC([{'s1': -1, 's2': -1, 's3': -1},
{'s1': -1, 's2': 0, 's3': 1}])
x_0 = {'s1': 0, 's2': 0, 's3': 1}
weights_file = tempfile.mkstemp(suffix=".json")[1]
print(weights_file)
def distance0(x, x_0):
return abs(x['s1'] - x_0['s1'])
de... | [
"def test_setup_database_consistent(self):\n\t\tself.assertWeightsNonnegative()",
"def test_weights_differ(self):\r\n #\r\n weights = set()\r\n weights.add(coconuts.SouthAsian().weight)\r\n weights.add(coconuts.MiddleEastern().weight)\r\n weights.add(coconuts.American().weight)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a service name, existing url_pattern, and url path prefix, derive a normalized/finalized url path based on them. | def derive_service_path(service_name, url_pattern=None, path_prefix=None):
if not isinstance(service_name, basestring) or not service_name:
raise ValueError("Service name must be a non-empty string.")
path_parts = []
if not isinstance(url_pattern, basestring) or not url_pattern:
url_patter... | [
"def get_url_pattern(urlname, args=[]):\r\n patterns = get_resolver(None).reverse_dict.getlist(urlname)\r\n if not args:\r\n return '/%s' % patterns[0][0][0][0]\r\n\r\n for pattern in patterns:\r\n if pattern[0][0][1] == args:\r\n return '/%s' % pattern[0][0][0]",
"def expand_url... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Once the daily data has been collected, we need to append the extra value and the description to every day. | def populate_extra_data(week_data, description):
for day, day_week_data in six.iteritems(week_data):
value = day_week_data['value']
if day in SQUARE_DAYS:
extra_value = value ** 2
day_week_data['square'] = extra_value
elif day in DOUBLE_DAYS:
extra_value =... | [
"def add_new_data(self, value, comment,\n date=dt.datetime.now().strftime(\"%Y%m%d\")):\n day = pd.DataFrame([{'date': pd.Timestamp(date),\n 'value': int(value), 'comment': comment}])\n self.account = self.account.append(day)\n self.account = self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
decodes by taking the DFT of the received_sequence and channel_impulse_response, then applies pointwise division to get the true symbol value | def demodulate_sequence(received_sequence, channel_impulse_response_start, channel_impulse_response_end, N, K, Q1, Q2):
if (N % 2 != 0):
raise ValueError("N must be an even integer")
H_start = dft(channel_impulse_response_start, N)
H_end = dft(channel_impulse_response_end, N)
# phase
x = ... | [
"def _call(self, signal):\n\n signal = preemphasis(signal, self.pre_emph)\n\n frames = framesig(signal,\n self.win_len * self.fs,\n self.win_step * self.fs,\n self.win_fun)\n\n pspec = powspec(frames, self.nfft)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
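The `demodulate_sequence` document is truncated; its signature suggests it interpolates between start- and end-of-frame channel estimates. A minimal sketch of just the core step the query names — DFT both signals, then divide pointwise — assuming a single channel response (NumPy-based; names are illustrative):

```python
import numpy as np

def equalize(received_block: np.ndarray, impulse_response: np.ndarray, N: int) -> np.ndarray:
    Y = np.fft.fft(received_block, n=N)    # DFT of the received sequence
    H = np.fft.fft(impulse_response, n=N)  # channel frequency response
    return Y / H                           # pointwise division -> symbol estimates
```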
Implements a classification (binary crossentropy) loss function for DragonNet architecture. | def binary_classification_loss(concat_true, concat_pred):
t_true = concat_true[:, 1]
t_pred = concat_pred[:, 2]
t_pred = (t_pred + 0.001) / 1.002
losst = tf.reduce_sum(K.binary_crossentropy(t_true, t_pred))
return losst | [
"def loss(y_true, y_pred):\n return categorical_crossentropy(y_true=y_true, y_pred=y_pred)",
"def loss_func(y_true, y_pred):\n\n return tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1, reduction=\"none\")(y_true, y_pred)",
"def _cross_entropy_loss(self, y_true_clf, y_pred_clf, training_mas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tracks the mean absolute value of epsilon. | def track_epsilon(concat_true, concat_pred):
epsilons = concat_pred[:, 3]
return tf.abs(tf.reduce_mean(epsilons)) | [
"def set_epsilon(self, epsilon):\r\n self.epsilon = epsilon",
"def decay_epsilon(self):\n if self.epsilon > EPSILON_MIN:\n self.epsilon = max(EPSILON_MIN, self.epsilon * EPSILON_DECAY_RATE)",
"def decayed_epsilon(self):\n # exploration rate is never smaller than 0.001 and greater... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get templates on its main and neighboring channels | def on_main_channel(templates):
pass | [
"def probe_templates(self, protocol, model_id):\n if protocol == \"1:1\":\n matches = self._read_match_file(\"1:1\", \"ijbc_11_G1_G2_matches.csv\")[model_id]\n return [self._templates[\"Mixed\"][m] for m in matches]\n elif protocol == \"Covariates\":\n matches = self._... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Public method to grab feed from an object | def get_object_feed(self, endpoint, limit=250, since=None, until=None):
endpoint_id = self.get_object_id(endpoint)
url = self.base_url + '/' + endpoint_id + '/feed'
params = {'limit': limit, 'access_token': self.access_token}
if since is not None:
params['since'] = since
... | [
"def get( self ):\n #using urlgrabber so it doesn't matter whether feed is a file or a url\n logger.debug(\"Opening feed: \" + self.feed)\n fd = urlopen( self.feed )\n feed = {}\n #is this an OPML file?\n try:\n outlines = OPML.parse( fd ).outlines\n l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Queries the Facebook API to see if the input is valid. | def is_valid(self, object_name):
try:
resp = self.get_object_id(object_name)
return True
except FacebookError as e:
return False | [
"def validate_data(data):\n if not data.get(\"response_url\"):\n return False\n if not data.get(\"token\"):\n return False\n if not data.get(\"command\"):\n return False\n if not data.get(\"user_name\"):\n return False\n if not data.get(\"channel_name\"):\n return F... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Utility function that determines if this sample should be kept | def should_keep(p_keep=1.):
return random.random() <= p_keep | [
"def should_sample(self):\n return self.span_context.enabled and self.sampler.should_sample",
"def exclude_samples(self):\n return ~self.frame_flagspace.flags.SAMPLE_SOURCE_BLANK",
"def is_sample(run):\n kind = get_kind(run)\n return kind in (\"sesans\", \"sans\")",
"def is_excluded_item(t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function. Loads all the beams in a directory into bricks and data | def _load_beams(self, dirpaths: Iterable[str]) -> [Iterable[Brick], Iterable[Brick]]:
random.seed(self._training['seed'])
beam_filepaths = []
for dirpath in dirpaths:
beam_filepaths.extend([os.path.join(dirpath, filename) for filename in os.listdir(dirpath)])
beam_filepaths ... | [
"def load_beam(self, filepath: str) -> Iterable[Brick]:",
"def load_all_blocks(folder_path):\n\n import os\n import numpy as np\n gt_files = np.sort(os.listdir(folder_path))\n\n block_list=[]\n\n for file in gt_files:\n\n block_path=os.path.join(folder_path, file)\n\n block_list.appen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Loads the bricks for a specific beam and returns it. | def load_beam(self, filepath: str) -> Iterable[Brick]: | [
"def _load_beams(self, dirpaths: Iterable[str]) -> [Iterable[Brick], Iterable[Brick]]:\n\n random.seed(self._training['seed'])\n beam_filepaths = []\n for dirpath in dirpaths:\n beam_filepaths.extend([os.path.join(dirpath, filename) for filename in os.listdir(dirpath)])\n beam... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load graphnet module from string name. | def load_module(class_name: str) -> Type:
# Get a lookup for all classes in `graphnet`
import graphnet.data
import graphnet.models
import graphnet.training
namespace_classes = get_all_grapnet_classes(
graphnet.data, graphnet.models, graphnet.training
)
return namespace_classes[class... | [
"def load(name):\n g = read_graphml(\"graphs//\" + name + \".graphml\", node_type=int)\n return g",
"def load_module(self, name):\n if name not in sys.modules:\n sys.modules[name] = getattr(maps, name.split('.')[2])\n return sys.modules[name]",
"def _LoadDriver(self, name):\n m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct `Dataset` instance from `source` configuration. | def from_config( # type: ignore[override]
cls,
source: Union[DatasetConfig, str],
) -> Union[
"Dataset",
"EnsembleDataset",
Dict[str, "Dataset"],
Dict[str, "EnsembleDataset"],
]:
if isinstance(source, str):
source = DatasetConfig.load(source)
... | [
"def make_data_source(self, batch_size: int, cutoff_date: datetime = None) -> DataSource:\n importers_module = importlib.import_module(\"vulnerabilities.importers\")\n klass = getattr(importers_module, self.data_source)\n\n ds = klass(\n batch_size,\n last_run_date=self.la... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Concatenate multiple `Dataset`s into one instance. | def concatenate(
cls,
datasets: List["Dataset"],
) -> "EnsembleDataset":
return EnsembleDataset(datasets) | [
"def append_datasets(*datasets):\n logging.info(\"Running union on datasets\")\n return reduce(DataFrame.unionAll, datasets)",
"def concat(dataarrays: Sequence[\"DataArray\"], keep=\"last\") -> \"DataArray\":\n from mikeio import Dataset\n\n datasets = [Dataset([da]) for da in dataarrays]\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct `Dataset` for each entry in dict `self.selection`. | def _construct_datasets_from_dict(
cls, config: DatasetConfig
) -> Dict[str, "Dataset"]:
assert isinstance(config.selection, dict)
datasets: Dict[str, "Dataset"] = {}
selections: Dict[str, Union[str, List]] = deepcopy(config.selection)
for key, selection in selections.items()... | [
"def _construct_dataset_from_list_of_strings(\n cls, config: DatasetConfig\n ) -> \"Dataset\":\n assert isinstance(config.selection, list)\n datasets: List[\"Dataset\"] = []\n selections: List[str] = deepcopy(cast(List[str], config.selection))\n for selection in selections:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct `Dataset` for each entry in list `self.selection`. | def _construct_dataset_from_list_of_strings(
cls, config: DatasetConfig
) -> "Dataset":
assert isinstance(config.selection, list)
datasets: List["Dataset"] = []
selections: List[str] = deepcopy(cast(List[str], config.selection))
for selection in selections:
config... | [
"def _to_dataset(self):\n from mikeio import Dataset\n\n return Dataset(\n {self.name: self}\n ) # Single-item Dataset (All info is contained in the DataArray, no need for additional info)",
"def build_eval_dataset(self):\n pass",
"def build_synthetic_dataset(self):\n pass... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Name of the table containing event-level truth information. | def truth_table(self) -> str:
return self._truth_table | [
"def __str__(self):\n return_string = \"Truth Table type=\"\n return_string += 'REPORTING' if self.type == TruthTableType.REPORTING else 'TRANSITION'\n return_string += '\\n'\n for k,v in self.header.items():\n if k not in ['next_state', 'output']:\n return_stri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the event index corresponding to a `sequential_index`. | def _get_event_index(
self, sequential_index: Optional[int]
) -> Optional[int]: | [
"def _get_index(self, beacon_config, label):\n\n indexes = [index for index, item in enumerate(beacon_config) if label in item]\n if not indexes:\n return -1\n else:\n return indexes[0]",
"def get_index(self) -> int:\n return self._index",
"def get_index(self, l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add custom graph label defined using function `fn`. | def add_label(
self, fn: Callable[[Data], Any], key: Optional[str] = None
) -> None:
if isinstance(fn, Label):
key = fn.key
assert isinstance(
key, str
), "Please specify a key for the custom label to be added."
assert (
key not in self._la... | [
"def axis_label(label):\n\n def result(func):\n func.__axis_label__ = label\n return func\n\n return result",
"def id_for_label(value):\n return f\"labels->{value}\"",
"def set_label(self, label):",
"def handle_label(self, label, namespace):\n raise NotImplementedError",
"def a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return graph `Data` object at `index`. | def __getitem__(self, sequential_index: int) -> Data:
if not (0 <= sequential_index < len(self)):
raise IndexError(
f"Index {sequential_index} not in range [0, {len(self) - 1}]"
)
features, truth, node_truth, loss_weight = self._query(
sequential_index... | [
"def index(self,index):\n node = self.front\n counter = 0\n while counter < index:\n node = node.getNext()\n counter += 1\n return node.getData()",
"def datum(self, *index):\n data = self.get_data(None)\n if data is None:\n raise ValueErro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Resolve selection as string to list of indices. Selections are expected to have pandas.DataFrame.query-compatible syntax, e.g., ``` "event_no % 5 > 0" ``` Selections may also specify a fixed number of events to randomly sample, e.g., ``` "10000 random events ~ event_no % 5 > 0" "20% random events ~ event_no % 5 > 0" ``` | def _resolve_string_selection_to_indices(
self, selection: str
) -> List[int]:
return self._string_selection_resolver.resolve(selection) | [
"def _sample_to_idxs(df: pd.DataFrame, sample: str) -> List[int]:\n if sample.startswith((\"SRR\", \"DRR\", \"ERR\")):\n idxs = df.index[df.run_accession == sample].tolist()\n assert len(idxs) == 1, f\"sample {sample} with idxs: {idxs}\"\n elif sample.startswith((\"SRX\", \"ERX\", \"DRX\")):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove columns that are not present in the input file. Columns are removed from `self._features` and `self._truth`. | def _remove_missing_columns(self) -> None:
# Check if table is completely empty
if len(self) == 0:
self.warning("Dataset is empty.")
return
# Find missing features
missing_features_set = set(self._features)
for pulsemap in self._pulsemaps:
mis... | [
"def drop_features(self):\n\n if self.drop_columns is not None:\n cols = self.features_df.columns\n feature_list = list()\n for col in cols:\n if (col.split('_')[0] in self.drop_columns) | (col in self.drop_columns):\n feature_list += [col]\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of missing columns in `table`. | def _check_missing_columns(
self,
columns: List[str],
table: str,
) -> List[str]:
for column in columns:
try:
self.query_table(table, [column], 0)
except ColumnMissingException:
if table not in self._missing_variables:
... | [
"def get_columns_with_missing_values(self): #df dataframe\n missing_df = self.get_count_of_missing_values()\n missing_data = missing_df[missing_df[0] != 0]\n return missing_data",
"def get_missing_columns(column_labels: set, df: pd.DataFrame) -> List:\n return [\n column\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return iterator for sequence of strongly connected components. | def __iter__(self):
return iter(self._components) | [
"def iterator(self, *args, **kwargs):\n return _decomp.component_set_iterator(self, *args, **kwargs)",
"def iter(self) -> Iterator[Sequence]:\n ...",
"def __iter__(self):\n for coreg in self.pipeline:\n yield coreg",
"def __iter__(self):\n for node in self.nodes(): ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the property access on the auto config workflow. | def test_auto_configure_properties(project):
config_name = "Test"
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert auto_config.design_execution is None
assert auto_config.score is None
assert len(auto_config.candidates) == 0 | [
"def test_config_customproperties_get(self):\n pass",
"def test_config_get(self):\n pass",
"def test_properties(self):\n prj = self._read_string(\"\"\"\nProject 3372\n $prop1 value1\nTask a\n $prop2 value2\n $prop3 \"long property value\"\n\"\"\")\n self.assertEqual(prj.getP... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the score execution on auto config workflow. | def test_auto_config_execute(project):
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.predictors.register(resources["predictor"])
project.predictor_evaluation_workflows.register(resources["pew"])
project.design_sp... | [
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_configure_pre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the table build stage of auto configure. | def test_auto_config_table_build(project):
config_name = "Test"
auto_config = AutoConfigureWorkflow(project=project, name=config_name)
assert len(auto_config.assets) == 0
auto_config._table_build_stage(
material="Fake Material",
mode=AutoConfigureMode.PLAIN
)
assert len(auto_con... | [
"def test_build_from_database(self):",
"def build_tables():\n yield setup_tables()\n IOLoop.current().stop()",
"def test_build_creation(self):",
"def test_auto_configure_design_space_build(project):\n config_name = \"Test\"\n resources = default_resources(config_name)\n project.table_configs.re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the predictor registration stage of auto configure. | def test_auto_configure_predictor_registration(project):
# Start from having a table config and table
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.tables.build_from_config(resources["table_config"])
auto_config... | [
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_configure_pre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the predictor evaluation stage of auto configure. | def test_auto_configure_predictor_evaluation(project, caplog):
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.tables.build_from_config(resources["table_config"])
project.predictors.register(resources["predictor"])
... | [
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_config_execut... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test the design space build stage of auto configure. | def test_auto_configure_design_space_build(project):
config_name = "Test"
resources = default_resources(config_name)
project.table_configs.register(resources["table_config"])
project.tables.build_from_config(resources["table_config"])
project.predictors.register(resources["predictor"])
project.p... | [
"def test_auto_configure_properties(project):\n config_name = \"Test\"\n auto_config = AutoConfigureWorkflow(project=project, name=config_name)\n\n assert auto_config.design_execution is None\n assert auto_config.score is None\n assert len(auto_config.candidates) == 0",
"def test_auto_config_table_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle entity registry update events. | def entity_registry_updated(self, event):
if event.data["action"] == "remove":
self.remove_empty_devices() | [
"async def update_entity_registry() -> None:\n\n get_base().entities = await hass_entities()",
"async def update_registries() -> None:\n\n await update_area_registry()\n await update_entity_registry()",
"def on_entity_update(self, event):\n self.entity.cubolt_entity.on_entity_update(event)",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove devices with no entities. | def remove_empty_devices(self):
entity_registry = er.async_get(self.hass)
device_registry = dr.async_get(self.hass)
device_list = dr.async_entries_for_config_entry(
device_registry, self.config_entry.entry_id
)
for device_entry in device_list:
entities = ... | [
"def destroy(self):\n for item in self.__dict__:\n self.removeDevice(item)",
"def delete(self, *devices):\n for d in devices:\n d.delete()",
"def test_setup_component_without_devices(self):\n self.hass.data[dyson.DYSON_DEVICES] = []\n add_devices = MagicMock()\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Via the requests library, performs a post with the payload to the path | def post(self, payload={}, path=""):
return requests.post(self.base_url + path,
data=json.dumps(payload),
headers=self.headers) | [
"def post(self, path, data):\n return(self._request('POST', path, json=data))",
"def _post(self, path, data):\n url = self._url(path)\n if self.debug:\n pprint(data)\n resp = self.session.post(url, data=data)\n if self.debug:\n print('CODE', resp.status_cod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Via the requests library, performs a put with the payload to the path | def put(self, payload={}, path=""):
return requests.put(self.base_url + path,
data=json.dumps(payload),
headers=self.headers) | [
"def put(self, path, data):\n return(self._request('PUT', path, json=data))",
"def _put(self, subpath: str, data: any = None) -> None:\n self._request('put', subpath, data, None)",
"def put(self, url, params=b'', headers=None, extra_environ=None,\n status=None, upload_files=None, expec... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Via the requests library, performs a patch with the payload to the path | def patch(self, payload={}, path=""):
return requests.patch(self.base_url + path,
data=json.dumps(payload),
headers=self.headers) | [
"def method_patch(self, uri, **kwargs):\r\n return self._api_request(uri, \"PATCH\", **kwargs)",
"def sample_patch_request(host, username, password, resource, data):\n # build the URL\n url = urlunparse(('https', host, resource, None, None, None))\n print \"PATCH: %s\" % url\n\n return requests.pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Via the requests library, performs a get with the payload to the path | def get(self, payload={}, path=""):
return requests.get(self.base_url + path,
data=json.dumps(payload),
headers=self.headers) | [
"def get(path, **kwargs):\n return generic_request('GET', path, **kwargs)",
"def get(self, path, **kwargs):\n return(self._request('GET', path, params=kwargs))",
"def _get(self, url, query_params=None):\n return self._request(\"GET\", url, query_params)",
"def _get(self, url, *args, **kwargs)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Via the requests library, performs a delete with the payload to the path | def delete(self, payload={}, path=""):
return requests.delete(self.base_url + path,
data=json.dumps(payload),
headers=self.headers) | [
"def delete_request():",
"def _delete(self, subpath: str) -> None:\n self._request('delete', subpath, None, None)",
"def test_request_delete(self):\n r = self.base._request('/delete', 'DELETE', {\n 'foo': 'bar'\n })\n self.assertEqual(r['url'], 'https://httpbin.org/delete?... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes comments and extra spaces from the file and puts its commands in a list | def _clean(self):
lines = self.__file.readlines()
for line in lines:
command = line.replace('\n', '')
if not command.startswith('//') and command != '':
result = command.split()
self.__vm_commands.append(result) | [
"def initialise(input_file):\n\tlines = [] \t\t\t# stores lines from input_file \n\tcommands = []\t\t# stores commands from \"cleaned\" lines\n\n\twith open(input_file) as f:\n\t\tfor line in f:\n\t\t\tlines.append(line)\n\n\tfor line in lines:\n\t\tif line[0] != \"/\" and line != \"\\n\":\n\t\t\t# ignore whitespac... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create thumbnails for the company logo. | def create_company_logo_thumbnails(company_id):
create_thumbnails(
pk=company_id, model=Company, size_set="company_logos", image_attr="logo"
) | [
"def logo():",
"def test_create_thumbnail(self):\r\n\r\n raise SkipTest\r\n\r\n if not os.path.isdir(TEST_OUT):\r\n os.makedirs(TEST_OUT)\r\n\r\n input_raster_uri = os.path.join(REGRESSION_DATA, 'png_reg_raster.png')\r\n output_uri = os.path.join(TEST_OUT, 'png_thumbnail.png... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
connects to broker and provides convenience methods | def __init__(self, hostname, port, vhost, userid, password):
self.broker = BrokerConnection(hostname=hostname, port=port,
userid=userid, password=password,
virtual_host=vhost) | [
"def __init__(self, broker):\n\n self.broker = broker",
"def connect_mqtt(self):\n\n\t\tdef on_connect(client, userdata, flags, rc):\n\t\t\t\"\"\"\n\t\t\tThis method is the callback for a connection try.\n\t\t\t:param client: the client\n\t\t\t:param userdata: the submitted userdata\n\t\t\t:param flags: th... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
declares the exchange, the queue and binds the queue to the exchange. exchange: exchange name; exchange_type: direct, topic, fanout; binding: binding to queue (optional); queue: queue to bind to exchange using binding (optional) | def declare(self, exchange, exchange_type, binding="", queue=""):
if (binding and not queue) or (queue and not binding):
if queue and not exchange_type == "fanout":
raise Error("binding and queue are not mutually exclusive")
consumer = Consumer(connection=self.broker,
... | [
"def declare_exchange_to_queue_binding(self, exchange: str, queue: str, *, routing_key: str = \"\",\n arguments: dict = None,\n vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
publish a message to exchange using routing_key. exchange: name of exchange; routing_key: interpretation of routing key depends on exchange type; message: message content to send | def publish(self, exchange, routing_key, message,
auto_declare=False, persistent=True):
delivery_mode = 2
if not persistent:
delivery_mode = 1
publisher = Publisher(connection=self.broker,
exchange=exchange, routing_key=routing_key,
... | [
"def publish_message(self, exchange: str, *, routing_key: str = \"\", payload: str = \"\",\n payload_encoding: str = \"string\",\n properties: dict = None, vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
decode message envelope. Args: message_data: encoded message envelope (see encode_message); secret: secret key to decrypt message (if encrypted). Returns (sender, content, timestamp): sender: message sender; content: content string (plaintext); timestamp: datetime instance | def decode_message(message_data, secret=None):
sender = str(message_data['sender'])
content = base64.urlsafe_b64decode(str(message_data['content']))
timestamp = datetime(*map(lambda f: int(f), message_data['timestamp-utc']))
if message_data['encrypted']:
content = decrypt(content, secret)
... | [
"def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_signature':\n # NOTE: right now we're only handling fours\n if msg.numerator == 4 and msg.denominator == 4:\n data = _dict_update(\n data,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
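The docstring references an `encode_message` counterpart that is not shown. Inferring its shape purely from the fields `decode_message` reads, a hypothetical plaintext-only sketch (the real encoder may differ, particularly in the encryption path):

```python
import base64
from datetime import datetime, timezone

def encode_message(sender: str, content: str) -> dict:
    # Mirror the fields decode_message consumes: sender, urlsafe-base64
    # content, a UTC timestamp tuple, and an encryption flag.
    now = datetime.now(timezone.utc)
    return {
        'sender': sender,
        'content': base64.urlsafe_b64encode(content.encode('utf-8')).decode('ascii'),
        'timestamp-utc': [now.year, now.month, now.day,
                          now.hour, now.minute, now.second],
        'encrypted': False,  # plaintext; encryption omitted in this sketch
    }
```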
A task to display the mosaic image in the dataserver | def displayMosaic( fitsfd ):
myDS9 = ds9()
fname = fitsfd.filename()
myDS9.set( "file mosaicimage {}".format(fname) )
myDS9.set("zoom to fit")
return fitsfd | [
"def test_raft_image_mosaic(self):\n infiles = sorted(glob.glob(os.path.join(_root_dir, 'S??',\n '*_lambda_flat_1000_*.fits')))\n infiles = OrderedDict([(filename.split('/')[-2], filename)\n for filename in infiles])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
starts the uploader and shows bounding boxes for a detected face | def run(self):
cv2.namedWindow(consts.UPLOADER_WINDOW)
# TODO : video capture source should be handled by camera.py and /
# not default 0(webcam)
self.camera = cv2.VideoCapture(0)
while self.camera.isOpened() and self.ready_to_detect_face:
_, frame = self.came... | [
"def run(self):\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if ret:\n boxes, face_probs = self.mtcnn.detect(frame)\n if boxes is not None and len(boxes) > 0:\n name_probs = []\n for box ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
detects the face in the frame | def _detect_face(self, frame):
face_coords = list()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = self.detector(gray, 0)
print(rects)
# get bounding box for every face in the frame
for i, d in enumerate(rects):
x1 = d.left()-consts.PADDING
... | [
"def ffp_detect(self, img):\r\n # convert to gray\r\n if img.ndim > 2:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n \r\n # detect face first\r\n bbox = self.face_detect(img).flatten()\r\n num_pts = self.face_lmks_model['num_pts']\r\n norm_width ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
pickle an object, and zip the pickle before sending it | def send_zipped_pickle(socket, obj, flags=0, protocol=2):
p = pickle.dumps(obj, protocol)
# z = zlib.compress(p, 8)
return socket.send(p, flags=flags) | [
"def send_zipped_pickle(socket, obj, flags=0, protocol=2):\n p = pickle.dumps(obj, protocol)\n # z = zlib.compress(p, 8)\n return socket.send(p, flags=flags)",
"def save_object(object, filename, protocol = cPickle.HIGHEST_PROTOCOL):\r\n gc.disable()\r\n if filename.endswith('.zip'):\r\n f = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
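Note the mismatch in this row: the query promises to zip the pickle, but the document leaves `zlib.compress` commented out and sends the raw pickle. What the docstring actually describes would look like this (same signature, compression re-enabled at level 8 as in the commented-out line):

```python
import pickle
import zlib

def send_zipped_pickle(socket, obj, flags=0, protocol=2):
    p = pickle.dumps(obj, protocol)  # serialize the object
    z = zlib.compress(p, 8)          # compress before sending
    return socket.send(z, flags=flags)
```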
Update the elapsed time display. Invoked once per second | def refresh_time(self):
if (self.enabled):
self.elapsed_time += ONE_SECOND
#self.window.set_title("stopwatch %s" % self.elapsed_time)
self.time_counter.set_text(str(self.elapsed_time))
return True | [
"def update():\n seconds = 0 if self.start_time == 0 else round(time.time() - self.start_time)\n hours = seconds // 3600\n seconds = seconds % 3600\n minutes = seconds // 60\n seconds = seconds % 60\n cur_time = \"\"\n if hours < 10:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset the elapsed time back to zero. Invoked when the reset button is pressed. | def reset_time(self, widget, data=None):
self.elapsed_time = datetime.timedelta()
self.time_counter.set_text(str(self.elapsed_time))
return | [
"def OnResetTime(self, event):\n self.modelview.timezero(0)",
"def reset_timing(self):\n\n self.timing_start = self.current_time()",
"def reset_time(self):\n\n self._alive_time = 0 # No need to signal the change, since the view is updated by the value toggle",
"def reset(self) -> None:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Power (watts) = slope x speed (km/h) + intercept
Level   1      2      3      4      5      6      7      8      9      10
Slope   3.73   5.33   6.87   8.27   10.07  11.4   13.13  14.4   15.93  17.73
Intcpt  28.67  36.67  43.33  47.33  66.33  67.00  83.67  82.00  89.67  114.67 | def calcPower(speed, resistance_level):
satoridata = [
{
'level': 1,
'slope': 3.73,
'intercept': -28.67
},
{
'level': 2,
'slope': 5.33,
'intercept': -36.67
},
{
'level': 3,
... | [
"def measurePower(self,low):\n if math.fabs(low[0]) > 2.0:\n return 100.0\n self._awg.setOffset(self._awgChannel,low[0])\n minimum = self.measureAveragePower()\n print \"Measuring power at %g : %g\" % (low[0],minimum)\n self.d.set(minimum=minimum, offset=low[0])\n self.d.commit()\n linpowe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
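The `calcPower` document is truncated after level 3, but the docstring table plus the visible entries determine the rest. Note the code stores the intercepts negated (e.g. -28.67 for level 1), so the table row evidently lists magnitudes. A compact sketch of the full linear model under that reading:

```python
# (slope, intercept) per resistance level; intercepts negated as in the
# visible entries of the original satoridata list.
SATORI = {
    1: (3.73, -28.67), 2: (5.33, -36.67), 3: (6.87, -43.33),
    4: (8.27, -47.33), 5: (10.07, -66.33), 6: (11.4, -67.00),
    7: (13.13, -83.67), 8: (14.4, -82.00), 9: (15.93, -89.67),
    10: (17.73, -114.67),
}

def calc_power(speed_kmh: float, resistance_level: int) -> float:
    # power (watts) = slope * speed (km/h) + intercept
    slope, intercept = SATORI[resistance_level]
    return slope * speed_kmh + intercept
```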
Generate the whole binary pattern for length k | def generate(k):
for i in range(int(math.pow(2, k))):
pat = []
count = i
for j in range(k):
pat.append(count%2)
count = count >> 1
wholePattern.append(list(reversed(pat))) | [
"def InitializeBinary(k):\n b = np.zeros([k, ], dtype=np.int8)\n\n \"\"\" Primal test, half of b set to 1, and another half is 0 \"\"\"\n choice = np.random.choice(k, k // 2)\n b[choice] = 1\n return b",
"def gen_all_n_length_bitsrings(n):\n for i in range(1 << n):\n yield '{:0{}b}'.forma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return package Git URL. | def git_url(cls) -> str:
return cls.url() + ".git" | [
"def pip_url(self, repo, package, version):\n return 'git+ssh://git@{0}/{1}/{2}.git@{4}#egg={3}-{4}'.format(\n self.host,\n self.vendor,\n repo,\n package,\n version\n )",
"def package_repo_url(self, package_name):\n s = self._repos[packa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return MeCab version the package depends on. | def depends_mecab_version(cls) -> str:
return "0.996" | [
"def get_version():\n import pkg_resources # part of setuptools\n return pkg_resources.require(\"mbed-ls\")[0].version",
"def get_version():\n from pkg_resources import get_distribution\n return get_distribution('funkload').version",
"def get_version_from_package() -> str:\n\n path = os.path.joi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of absolute paths to documentation files. | def misc_docs(cls) -> List[str]:
path_to_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mecab-ipadic-neologd')
return [
os.path.join(path_to_root, 'README.md'),
os.path.join(path_to_root, 'README.ja.md'),
os.path.join(path_to_root, cls.changelog_fil... | [
"def get_documentation_files ():\n installpath = os.path.join (\"share\", \"doc\", \"ocempgui\")\n docpaths = get_directory_list (\"doc\")\n\n # Traverse all the directories in the docpath an get the needed files.\n # Every file installed from the docs will have a suffix.\n filedict = {}\n for pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Hardlink dictionary files from input directory and documentation to target directory. | def _link_dictionaries_and_docs(cls, input_dir: str, lib_dir: str, doc_dir: str, config: PackageConfig) -> None:
cls._mkdir_p(lib_dir)
cls._mkdir_p(doc_dir)
logging.info('Linking MeCab files to library directory...')
for filename in os.listdir(input_dir):
full_filename = os.... | [
"def linkFileDict(fileDict, interactive):\n for f in fileDict:\n target = buildDotfilesPath(f)\n linkName = buildHomePath(fileDict[f])\n linkFile(target, linkName, interactive)",
"def create_reference_files(self):\n params = self.params\n params.ref_paths = {}\n\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return package type to use for arguments. | def package_type(cls) -> str:
raise NotImplemented("Abstract method.") | [
"def package_type(self):\n ret = self._get_attr(\"packageType\")\n return ret",
"def get_pkg_type():\n plt = get_os_name()\n if plt in PACK_TYPES:\n return PACK_TYPES[plt]\n raise UnsupportedOsError(f'No supported Package type for platform \"{plt}\"')",
"def type(self):\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate and return Bintray descriptor JSON for a package. | def bintray_descriptor_json(bintray_repository_name: str,
bintray_subject: str,
version: str,
revision: str,
version_tag: str,
package_path: str,
config... | [
"def package_json(context: Context):\n context.write_template('package.json')",
"def create_datadescript(input_dir):\n print(f\"Creating a simple dataset_description.json in {input_dir}... \")\n name = Path(input_dir).stem\n vers = bids.__version__\n out = dict(Name=name, BIDSVersion=vers)\n wit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
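The `bintray_descriptor_json` document is cut off before its body, so the exact keys it emits cannot be recovered. For orientation only, a sketch of the descriptor shape that Bintray-style deploy tooling commonly expected; the key names here are an assumption, not read from the record:

```python
import json

def bintray_descriptor(repo: str, subject: str, name: str, version: str,
                       vcs_tag: str, upload_pattern: str) -> str:
    # Assumed descriptor layout; the truncated record may differ.
    descriptor = {
        "package": {"name": name, "repo": repo, "subject": subject},
        "version": {"name": version, "vcs_tag": vcs_tag},
        "files": [{"includePattern": upload_pattern,
                   "uploadPattern": upload_pattern}],
        "publish": True,
    }
    return json.dumps(descriptor, indent=2)
```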
Generate a column with the missing pid for subclasses, append the label, concatenate the three properties, and shuffle the data | def get_testing_set(subclass_test, types_test, negatives_test):
# get difference between three columns
difference_column = str(set(negatives_test.columns[7:]).difference(subclass_test.columns[7:])).replace('{', '')
difference_column = difference_column.replace('}', '')
difference_column = differenc... | [
"def process_pclass(combined_data):\n # encoding into 3 categories:\n pclass_dummies = pd.get_dummies(combined_data['Pclass'], prefix=\"Pclass\")\n\n # adding dummy variables\n combined_data = pd.concat([combined_data, pclass_dummies], axis=1)\n\n # removing \"Pclass\" since it's no loner needed\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
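`get_testing_set` is truncated after the column-difference bookkeeping, but its docstring names the remaining steps: fill the missing column, attach labels, concatenate the three frames, and shuffle. A minimal pandas sketch under that reading (column and label values are illustrative, not recovered from the record):

```python
import pandas as pd

def build_testing_set(subclass_df, types_df, negatives_df, missing_col):
    # Fill the column that subclass rows lack, then label each source frame.
    subclass_df = subclass_df.assign(**{missing_col: pd.NA}, label=1)
    types_df = types_df.assign(label=1)
    negatives_df = negatives_df.assign(label=0)
    combined = pd.concat([subclass_df, types_df, negatives_df], ignore_index=True)
    # sample(frac=1) is the idiomatic pandas shuffle.
    return combined.sample(frac=1, random_state=42).reset_index(drop=True)
```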
Check which zodiac sign a date code falls under | def zodiac_sign(sign_num):
global COLUMNS
CAPRICORN = 119
AQUARIUS = 218
PISCES = 320
ARIES = 419
TAURUS = 520
GEMINI = 620
CANCER = 722
LEO = 822
VIRGO = 922
LIBRA = 1022
SCORPIO = 1121
SAGGITARIUS = 1221
if sign_num <= CAPRICORN:
print("You are a CAPRIC... | [
"def get_zodiac_sign(day, month):\n if month == 3: #March\n if day <= 20:\n return 'Pisces'\n else:\n return 'Aries'\n if month == 4: #April\n if day <=19:\n return 'Aries'\n else:\n return 'Taurus'\n if month == 5: #May\n if da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
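The `zodiac_sign` document encodes each sign boundary as an MMDD integer (for example `CAPRICORN = 119` means January 19) and walks a ladder of if-statements; the record is truncated partway through that ladder. A table-driven sketch of the same comparison, replacing the ladder with one loop over the boundaries shown above:

```python
# (month*100 + day) upper bounds, in the order used by the record above.
SIGN_BOUNDS = [
    (119, "Capricorn"), (218, "Aquarius"), (320, "Pisces"), (419, "Aries"),
    (520, "Taurus"), (620, "Gemini"), (722, "Cancer"), (822, "Leo"),
    (922, "Virgo"), (1022, "Libra"), (1121, "Scorpio"), (1221, "Sagittarius"),
]

def zodiac_sign(sign_num: int) -> str:
    for bound, name in SIGN_BOUNDS:
        if sign_num <= bound:
            return name
    return "Capricorn"  # Dec 22-31 wraps back around to Capricorn

assert zodiac_sign(321) == "Aries"  # March 21
```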
Sets list of blocked (blacklisted) keys that should not be accepted. Typically, this functionality is used to block keys for refunded purchases or pirated keys so that they aren't recognized as valid anymore. The data bytearray contains SHA1 hashes of blocked keys (20 bytes per hash). The hashes are computed from licen... | def set_blocked_keys(self, data):
# must be kept around, because el_set_blocked_keys() doesn't make a copy
self._blocked_keys_data = create_string_buffer(data)
_impl.el_set_blocked_keys(byref(self._blocked_keys_data), len(data)) | [
"def test_blacklisted_key(self):\n\n key = \"1QCC5-W30DP-FGFRG-K1JEF-QUDLP\"\n KeyValidator.add_key_to_blacklist(key)\n key_status = KeyValidator.check_key(key)\n self.assertEqual(KeyStatus.BLACKLISTED, key_status)",
"def black_list_checking(self,meta):\n variable_length_message... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
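The docstring for `set_blocked_keys` describes the expected payload precisely: a buffer of concatenated 20-byte SHA1 hashes, one per blocked license key. A sketch of building that buffer with `hashlib`; the exact hash input (raw key string versus some normalized form) is an assumption, since the docstring is cut off at "computed from licen...":

```python
import hashlib

def build_blocked_keys_blob(blocked_keys):
    """Concatenate 20-byte SHA1 digests of each blocked key into one buffer."""
    blob = bytearray()
    for key in blocked_keys:
        blob += hashlib.sha1(key.encode("utf-8")).digest()  # 20 bytes each
    return bytes(blob)

data = build_blocked_keys_blob(["AAAA-BBBB-CCCC", "DDDD-EEEE-FFFF"])
assert len(data) == 2 * 20
```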
Verifies that the license key associated with 'name' is valid. | def verify_license_key(self, key, name):
if isinstance(key, str):
key = key.encode('utf-8')
if isinstance(name, str):
name = name.encode('utf-8')
return bool(_impl.el_verify_license_key(self.ctxt, key, name)) | [
"def check_license(self, name):\n [license_] = self._license(name).licenses\n if not _license_is_active(license_):\n if (\n license_.expiration\n and datetime.strptime(license_.expiration, \"%Y-%m-%d\").date()\n < date.today()\n ):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a new license key for 'name'. | def generate_license_key(self, name):
if isinstance(name, str):
name = name.encode('utf-8')
size = _impl.el_generate_license_key(self.ctxt, name, None)
buf = create_string_buffer(size)
if _impl.el_generate_license_key(self.ctxt, name, byref(buf)) == -1:
raise Runt... | [
"def generate_key(ctx, name):\n click.echo(f\"Generating key file {name}.key...\")\n\n # key generation\n key = Fernet.generate_key()\n\n # string the key in a file\n with open(f'{name}.key', 'wb') as file_key:\n file_key.write(key)\n\n click.echo(f\"Key file {name}.key successfully generat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
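`generate_license_key` is truncated mid-exception, but the visible lines show the classic ctypes two-call pattern: call once with a NULL output pointer to learn the required buffer size, allocate, then call again to fill the buffer. A distilled sketch of that pattern; `lib` and the error convention (-1 on failure) mirror the record, everything else is illustrative:

```python
from ctypes import byref, create_string_buffer

def call_with_sized_buffer(lib, ctxt, name: bytes) -> bytes:
    # First call with a NULL output pointer returns the needed buffer size.
    size = lib.el_generate_license_key(ctxt, name, None)
    buf = create_string_buffer(size)
    # Second call fills the buffer; a -1 return signals failure.
    if lib.el_generate_license_key(ctxt, name, byref(buf)) == -1:
        raise RuntimeError("license key generation failed")
    return buf.value
```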
Collapse feature table to user specified taxa level (ASV by default). | def collapse_taxa(feature_table_artifact, taxonomy_artifact, collapse_level="asv"):
collapse_level = collapse_level.lower()
if(collapse_level not in VALID_COLLAPSE_LEVELS):
raise AXIOME3Error("Specified collapse level, {collapse_level}, is NOT valid!".format(collapse_level=collapse_level))
# handle ASV case
if(... | [
"def collapse_tax(self):\n try:\n for level in self.inputs['levels']:\n if level != 'otu':\n for x in list(self.levels['otu']):\n self.levels[level][x] = _data_bin(self.otu[x], self.n[level], level + '_' + x)\n self.write_bioms()\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter a dataframe by abundance in a specified column | def filter_by_abundance(df, abundance_col, cutoff=0.2):
if(abundance_col not in df.columns):
raise AXIOME3Error("Column {col} does not exist in the dataframe".format(col=abundance_col))
filtered_df = df[df[abundance_col] >= cutoff]
if(filtered_df.shape[0] == 0):
raise AXIOME3Error("No entries left with {cutoff... | [
"def filter(self, df):\n pass",
"def filter_df(df, filter_column, filter_value):\n return df[df[filter_column] == filter_value]",
"def select_taxa_from_summary(df, taxa):\n df['filter'] = df.taxa.apply(\n lambda x: True if int(taxa) in x else False)\n return df[df['filter'] == True]",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
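Since `filter_by_abundance` is shown in full, a short usage sketch of the core comparison it performs; the column name and values below are made up:

```python
import pandas as pd

df = pd.DataFrame({"taxon": ["A", "B", "C"], "rel_abundance": [0.5, 0.1, 0.3]})
# Keep rows at or above the 0.2 cutoff: A and C survive, B is dropped.
kept = df[df["rel_abundance"] >= 0.2]
assert list(kept["taxon"]) == ["A", "C"]
```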
Alphabetically sort a dataframe by the given columns | def alphabetical_sort_df(df, cols):
for col in cols:
if(col not in df.columns):
raise AXIOME3Error("Column {col} does not exist in the dataframe".format(col=col))
sorted_df = df.sort_values(by=cols)
return sorted_df | [
"def sort_by(df, column_name): # reformat date so the sorting starts from year-month-date\n\n df = df.sort_values(by=[column_name]) # sort the reformat date value\n return df",
"def order_by_col(df: DataFrame, column: str) -> DataFrame:\n df = df.sort(col(column).asc())\n return df",
"def sort_df(d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Chopping DNA into small (150 bp) pieces | def chop_dna(dna):
read_len = 150
max_ovl = 50
min_coverage = 5
out = []
dna_len = len(dna)
base_id = dna.id
starts = []
start = 0
read_n = math.floor((dna_len - max_ovl)/(read_len - max_ovl))
if read_n > 1:
ovl_len = (read_len * read_n - dna_len)/(read_n - 1)
else:
... | [
"def flower(pen, n, size):\n for i in range(n):\n petal(pen, size)\n pen.right(360/n)",
"def get_seedling_objects(image, min_size=4000, max_size=80000):\n\n filled = erode(image)\n labelled = label_objects(filled)\n big_objs = keep_objects_in_bracket(labelled, min_size, max_size)\n pl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
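`chop_dna` is truncated before the loop that emits the reads, but the visible setup (150 bp reads, at most 50 bp overlap, an evenly spread per-read overlap `ovl_len`) implies a sliding-window chop. A simplified sketch over a plain string, ignoring the `min_coverage` and SeqRecord bookkeeping of the original:

```python
import math

def chop_sequence(seq: str, read_len: int = 150, max_ovl: int = 50):
    """Split seq into read_len windows whose overlap is spread evenly."""
    if len(seq) <= read_len:
        return [seq]
    read_n = math.floor((len(seq) - max_ovl) / (read_len - max_ovl))
    # Overlap needed so read_n reads of read_len exactly tile the sequence.
    ovl = (read_len * read_n - len(seq)) / (read_n - 1) if read_n > 1 else 0
    step = read_len - ovl
    starts = [round(i * step) for i in range(read_n)]
    return [seq[s:s + read_len] for s in starts]

reads = chop_sequence("A" * 400)
assert all(len(r) == 150 for r in reads)  # 3 reads, 25 bp shared overlap
```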
Apply automatic hysteresis thresholding. Apply automatic hysteresis thresholding by automatically choosing the high and low thresholds of standard hysteresis threshold. low_prop is the proportion of edge pixels which are above the low threshold and high_prop is the proportion of pixels above the high threshold. | def hyst_thresh_auto(edges_in: np.array, low_prop: float, high_prop: float) -> np.array:
######################################################
# calculate thresholds based on the proportion values
l_thres = np.percentile(edges_in, (1.-low_prop)*100)
h_thres = np.percentile(edges_in, (1.-high_prop)*100... | [
"def hysteresis_threshold(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def hysteresis_threshold(image, low_threshold, high_threshold):\r\n high_mask = image > high_threshold\r\n low_mask = image > low_threshold\r\n return ndimage.binary_dilation(high_mask, mask=low_mask,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
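The `hyst_thresh_auto` record shows the threshold selection (complementary percentiles of the edge-strength image) but is cut off before the hysteresis step itself. A sketch completing the idea with scikit-image's `apply_hysteresis_threshold`; whether the original delegated to a helper like this is unknown:

```python
import numpy as np
from skimage.filters import apply_hysteresis_threshold

def hyst_thresh_auto(edges_in: np.ndarray, low_prop: float, high_prop: float) -> np.ndarray:
    # low_prop / high_prop are the fractions of pixels that should sit
    # above the low / high thresholds, so take the complementary percentile.
    l_thres = np.percentile(edges_in, (1.0 - low_prop) * 100)
    h_thres = np.percentile(edges_in, (1.0 - high_prop) * 100)
    # Keep pixels above h_thres, plus any above l_thres connected to them.
    return apply_hysteresis_threshold(edges_in, l_thres, h_thres)
```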
Write out the extracted spectrum to a text file. If the file already exists, this will not overwrite it. For each spectrum in ap_list, it will add columns onto the output file so that the first column is always wavelength, the second column is flux, and the third column is sigma, and then repeat the flux an... | def write_extract_text(ofile, ap_list, clobber=False):
if os.path.isfile(ofile) and not clobber: return
#open the file
dout=saltio.openascii(ofile, 'w')
#first extract warr, assume it is the same for all frames
warr=ap_list[0].wave
#write out the spectrum
for i in range(len(warr)):
... | [
"def write_extract_fits(ofile, ap_list, clobber=False):\n #delete the file\n if os.path.isfile(ofile) and clobber: saltio.delete(ofile)\n\n #create the primary array\n hdu = pyfits.PrimaryHDU()\n hdulist = pyfits.HDUList([hdu])\n\n #create the columns and the \n for ap in ap_list:\n fvar=abs(ap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
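The `write_extract_text` document stops inside the row loop. A sketch of the column layout its docstring describes, with wavelength first and a flux/sigma pair appended per spectrum; the attribute names `ap.ldata` and `ap.lvar` are assumptions carried over from the companion FITS writer below:

```python
def write_extract_text_sketch(ofile, ap_list):
    warr = ap_list[0].wave  # assume a shared wavelength grid, as the record does
    with open(ofile, "w") as out:
        for i in range(len(warr)):
            row = ["%8.6f" % warr[i]]
            for ap in ap_list:  # append flux and sigma for each spectrum
                row += ["%.6e" % ap.ldata[i], "%.6e" % (abs(ap.lvar[i]) ** 0.5)]
            out.write(" ".join(row) + "\n")
```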
Write out the extracted spectrum to a FITS table. If the file already exists, this will not overwrite it. For each spectrum in ap_list, it will add another extension to the fits file. Each extension will have the first column as wavelength, the second column as counts, and the third column as sigma on the counts. | def write_extract_fits(ofile, ap_list, clobber=False):
#delete the file
if os.path.isfile(ofile) and clobber: saltio.delete(ofile)
#create the primary array
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
#create the columns and the
for ap in ap_list:
fvar=abs(ap.lvar)**0.5
... | [
"def new_fits(outfile, **kwargs):\n # Fake data\n sci_data = numpy.arange(10000, dtype='float').reshape(100,100)\n err_data = numpy.sqrt(sci_data) # Poisson error\n dq_data = numpy.zeros(sci_data.shape, dtype='int16') # No bad pixel\n\n # Create individual extensions\n hdu_hdr = pyfits.PrimaryH... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
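`write_extract_fits` is truncated while building the per-aperture columns. For reference, a minimal sketch of the same one-extension-per-spectrum layout using modern `astropy.io.fits` rather than the legacy `pyfits` import; the column set (wavelength, counts, sigma) follows the docstring, and the data attribute `ap.ldata` is an assumption:

```python
import numpy as np
from astropy.io import fits

def write_extract_fits_sketch(ofile, ap_list, clobber=False):
    hdulist = fits.HDUList([fits.PrimaryHDU()])
    for ap in ap_list:
        sigma = np.abs(ap.lvar) ** 0.5  # sigma from the variance array
        cols = fits.ColDefs([
            fits.Column(name="wavelength", format="D", array=ap.wave),
            fits.Column(name="counts", format="D", array=ap.ldata),
            fits.Column(name="counts_err", format="D", array=sigma),
        ])
        hdulist.append(fits.BinTableHDU.from_columns(cols))
    hdulist.writeto(ofile, overwrite=clobber)
```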
Return the folder of this script, with frozen-executable compatibility. | def __get_this_folder():
return os.path.split(os.path.abspath(os.path.realpath(
__get_this_filename())))[0] | [
"def scripts_folder(self):\n return ScriptUtils.defaultScriptsFolder()",
"def lock_directory(self):\n from os.path import join, basename\n return join(self._parent_directory, \".\" + basename(self.filename) + \"-pylada_lockdir\")",
"def get_frozen_version(self):\n # todo: put this to use\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
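The `__get_this_folder` record depends on a `__get_this_filename()` helper that is not shown. The usual frozen-compatible implementation of that helper distinguishes PyInstaller-style bundles (where `sys.frozen` is set and `sys.executable` points at the bundle) from a normal interpreter run; a sketch of that convention, assuming it is what the missing helper does:

```python
import os
import sys

def get_this_filename() -> str:
    """Path of the running script, valid in both frozen and source runs."""
    if getattr(sys, "frozen", False):
        return sys.executable  # frozen bundle: the executable itself
    return __file__            # normal run: this source file

def get_this_folder() -> str:
    return os.path.split(os.path.abspath(os.path.realpath(get_this_filename())))[0]
```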