query (string · lengths 9–9.05k) | document (string · lengths 10–222k) | negatives (list · lengths 19–20) | metadata (dict)
|---|---|---|---|
Returns the request for the existing Firewall resource. | def _GetGetRequest(self, client, resource_reference):
return (client.apitools_client.firewalls, 'Get',
client.messages.ComputeFirewallsGetRequest(
firewall=resource_reference.Name(),
project=resource_reference.project)) | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n devices: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FirewallDeviceArgs']]]]] = None,\n disabled: Optional[pulumi.Input[bool]] = None,\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets a Hello. If 'name' was given as an argument, greet that name; otherwise a random name is retrieved from the model | def get(self):
if self.args['name'] == '':
name = self.model.get_random_name()
else:
name = self.args['name']
return self.jsonify({'message': 'Hello {:s}!'.format(name)}) | [
"def say_hello(name):\n return 'Hello, {} '.format(name)",
"def hello(self, message, args):\n if args.favorite_number is None:\n return \"Hello {name}\".format(name=args.name)\n else:\n return \"Hello {name}, I hear your favorite number is {number}\".format(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper code to compute average word length of a name | def average_word_length(self, name):
return np.mean([len(word) for word in name.split()]) | [
"def get_avg_word_length(lyrics):\n\n\tlyrics = lyrics.translate(str.maketrans('','',string.punctuation))\n\treturn round(sum([len(word) for word in lyrics.split()]) / len(lyrics.split()),2)",
"def get_average_word_length(self):\n\n if self.word_count_list is None:\n self.tokenize_documents()\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enter the Case_Number from which you want to extract the year. This function takes the first four digits from the entered data | def Year(Case_Number):
    return Case_Number[:4] | [
"def get_year(msg):\n year = input(msg)\n if re.match(\"[1-3][0-9]{3}\", year) and len(year) == 4:\n return year\n else:\n print(\"Enter correct year!\")\n return get_year(msg)",
"def get_year(text):\n # type: (str) -> int\n year = re.search(r\"\\d{4}\", text)\n return int(y... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
gets a dictionary of boolean arrays for each sampling point. These are ultimately derived from shape files, but if possible this function will load a pickled dictionary | def get_drn_samp_pts_dict(recalc=False):
pickle_path = "{}/inputs/pickeled_files/drn_samp_pts.p".format(sdp)
if os.path.exists(pickle_path) and not recalc:
        drn_con_samp_pts = pickle.load(open(pickle_path, 'rb'))
return drn_con_samp_pts
# load all shapefiles in base_shp_path
base_shp_path = "{... | [
"def import_sample_map_array(self):\n if self.sample_map_array is None:\n if self.sample_map.file_name == \"null\":\n self.sample_map_array = np.ones((self.sample_map.x_size, self.sample_map.y_size))\n else:\n self.sample_map.open()\n if self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes a request to the token endpoint by sending the `post_data` parameters using the 'application/x-www-form-urlencoded' format. | def _post_request(self, post_data, extras={}):
url = reverse('oidc_provider:token')
request = self.factory.post(
url,
data=urlencode(post_data),
content_type='application/x-www-form-urlencoded',
**extras)
response = TokenView.as_view()(request)
... | [
"def post(self, query_string, data=None):\n if data:\n data[\"__FORM_TOKEN\"] = self._get_form_token()\n return self.session.post(self.base_url + query_string, data=data)",
"def test_create_token_exchange_using_post(self):\n pass",
"def _request_token(self):\n response = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scope is ignored for token responses to the auth code grant type. This comes down to the fact that the scopes requested in authorize are the ones returned. | def test_scope_is_ignored_for_auth_code(self):
SIGKEYS = self._get_keys()
for code_scope in [['openid'], ['openid', 'email'], ['openid', 'profile']]:
code = self._create_code(code_scope)
post_data = self._auth_code_post_data(
code=code.code, scope=code_scope)
... | [
"def scope(self) -> dict:\n scope = self._auth_token.scope\n if not isinstance(scope, dict):\n raise ValueError(\"Token's scope claim must be of type 'dict'\")\n if \"admin\" not in scope or \"spotify\" not in scope:\n raise ValueError(\"'admin' and 'spotify' must be in to... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extending scope in refresh token is not allowed. Try to get a refresh token with "profile" in the scope even though the original authorized scope in the authorization code request is only ['openid', 'email']. | def test_refresh_token_invalid_scope(self):
self.do_refresh_token_check(scope=['openid', 'profile']) | [
"def test_refresh_token_narrowed_scope(self):\n self.do_refresh_token_check(scope=['openid'])",
"def refresh_access_information(self, refresh_token):\n if self.config.grant_type == 'password':\n data = {'grant_type': 'password',\n 'username': self.config.user,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Narrowing scope in refresh token is allowed. Try to get a refresh token with just "openid" in the scope even though the original authorized scope in the authorization code request is ['openid', 'email']. | def test_refresh_token_narrowed_scope(self):
self.do_refresh_token_check(scope=['openid']) | [
"def test_refresh_token_invalid_scope(self):\n self.do_refresh_token_check(scope=['openid', 'profile'])",
"def get_original_scopes(self, refresh_token, request, *args, **kwargs):\r\n log.debug('Obtaining scope of refreshed token.')\r\n tok = self._tokengetter(refresh_token=refresh_token)\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The authorization server supports including the client credentials in the request body using the `client_id` and `client_secret` parameters. | def test_client_authentication(self):
code = self._create_code()
# Test a valid request to the token endpoint.
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
self.assertNotIn(
'invalid_client',
response.co... | [
"def _authenticate_client(self, client, secret):\n credentials = str(base64.b64encode(str.join(':', [client, secret]).encode('utf-8')))[2:-1]\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Cache-Control': 'no-cache',\n 'Authorization': 'Basic '... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If present in the Authentication Request, Authorization Servers MUST include a nonce Claim in the ID Token with the Claim Value being the nonce value sent in the Authentication Request. If the client does not supply a nonce parameter, it SHOULD not be included in the `id_token`. | def test_access_token_contains_nonce(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].en... | [
"def getNonce(self):\n return self[Header.PARAM_NONCE] if Header.PARAM_NONCE in self else None",
"def getCNonce(self):\n return self.getParameter(AuthenticationHeader.PARAM_CNONCE)",
"def auth_oidc_req(self):\n\n current_time = time.time()\n if self.token_json and self.token_time:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If access_token is included, the id_token SHOULD contain an at_hash. | def test_id_token_contains_at_hash(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].enco... | [
"def authed():\n\n return 'access_token' in session",
"def _validate_at_hash(claims, access_token, algorithm):\n if \"at_hash\" not in claims:\n return\n\n if not access_token:\n msg = \"No access_token provided to compare against at_hash claim.\"\n raise JWTClaimsError(msg)\n\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We MUST validate the signature of the ID Token according to JWS using the algorithm specified in the alg Header Parameter of the JOSE Header. | def test_idtoken_sign_validation(self):
SIGKEYS = self._get_keys()
RSAKEYS = [k for k in SIGKEYS if k.kty == 'RSA']
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(respo... | [
"def _jwt_sign(self, header, payload, algorithm=jws.ALGORITHMS.RS256):\n secret = crypto.dump_privatekey(crypto.FILETYPE_PEM, self.p12.get_privatekey()).decode(\"utf-8\")\n return jws.sign(payload,\n key=secret,\n headers=header,\n a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test custom function for setting OIDC_IDTOKEN_SUB_GENERATOR. | def test_custom_sub_generator(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_token'].encode('u... | [
"def testConfiguredModuleGeneratorID(self):\n self._testConfiguredPromiseViaAlarm(\"promise_slapos_module_id_generator\")",
"def test_init(self, monkeypatch, setup, sub_generator, p, super_episode_length, expected_sel):\n # setup test scenario\n self._sub_generator = sub_generator\n # call... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test custom function for setting OIDC_IDTOKEN_PROCESSING_HOOK. | def test_additional_idtoken_processing_hook(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(response_dic['id_tok... | [
"def test_additional_idtoken_processing_hook_one_element_in_list(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test custom function for setting OIDC_IDTOKEN_PROCESSING_HOOK. | def test_additional_idtoken_processing_hook_one_element_in_list(self):
code = self._create_code()
post_data = self._auth_code_post_data(code=code.code)
response = self._post_request(post_data)
response_dic = json.loads(response.content.decode('utf-8'))
id_token = JWT().unpack(... | [
"def test_additional_idtoken_processing_hook(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(respons... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test scope is available in OIDC_IDTOKEN_PROCESSING_HOOK. | def test_additional_idtoken_processing_hook_scope_available(self):
id_token = self._request_id_token_with_scope(
['openid', 'email', 'profile', 'dummy'])
self.assertEqual(
id_token.get('scope_of_token_passed_to_processing_hook'),
['openid', 'email', 'profile', 'dummy'... | [
"def test_scope_is_ignored_for_auth_code(self):\n SIGKEYS = self._get_keys()\n for code_scope in [['openid'], ['openid', 'email'], ['openid', 'profile']]:\n code = self._create_code(code_scope)\n\n post_data = self._auth_code_post_data(\n code=code.code, scope=code... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test correct kwargs are passed to OIDC_IDTOKEN_PROCESSING_HOOK. | def test_additional_idtoken_processing_hook_kwargs(self):
id_token = self._request_id_token_with_scope(['openid', 'profile'])
kwargs_passed = id_token.get('kwargs_passed_to_processing_hook')
assert kwargs_passed
self.assertTrue(kwargs_passed.get('token').startswith(
'<Token: ... | [
"def test_additional_idtoken_processing_hook(self):\n code = self._create_code()\n\n post_data = self._auth_code_post_data(code=code.code)\n\n response = self._post_request(post_data)\n\n response_dic = json.loads(response.content.decode('utf-8'))\n id_token = JWT().unpack(respons... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Proof Key for Code Exchange by OAuth Public Clients. | def test_pkce_parameters(self):
code = create_code(user=self.user, client=self.client,
scope=['openid', 'email'], nonce=FAKE_NONCE, is_authentication=True,
code_challenge=FAKE_CODE_CHALLENGE, code_challenge_method='S256')
code.save()
post_da... | [
"def test_init_key():\n key = 'test_key'\n\n client = GiftbitClient(api_key=key)\n\n assert client.api_key == key",
"def test_retrieve_iceberg_license_key_contents(self):\n pass",
"def test_approve_service_key(self):\n pass",
"def test_api_key(self):\n api_key = 'testapikey'\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if coord is inside the vacuum tube. Pseudo-overrides BaseClassFieldHelper | def is_Coord_Inside_Vacuum(self, x: float, y: float, z: float) -> bool:
return 0 <= x <= self.L and y ** 2 + z ** 2 < self.ap ** 2 | [
"def is_coord_in_vacuum(x: float, y: float, z: float, params) -> bool:\n K, L, ap, field_fact = params\n return -eps <= x <= L * eps_fact and np.sqrt(y ** 2 + z ** 2) < ap",
"def check_safety_zone(self):\n if self.safety_zone is None:\n return 0\n\n if self.position_xy.within(self.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapper for interpolation of the magnetic field of the plane at the center of the lens. See self.magnetic_potential | def _magnetic_potential_Func_Inner(self, x: float, y: float, z: float) -> float:
V = interp2D(y, z, self.yArrIn, self.z_arrIn, self.VArrIn)
return V | [
"def _magnetic_potential(self, x: float, y: float, z: float) -> float:\n if not self.is_Coord_Inside_Vacuum(x, y, z):\n return np.nan\n y = abs(y)\n z = abs(z)\n if -self.extra_field_length <= x <= self.L_cap:\n V0 = self._magnetic_potential_Func_Fringe(x, y, z)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Force on Li7 in simulation units at x,y,z. Pseudo-overrides BaseClassFieldHelper. Symmetry is used to simplify the computation of force. Either end of the lens is identical, so coordinates falling within some range are mapped to an interpolation of the force field at the lens's end. If the lens is long enough, the inner ... | def _force(self, x: float, y: float, z: float) -> TupleOf3Floats:
if not self.is_Coord_Inside_Vacuum(x, y, z):
return np.nan, np.nan, np.nan
FySymmetryFact = 1.0 if y >= 0.0 else -1.0 # take advantage of symmetry
FzSymmetryFact = 1.0 if z >= 0.0 else -1.0
y = abs(y) # confi... | [
"def _xforce_xyz(self,x,y,z):\n return 1/2*self._b*self._c*f.cy_forceInt(x,y,z,self._a2,self._b2,self._c2,0,self.n)",
"def get_force(self):\n # @todo: decide whether or not we want to have gimbaling provide x force and lift for 4 of the engines, or to do x force (drag) for all engines in force_hover... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Magnetic potential energy of Li7 in simulation units at x,y,z. Pseudo-overrides BaseClassFieldHelper. Symmetry is used to simplify the computation of potential. Either end of the lens is identical, so coordinates falling within some range are mapped to an interpolation of the potential at the lens's end. If the lens is l... | def _magnetic_potential(self, x: float, y: float, z: float) -> float:
if not self.is_Coord_Inside_Vacuum(x, y, z):
return np.nan
y = abs(y)
z = abs(z)
if -self.extra_field_length <= x <= self.L_cap:
V0 = self._magnetic_potential_Func_Fringe(x, y, z)
elif s... | [
"def earthmagnetic(self, *args, **kwargs):\n return _measures.measures_earthmagnetic(self, *args, **kwargs)",
"def potential_energy(self):\n m_s = self.arr_.m_s\n x_s = self.arr_.x_s\n y_s = self.arr_.y_s\n z_s = self.arr_.z_s\n\n m_dm = self.arr_.m_dm\n x_dm = sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make brushes for spots with different alpha factors. | def makeBrushes(self):
self.brushes = []
deltaAlpha = self.maxAlpha - self.minAlpha
slope = deltaAlpha / (self.dataSize - 1)
for i in range(self.dataSize):
alpha = slope * i + self.minAlpha
self.brushes.append(mkBrush(*self.brushColor, int(alpha)))
#c ... | [
"def build_billboard(self, tex):\n img = Image.new(\"RGBA\", (24,24), self.bgcolor)\n\n front = tex.resize((14, 12), Image.ANTIALIAS)\n alpha_over(img, front, (5,9))\n return img",
"def fence(x, y, l, w, item):\r\n for a in range(x, l + x, 10):\r\n for b in range(y, w + y, 10... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Show the scatter and histogram plots. | def showPlot(self):
self.scatterPlotItem.setData(self.xData, self.yData, pen=self.pointPen, brush=self.brushes)
xy, xx = np.histogram(self.xData,
bins=np.linspace(np.min(self.xData), np.max(self.xData), self.numBins))
self.xHistogramItem.setData(xx, xy, stepMode=Tr... | [
"def show_scatterplot(self, *args, **kwargs):\n raise NotImplementedError()",
"def visualize(self):\n plt.show()",
"def show():\n plt.show()",
"def show(self, show =1):\n\t\tplt.scatter(*zip(*self.x), s=0.1)\n\t\tplt.axis('equal')\n\t\tplt.axis('off')\n\t\tmarker='.'\n\t\tif show== 1:\n\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the mean Average Precision metric on a subset with a given model | def evaluate(model, subset, batch_size=default_batch_size, data_dir=default_data_dir, verbose=0):
#disable_tqdm = (verbose == 0)
# Create the generator on the given subset
data_generator = PascalVOCDataGenerator(subset, data_dir)
steps_per_epoch = int(len(data_generator.id_to_label) / batch_size) + 1
... | [
"def average_model(self, key, model):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n for param, other_param in zip(\n self.models[key].parameters(), model.parameters()):\n param.data += other_param.data.c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that get_outbox returns an Outbox. This test brought to you by the department of redundancy department. | def test_single_scope(self):
with get_outbox() as outbox:
self.assertIsInstance(outbox, Outbox) | [
"def check_outbox(self):\n response = urlopen(self.outbox_url)\n if response.getcode() == 200:\n content = json.loads(response.read())\n # for each message in our outbox\n for message in content['outbox']:\n # add it to our outgoing queue\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that the close() method is called when the scope exits. | def test_was_closed(self, mock_close):
with get_outbox():
pass
self.assertTrue(mock_close.called) | [
"def assert_close(self) -> None:\n assert self.is_closed",
"def test_close_event(self):\n pass",
"def __exit__(self, *args: Any) -> None:\n self.close()",
"def test_operation_on_closed(self):\n self.fh.close()\n assert self.fh[META_ATTR]\n\n # cannot access closed han... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert that the flush() method is called when the scope exits. | def test_was_flushed(self, mock_flush):
with get_outbox():
pass
self.assertTrue(mock_flush.called) | [
"def flush():\n actual_flush()",
"def test_flush_empties(queue):\n queue.flush()\n assert queue.empty()",
"def test_teardown(self):\n assert self.cosm_trade_handler.teardown() is None\n self.assert_quantity_in_outbox(0)",
"def end_test(self):",
"def test_ending(shared_ressources):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Raise an error if the vertex does not exist | def vert_not_exists_error(self, v: int) -> Optional[NoReturn]:
try:
assert v in self.vertices.keys()
except AssertionError:
raise Exception(f"Vertex {v} does not exist")
else:
return None | [
"def test_undirected_graph_vertex_already_exists(self):\n g = UndirectedGraph()\n g.add_vertex(v_val='v0')\n\n with self.assertRaises(ValueError):\n g.add_vertex(v_val='v0')",
"def test_directed_graph_vertex_already_exists(self):\n g = DirectedGraph()\n g.add_vertex(v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert from ORF to genomic coordinates. | def toGenomic(self, doSwapStartEnd=True):
self.genomic = True
o = parseOrfHeader(self.accession)
self.sStart,self.sEnd = convertOrfToGenomic(
self.sStart, self.sEnd, o.strand, o.start)
self.addStrandAttribute(o.strand)
if doSwapStartEnd:
self.swapStartEnd(... | [
"def convertOrfToGenomic(start, end, strand, orfStart):\n if strand=='+':\n gStart = orfStart + 3*(start-1)\n gEnd = orfStart + 3*(end-1) + 2\n else:\n gStart = orfStart - 3*(start-1)\n gEnd = orfStart - 3*(end-1) - 2\n return gStart, gEnd",
"def convOSM(wkt):\n obj = OGRGe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Jump to regex match in file. | def jumpToMatch(iFile, regex):
for line in iFile:
if regex.match(line):
return True
return False | [
"def _match(self, regex):\n cregex = re.compile(regex)\n for line in self.content.splitlines():\n match = cregex.match(line)\n if match:\n return match\n raise Exception('No \"{0}\" line in {1}.cpp'.format(\n regex_to_error_msg(regex),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract up to regex match from file. | def extractUptoMatch(iFile, regex):
block = []
for line in iFile:
if regex.match(line):
break
else:
block.append(line.rstrip())
return block | [
"def process_file(file_path):\n file_of_matches=open(file_path, \"r\")\n #loop over every line to get process individual matches\n for match in file_of_matches:\n process_match(match[:-1])#drop the \\n from end of line \n file_of_matches.close()",
"def get_regex_match_in_file(file, regex):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a 6 frame header (from translate or python). | def parseSixFrameHeader(header):
header = header.strip()
regex = re.compile(
'(?P<name>\w+)([\.|:](?P<start>\d+)[-|,](?P<end>\d+))?:(?P<frame>[0-5])')
rs = regex.search(header)
d = rs.groupdict()
d['frame'] = hmmer2frame[int(d['frame'])]
if d['frame']>0:
d['strand'... | [
"def parse_header(self, header):\n # Should be 8 words long\n head_int = np.fromstring(header, dtype=np.uint32) \n\n hdict = self.header_dict\n\n t_ind = hdict['time']\n frame_ind = hdict['frame']\n stat_ind = hdict['station']\n link_ind = hdict['link']... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse an ORF header (from extractORFs.py). | def parseOrfHeader(header):
regex = re.compile(
'(?P<name>\w+)\.(?P<orfId>\d+)\.(?P<start>\d+)-(?P<end>\d+)(\SLength=(?P<length>\d+))?')
rs = regex.match(header.strip())
d = rs.groupdict()
try:
d['start'] = int(d['start'])
d['end'] = int(d['end'])
d['length'] = int(d['len... | [
"def _parse_elf_header(self):\r\n return struct_parse(self.structs.Elf_Ehdr, self.stream, stream_pos=0)",
"def _parse_header(self):\n\n if self.ei_magic != '\\x7fELF':\n return\n\n self.seek(16,0)\n reading = {'h': self.le_half, 'w': self.le_word,'a': self.le_addr,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert 6 frame coords to genomic. | def convertSixFrameToGenomic(start, end, frame, L):
if frame>=0:
gStart = 3*(start-1)+(frame-1)+1
gEnd = 3*(end-1)+(frame-1)+3
else:
gStart = L-(3*(start-1)+abs(frame)-1)
gEnd = L-(3*(end-1)+abs(frame)+1)
return gStart,gEnd | [
"def convertBlockSixFrameToGenomic(block, start, end):\n #prog = re.compile('\\.|-|\\:')\n #tokens = prog.split(block)\n \n #prog = re.compile(\"(?P<chrom>[\\w]+)[.:](?P<bstart>[0-9]+)-(?P<bend>[0-9]+):(?P<frame>[0-9]+)\")\n #rs = prog.search(block)\n #if rs:\n # g = rs.groupdict()\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convenience function that takes block 6 frame coords (block, start, end), extracts the block start/end and frame, and converts them to genomic coords. | def convertBlockSixFrameToGenomic(block, start, end):
#prog = re.compile('\.|-|\:')
#tokens = prog.split(block)
#prog = re.compile("(?P<chrom>[\w]+)[.:](?P<bstart>[0-9]+)-(?P<bend>[0-9]+):(?P<frame>[0-9]+)")
#rs = prog.search(block)
#if rs:
# g = rs.groupdict()
# chrom,blockStart,... | [
"def _get_genomic_bounds(self):\n\t\treturn self.GVCFLine.get_int_position(), self.GVCFLine.get_int_position() + len(self.GVCFLine.ref_seq)",
"def _GetFrame(self):\n for node in self.svg.iter(): \n if node.get(inkex.addNS(\"Type\",\"TimeAnalysis\")) == \"Frame\":\n frame = node\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert domain coordinates in ORF to genomic. | def convertOrfToGenomic(start, end, strand, orfStart):
if strand=='+':
gStart = orfStart + 3*(start-1)
gEnd = orfStart + 3*(end-1) + 2
else:
gStart = orfStart - 3*(start-1)
gEnd = orfStart - 3*(end-1) - 2
return gStart, gEnd | [
"def toGenomic(self, doSwapStartEnd=True):\n self.genomic = True\n o = parseOrfHeader(self.accession)\n self.sStart,self.sEnd = convertOrfToGenomic(\n self.sStart, self.sEnd, o.strand, o.start)\n self.addStrandAttribute(o.strand)\n if doSwapStartEnd:\n self.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load hmmer domain results. | def loadDomains(iFileHandle):
domains = []
for d in HmmerFile(iFileHandle):
domains.append(d)
return domains | [
"def _load_humaneval(self, eval_cache_path: str) -> Dict:\n if \"cnndm\" in self.task:\n dataset = \"cnndm\"\n elif \"xsum\" in self.task:\n dataset = \"xsum\"\n else:\n raise ValueError\n\n all_humaneval_scores = dict()\n for shots in [0, 5]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens an HPI session. `host` specifies the hostname or IP address to connect to. `port` is the port number the HPI daemon listens on. | def open_hpi_connection(self, host, port=4743, alias=None):
port = int(port)
self._info('Opening connection to %s:%d' % (host, port))
os.environ["OPENHPI_DAEMON_HOST"] = str(host)
os.environ["OPENHPI_DAEMON_PORT"] = str(port)
session = Session()
session.open()
... | [
"def connect(cls, host, port):\n return cls(socket.create_connection((host, port)))",
"def open_connection(self, host, alias=None, port=23, timeout=None,\n newline=None, prompt=None, prompt_is_regexp=False):\n if timeout is None or timeout == '':\n timeout = self._t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Switches between opened HPI sessions using an index or alias. The index is obtained from the `Open HPI Connection` keyword, and an alias can be given to it. Returns the index of the previously active connection. | def switch_hpi_connection(self, index_or_alias):
old_index = self._cache.current_index
self._active_device = self._cache.switch(index_or_alias)
return old_index | [
"def switch_ipmi_connection(self, index_or_alias):\n\n old_index = self._cache.current_index\n self._active_connection = self._cache.switch(index_or_alias)\n return old_index",
"def switch_couchbase_connection(self, index_or_alias: Union[int, str]) -> int:\n\n old_index = self._cache.c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Closes the current HPI session. | def close_hpi_connection(self, loglevel=None):
self._active_session.close() | [
"def closeSession(self):\n self.hide()",
"def end_session(self):\n self.sess.close()",
"def close(self ):\n self.session.close()\n self.logger.info(\"Matlab session closed\")",
"def _close_session(cls):\n cls.coord.request_stop()\n cls.coord.join(cls.thread)\n cls... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Closes all open HPI sessions and empties the connection cache. After this keyword, new indexes obtained from the `Open HPI Connection` keyword are reset to 1. This keyword should be used in a test or suite teardown to make sure all connections to devices are closed. | def close_all_hpi_connections(self):
self._active_session = self._cache.close_all() | [
"def close_hpi_connection(self, loglevel=None):\n self._active_session.close()",
"def _close(self):\n print(\"Closing connections and unlinking memory...\", file=sys.stderr)\n self.csocket.close()\n self.ccontext.term()\n if hasattr(self, 'asocket'):\n self.asocket.cl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the entity path that all further keywords operate on. | def set_entity_path(self, ep):
try:
ep = EntityPath().from_string(ep)
except ValueError:
raise RuntimeError('Invalid entity path "%s"' % ep)
self._info('Setting entity path to %s' % (ep,))
self._cp['entity_path'] = ep | [
"def set_schema_paths(cls, schema_path, schema_entity_path):\n cls.__schema_path = schema_path\n cls.__schema_entity_path = schema_entity_path",
"def set_reference_path(self, pt):\n self.pt = pt",
"def path(self, path: str):\n self._occurrence_data['path'] = path",
"def hook_datase... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the FUMI number for all further FUMI keywords. | def set_fumi_number(self, number):
self._cp['fumi_number'] = number | [
"def fmi_id(self, fmi_id: int):\n\n self._fmi_id = fmi_id",
"def set_feature_number(self):\r\n self.n_features = self.exprs.shape[1]",
"def fmi_text(self, fmi_text: str):\n\n self._fmi_text = fmi_text",
"def ftduino_id_set(self, identifier):\n self.comm('ftduino_id_set {0}'.format(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails unless the specified FUMI RDR exists. `id` is the ID string of the resource descriptor record. If the RDR is found, it will be automatically selected. | def fumi_rdr_should_exist(self, id):
self._rdr_should_exist(FumiRdr, id) | [
"def dimi_rdr_should_exist(self, id):\n self._rdr_should_exist(DimiRdr, id)",
"def id_available(self, _id):\n raise NotImplementedError",
"def is_this_record_exist(table, id_):\n if id_[0] not in [record[0] for record in table]:\n\n ui.print_error_message(\"Record with this ID not found\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the DIMI number for all further DIMI keywords. | def set_dimi_number(self, number):
self._cp['dimi_number'] = number | [
"def SetDimensions(self, i: 'unsigned int', dim: 'unsigned long long') -> \"void\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_SetDimensions(self, i, dim)",
"def setidd(cls, iddinfo, iddindex, block, idd_version):\n cls.idd_info = iddinfo\n cls.block = block\n cls.idd_index = id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fails unless the specified DIMI RDR exists. A found RDR will be automatically selected. See also the `FUMI RDR Should Exist` keyword. | def dimi_rdr_should_exist(self, id):
self._rdr_should_exist(DimiRdr, id) | [
"def fumi_rdr_should_exist(self, id):\n self._rdr_should_exist(FumiRdr, id)",
"def ExisteRelacion(self,dr,usuario):\n bRetorno=False\n query=db.GqlQuery(\"select * from Relacion where usuario=:1 and doctor=:2\",usuario, dr)\n if query.count()>0:\n bRetorno=1\n else: #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
All team members may view a Context. Admin members may change a Context. Admin members may delete a Context. | def grant_permissions(self):
assign_perm("context.view_context", self.team.group, self)
assign_perm("context.change_context", self.team.admingroup, self)
assign_perm("context.delete_context", self.team.admingroup, self) | [
"async def administrators(self, ctx, arg):\r\n # TODO should add initial check who can use this command\r\n # maybe only people with admin permissions\r\n pass",
"async def roles(self, ctx):\n pass",
"def test_otoroshi_controllers_adminapi_users_controller_delete_admin(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save Context and grant permissions | def save(self, **kwargs):
super().save(**kwargs)
self.grant_permissions() | [
"def grant_permissions(self):\n assign_perm(\"context.view_context\", self.team.group, self)\n assign_perm(\"context.change_context\", self.team.admingroup, self)\n assign_perm(\"context.delete_context\", self.team.admingroup, self)",
"def save_context(self):\n if self.context is not N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches the owner user id of the requested entity_type/entity_id | def get_owner_id(session, entity_type, entity_id):
if entity_type == "track":
owner_id_query = (
session.query(Track.owner_id)
.filter(
Track.track_id == entity_id,
Track.is_delete == False,
Track.is_current == True,
)
... | [
"def RequireOwner(cls, photo_entity):\n if not photo_entity.from_datastore:\n raise endpoints.NotFoundException(Photo.NOT_FOUND_ERROR)\n\n current_picturesque_user = cls.RequirePicturesqueUser()\n\n if photo_entity.owner != current_picturesque_user.user_object:\n raise endpoints.ForbiddenExceptio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the notifications for remix tracks that are reposted/favorited by the parent remix author | def get_cosign_remix_notifications(session, max_block_number, remix_tracks):
if not remix_tracks:
return []
remix_notifications = []
remix_track_ids = [r["item_id"] for r in remix_tracks]
# Query for all the parent tracks of the remix tracks
tracks_subquery = (
session.query(Track)... | [
"def reactions(self):\n return self.__reactions.list()",
"def getNotifications(nodeIdentifier, items):",
"def detailed_reactions(self):\n return list(self._detailed_reactions)",
"def feeds_fanout_replied(action):\n # Fan out notification to parent Comment followers\n for follower in models_act... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches the notifications events that occurred between the given block numbers | def notifications():
db = get_db_read_replica()
min_block_number = request.args.get("min_block_number", type=int)
max_block_number = request.args.get("max_block_number", type=int)
track_ids_to_owner = []
try:
track_ids_str_list = request.args.getlist("track_id")
track_ids_to_owner ... | [
"def get_notifications(self, context):\n module_context.init()\n LOG.info(\"Received RPC GET NOTIFICATIONS \")\n events = self.sc.get_stashed_events()\n notifications = []\n for event in events:\n notification = event.data\n msg = (\"Notification Data: %r\" %... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
flatten pywt coefficients to a vector | def flatten_coeffs(coeffs):
x0 = []
for c in coeffs:
x0.append(np.array(c).ravel())
xvec = np.concatenate(x0)
return xvec | [
"def get_polyterms_w_xform(self):\n if self.polytermx_cache:\n return self.polytermx_cache\n greens = self.decompose_greens()\n self.polytermx_cache = []\n for (pp,hs,xi) in [self.poly_term_w_xi(t) for t in greens]:\n self.polytermx_cache += [(pp.full_simplify(), hs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
build pywt coefficients from a vector; this is the inverse of flatten_coeffs | def build_coeffs(xvec):
    nc = int(np.log2(len(xvec)) / 2)
coeffs = [xvec[0].reshape(1, 1)]
for i in range(nc):
c1 = xvec[4**i:2*4**i].reshape(2**i, 2**i)
c2 = xvec[2*4**i:3*4**i].reshape(2**i, 2**i)
c3 = xvec[3*4**i:4*4**i].reshape(2**i, 2**i)
coeffs.append((c1, c2, c3))
return coeffs | [
"def flatten_coeffs(coeffs):\n x0 = []\n for c in coeffs:\n x0.append(np.array(c).ravel())\n xvec = np.concatenate(x0)\n return xvec",
"def _construct_coefficients(self):\n coeffs = [0]*self.degree\n\n N = float(self.evalpts)\n\n lvals = np.arange(self.evalpts).astype('float')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Populates a given SdFec instance with parameters from an HWH file. Parameters include... + Basic IP config settings (XSdFec_Config struct) + LDPC parameter table (a dict of named XSdFecLdpcParameters) | def populate_params(obj, params):
obj._config = _ffi.new('XSdFec_Config*')
obj._code_params = type('', (), {})
_set_params(obj._config, params, _config)
_set_params(obj._code_params, params, _code_params) | [
"def __init__(self, description : dict):\n super().__init__(description)\n if 'parameters' in description:\n populate_params(self, description['parameters'])\n else:\n warnings.warn(\"Please use an hwh file with the SD-FEC driver\"\n \" - the defau... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a cdata XSdFecLdpcParameters version of the given dict | def _pack_ldpc_param(param_dict : dict) -> any:
key_lookup = {
'k': 'K',
'n': 'N',
'p': 'PSize',
'nlayers': 'NLayers',
'nqc': 'NQC',
'nmqc': 'NMQC',
'nm': 'NM',
'norm_type': 'NormType',
'no_packing': 'NoPacking',
'special_qc': 'SpecialQ... | [
"def make_crds_parameter_dict(self):\n\n parameters = {}\n parameters['INSTRUME'] = self.instrument.upper()\n parameters['DETECTOR'] = self.detector.upper()\n parameters['READPATT'] = self.read_pattern.upper()\n parameters['SUBARRAY'] = self.subarray.upper()\n parameters['D... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrapper to call C functions, checking if they exist and their return status. | def _safe_wrapper(name: str, *args, check_return: bool=True, **kwargs) -> any:
with sys_pipes():
if not hasattr(_lib, name):
raise RuntimeError(f"Function {name} not in library")
ret = getattr(_lib, name)(*args, **kwargs)
if check_return and ret:
raise RuntimeError(f"... | [
"def check_func (self, func,\r\n headers=None, include_dirs=None,\r\n libraries=None, library_dirs=None,\r\n decl=0, call=0):\r\n\r\n self._check_compiler()\r\n body = []\r\n if decl:\r\n body.append(\"int %s ();\" % func)\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make an SD FEC instance as described by an HWH file snippet | def __init__(self, description : dict):
super().__init__(description)
if 'parameters' in description:
populate_params(self, description['parameters'])
else:
warnings.warn("Please use an hwh file with the SD-FEC driver"
" - the default configurati... | [
"def __init__(self, name, header):\n\n self.header = header.copy()\n#\n# Check if the file already exists. If it does not, check to see\n# if we were provided with a Primary Header. If not we will need\n# to prepend a default PrimaryHDU to the file before writing the\n# given header... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
List the available LDPC code names | def available_ldpc_params(self) -> list:
return list(self._code_params.ldpc.keys()) | [
"def list(cls):\n\n codes = []\n\n for key in cls.options.keys():\n\n opt = {\n 'key': key,\n 'value': cls.options[key]\n }\n\n label = cls.labels.get(key)\n\n if label:\n opt['label'] = label\n\n codes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stub for setting Turbo code parameters | def set_turbo_params(self, turbo_params: dict) -> None:
# TODO
pass | [
"def test_set_system_param(self):\n pass",
"def do_set(self, args):\n\n split_args = args.split()\n if len(split_args) < 1:\n module_logger.error(\"You must provide at least one argument\".format(args))\n elif len(split_args) == 1:\n if split_args[0] == \"iface\":... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the callback function triggered on __setitem__ | def set_callback(self, callback):
self.callback = callback | [
"def setter(self, fn):\n self.cb_set = fn",
"def set_callback(self, data_id, func):\n self.callbacks[data_id] = func",
"def register_change_item_callback(self, callback):\n self.callbacks.append(callback)",
"def set_callback(name, new_callback=None):\n getattr(mujoco, \"set_\" + name)(ne... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check that the feature map is not bigger than the L1 buffer size | def _l1_buffer_size_check(max_feature_map_l1, fusion_para):
l1_buffer_size = cce_conf.get_soc_spec("L1_SIZE")
l1_fusion_type = fusion_para.get("l1_fusion_type")
if (l1_fusion_type == 1) or (l1_fusion_type == 0):
pass
elif max_feature_map_l1 > l1_buffer_size:
raise... | [
"def __gt__(self, other):\n return self.get_size() > int(other)",
"def isFull(self):\n #In this Case the function is Only FOr Understanding because the Linked List \n #Is store the value is Heap Memory Because it is Dyanamic in nature\n #So This Is Only For Understanding Purpose\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the tensor_map in convparam | def get_tensor_map(self):
return self.TENSOR_MAP | [
"def tensorflow_param(ckpt_path):\r\n tf_param = {}\r\n reader = tf.train.load_checkpoint(ckpt_path)\r\n for name in reader.get_variable_to_shape_map():\r\n try:\r\n print(name, reader.get_tensor(name).shape)\r\n tf_param[name] = reader.get_tensor(name)\r\n except Attrib... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
translate tvm.shape to list type in python | def shape_to_list(shape):
if isinstance(shape, (list, tuple)):
return shape
tmp = []
for i in shape:
tmp.append(i.value)
return tmp | [
"def _shape_to_list(shape):\r\n if isinstance(shape, (list, tuple)):\r\n return shape\r\n tmp = []\r\n if shape == \"\":\r\n return ()\r\n for i in shape:\r\n tmp.append(i.value)\r\n return tmp",
"def shape_from_tagged(node: TaggedDict) -> list[int]:\n if \"shape\" in no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fmap c0 check value | def _fmap_c0_check_value(dtype, optim_dict):
fmap_c0_check_value = 4 if optim_dict["c0_optim_flg"] and \
(is_v200_version() or is_lhisi_version()) else CUBE_MKN[dtype]['mac'][1]
return fmap_c0_check_value | [
"def is_scalar_zero(expr):\n return is_scalar_x(expr, 0)",
"def zero_crossings(data):\n pos = data > 0\n npos = ~pos\n return ((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]",
"def _convert_c_if_args(self, cond_tuple, bit_map):\n if isinstance(cond_tuple[0], Clbit):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check L1 fusion fmap select | def _fusion_fmap_select(fmap):
valid_shape = ConvParam.fusion_para.get("valid_shape")
offset = ConvParam.fusion_para.get("slice_offset")
input_memory_type = ConvParam.fusion_para.get("input_memory_type")
if offset and input_memory_type != 1 :
if TENSOR_MA... | [
"def _is_select(self, op):\n return hasattr(op, \"select\") and getattr(op, \"select\") is not None",
"def test_select(self):\n tList = self.tList\n # Should be able to detect regardless of case\n nList = \"transformXOR, Transformadd\"\n transList = select_transformers(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
calculate im2col_fractal tvm lambda function | def __im2col_fractal_indices(indices, fmap):
block_size = config['mac'][1]
block_size_m = config['mac'][0]
_, howo, _, kernel_h, kernel_w, _ = fmap.shape
batch_size, index_i1, index_j1, index_i0, index_j0 = indices
n_index = batch_size
hw_index = ... | [
"def color_deconvolution(img):\n\n\t#Note: I am simply copying the naming conventions used in the matlab script\n\t\n\timg = img.copy()\n\n\t#STAIN VECTORS FOR H&E DECONVOLUTION (can add support for more later)\n\tMODx = [0.644211, 0.092789, 0]\n\tMODy = [0.716556, 0.954111, 0]\n\tMODz = [0.266844, 0.283111, 0]\n\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
merge the im2col_row_major axes of input_C1, filter_h, filter_w, input_C0 | def _im2col_row_major_reshape(fmap_im2col_shape, \
fmap_row_major, compute_dtype):
_, howo, input_c1, filter_h, filter_w, input_c0 = fmap_row_major.shape
row_major_reshape = tvm.compute(
fmap_im2col_shape, lambda i, j, k: tvm.select(
tvm.all(k < input_c1*filter_h*filt... | [
"def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):",
"def conv_forward_im2col(x, w, b, conv_param):\n N, C, H, W = x.shape\n num_filters, _, filter_height, filter_width = w.shape\n stride, pad = conv_param['stride'], conv_param['pad']\n\n # Check dimensions\n a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get fmap_shape_nc1hwc0 for dsl interface | def _get_dsl_fmap_shape_nc1hwc0():
valid_shape = ConvParam.fusion_para.get("valid_shape")
if valid_shape:
fmap_shape_nc1hwc0 = tuple(shape_to_list(valid_shape))
else:
fmap_shape_nc1hwc0 = tuple(shape_to_list(data.shape))
return fmap_shape_nc1hwc0 | [
"def shape(self) -> S:",
"def shape(name):\n return Formex(pattern(Pattern[name]))",
"def shape_from_config_jungfrau(co):\n return (co.numberOfModules(), co.numberOfRowsPerModule(), co.numberOfColumnsPerModule())",
"def output_shape(self):\n return None",
"def get_image_shape(self) -> Tuple[int... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test max_of for all option types | def test_option_max():
for t1 in tipes :
o1= rawOptionType(t1)
assert_max(t1, rawSomeType(), t1)
assert_max(o1, rawSomeType(), o1)
for t2 in tipes:
o2 = rawOptionType(t2)
assert_max(o1,t2, rawOptionType( t1.max_of(t2)))
assert_max(o1,o2, rawOptionT... | [
"def test_result_has_max_requested_or_less(self):\n pass",
"def test_maximum():\n test_maximum_case(0, [0, 0, 0], 0)\n test_maximum_case(1, [2, 0, 0], 2)\n test_maximum_case(2, [1, 2, 1], 2)\n test_maximum_case(3, [4, 5, 6], 6)\n test_maximum_case(4, [4.5, 5.1, 6.7], 6.7)\n test_maximum_c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test compatible_with for all option types | def test_option_compatible():
for t1 in tipes :
o1= rawOptionType(t1)
assert_compatible(o1, rawSomeType(), True)
for t2 in tipes:
o2 = rawOptionType(t2)
assert_compatible(o1, t2, t1.compatible_with(t2))
assert_compatible(o1, o2, t1.compatible_with(t2)) | [
"def match_options(self): # pragma: no cover",
"def test_get_options_expirations(self):\n pass",
"def test_get_options(self):\n pass",
"def ValidateOptions(self, opt, args):",
"def test_get_option_expirations_realtime(self):\n pass",
"def test_get_options_chain_eod(self):\n pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Production line for correcting and estimating CPOL data radar parameters. The naming convention for these parameters is assumed to be DBZ, ZDR, VEL, PHIDP, KDP, SNR, RHOHV, and NCP. KDP, NCP, and SNR are optional and can be recalculated. | def production_line(radar_file_name, sound_dir, is_cpol=True, use_unravel=True):
# !!! READING THE RADAR !!!
if is_cpol:
radar = pyart.io.read(radar_file_name)
else:
radar = radar_codes.read_radar(radar_file_name)
# Correct data type manually
try:
radar.longitude['data'] = r... | [
"def _fetch_radar_params(self):\n resp = self._send_command('GRPS')\n if resp != Response.OK:\n raise KLD7Exception(\"GRPS command failed: {}\".format(resp))\n code, payload = self._read_packet()\n if code != 'RPST':\n raise KLD7Exception(\"GRPS data has wrong packe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create pair of interfaces | def makeIntfPair( cls, intfname1, intfname2, addr1=None, addr2=None,
node1=None, node2=None, deleteIntfs=True ):
# Leave this as a class method for now
assert cls
return makeIntfPair( intfname1, intfname2, addr1, addr2, node1, node2,
deleteIntfs... | [
"def test_create_interface_two_times(self):\n h1 = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i1 = self.plugin.createAndAddInterface(h1, \"1.2.3.4\")\n\n h2 = self.plugin.createAndAddHost(\"pepito\", \"linux\")\n i2 = self.plugin.createAndAddInterface(h2, \"1.2.3.4\")\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate a serializer and raise an error on bad validation | def validate_serializer(serializer):
if not serializer.is_valid():
raise ValueError(serializer.errors) | [
"def test_serializer_validation(self):\n serializer = self.serializer_class(data={})\n serializer.is_valid()\n\n expected_errors = {\n 'email': ['This field cannot be blank.'],\n }\n\n self.assertEqual(serializer.errors, expected_errors)",
"def test_invalid_datatype(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is called when the spider is opened. | def open_spider(self, spider):
_log.info('open_spider[%s]....' % spider.name) | [
"def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)",
"def open_spider(self, spider):\n logging.info('open spider')",
"def open(self, spider):\n self.spider = spider\n self.file_system = S3Hook()\n return super(ManifestFeedStorage, self).o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset count matrices to fresh values. | def reset_mats(self, init_base=None, init_oracle=None):
if init_base is None:
self.seen_states = 1 # NB. this keeps track of number of states visited so far
self.base_counts = Matrix() # NB. `base_counts[s,t]` records number of times we've performed s->t transition
... | [
"def clear_summaries(self):\n\n\t\tself.count = 0\n\t\tmemset(self.counts, 0, self.n*sizeof(double))",
"def reset_all(self):\n self.reset_memory()\n self.reset_traces()\n self.reset_tags()\n\n self.prev_obs = np.zeros(self.nx_inst)\n self.prev_qa = 0\n self.prev_max = 0."... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a new state j given that we're currently in a state i. Running this method updates the underlying count tables (self.{base,oracle}_counts); use `HDP.probas(state)` to get the probability over all visited states `j`. | def sample(self, state):
# get probabilities for next state over all states observed so far, plus oracle proba in final index:
base_probas = self.base_probas(state)
# sample one of the states (or oracle query):
next_state = np.random.choice(range(len(base_probas)), p=base_probas)
... | [
"def state(self, i):\n return self.basis[i]",
"def creation(i,state_in):\n coef = np.sqrt(state_in[i]+1)\n state_out=state_in.copy()\n state_out[i] = state_out[i]+1\n return state_out,coef",
"def successors(self, new_state):\n return self.graph[new_state]",
"def index(self, state):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an array of probabilities based on the current configuration of `self.oracle_counts`. The returned 1d array of type np.float is of size `self.num_states+1`, representing probabilities for returning an existing state, with an additional value at the end representing the probability of transitioning to a new, unseen state... | def oracle_probas(self):
n_js = np.array(self.oracle_counts[:self.seen_states,0], dtype=np.float64)
denominator = np.reciprocal(np.sum(n_js) + self.gamma)
new_state_proba = self.gamma * denominator
existing_state_probas = n_js * denominator
combined_probas = np.concatenate((exist... | [
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n probs = []\n for state, action in zip(states, actions):\n probs.append(1 if self.sample_action(state) == action else 0)\n return np.array(probs)",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset hyperparameters for HDPs. | def reset_params(self, t_alpha, t_beta, t_gamma, e_beta, e_gamma):
self.t_hdp.reset_params(t_alpha, t_beta, t_gamma)
self.e_hdp.reset_params(0., e_beta, e_gamma) | [
"def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise pooling weights.\n nn.init.normal_(self.pooling_weights, mean=0.0, std=0.02)",
"def reset_parameters(self) -> None:\n if hasa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change the color of this string, but cap it so that added characters will not be colored. | def color_cap(color_letter, string):
return C(color_letter)+string+C('N') | [
"def cyan(string):\n if not PrintingOptions().is_colored():\n return string\n return colored(string, 'cyan')",
"def colorize(string: str, color: str, bold: bool = False) -> str:\n color_escape = getattr(colorama.Fore, color.upper(), None)\n if not color_escape:\n return string\n elif ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a string and removes all of the characters in removers. | def remove_chars(string, removers):
new_string = string #String to edit
for char in removers: #Iterate through characters
        new_string = new_string.replace( char, '' ) #Remove chars one by one
return new_string | [
"def strip_chars(string, chars):\r\n return \"\".join(c for c in string if c not in chars)",
"def _strip_chars(self, word, chars_to_remove):\n for char in chars_to_remove:\n word = word.replace(char, '')\n return word",
"def remove_letters(letter, string):\r\n \r\n new_string = \"\"\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace one section of a string with another. | def replace(string,section, replacement):
found_spot=string.find(section)
dist=len(section)
newstring=string[:found_spot]+replacement+string[found_spot+dist:]
return newstring | [
"def strReplace( x, idx1, idx2, y):\n\n b0 = x[0:idx1]\n b1 = y\n b2 = x[idx2:]\n b = b0+b1+b2\n return str(b)",
"def transform_string(source: str, s1: str, s2: str) -> str:\n for index in range(len(source)):\n\n # if character is in s1, inserts character in s2 at same index\n if source[index] in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks to see if pending edits remain. Returns whether or not to end the program. | def edit_check():
if n_edits>0:
print("You still have pending edits. Do you want to save them?")
choice=input("")
if choice in confirmdict:
if UI.confirm():
return True
else:
return False
else:
print("Choices not... | [
"def check_modified(self, ):\n if not cmds.file(q=1, modified=1):\n return True\n curfile = cmds.file(q=1, sceneName=1)\n r = cmds.confirmDialog( title='Save Changes', message='Save changes to %s?' % curfile,\n but... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate fastq_dataframe for pipeline input. | def make_fastq_dataframe(file_path, barcode_version, output_path=None):
barcode_version = barcode_version.upper()
if barcode_version == 'V1':
parser = _parse_v1_fastq_path
elif barcode_version == 'V2':
parser = _parse_v2_fastq_path
else:
raise ValueError(f'Primer Version can only... | [
"def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RBAC policy is making a network visible to users in a specific tenant. Previously this network was not visible to users in that tenant. We will want to add this tenant to the members list. Also add the RBAC policy. | def rbac_create(self, event_type, payload, timestamp):
valid_types = ["network"]
event_type = payload['rbac_policy']['object_type']
action = payload['rbac_policy']['action']
if action not in RBAC_VALID_ACTIONS or event_type not in valid_types:
# I'm bored. Nothing that conce... | [
"def grant_rbac_policy(self, project_id, object_id, object_type='network'):\n policy = self.get_rbac_policies(retrieve_all=True,\n object_type=object_type,\n object_id=object_id,\n target_tena... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
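The `rbac_create` body above is truncated; below is a hedged sketch of the read-modify-write it describes — add the target tenant to the network document's members list. The document shape and helper name are assumptions, not the plugin's real API:

```python
def add_tenant_to_members(doc, target_tenant):
    """Return the network document with `target_tenant` added to its members."""
    members = set(doc.get("members", []))
    members.add(target_tenant)       # the network becomes visible to this tenant
    doc["members"] = sorted(members)
    return doc
```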
RBAC policy is making a network invisible to users in specific tenant. Previously this network was visible to users in that tenant. We will remove this tenant from the members list. Also remove the RBAC policy. | def rbac_delete(self, event_type, payload, timestamp):
policy_id = payload['rbac_policy_id']
# Read, modify, write an existing network document. For both the
# admin and user version of the document.
# Find all documents (admin and user) with the policy ID.
docs = self.index_he... | [
"def revoke_rbac_policy(self, project_id, object_id, object_type='network'):\n policy = self.get_rbac_policies(retrieve_all=True,\n object_type=object_type,\n object_id=object_id,\n target_ten... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the status of the submission. Retry until the status is "Valid", or until the request to get the submission envelope returns an error. | def wait_for_valid_status(envelope_url, http_requests):
def log_before(envelope_url):
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print('{0} Getting status for {1}'.format(now, envelope_url))
def keep_polling(response):
# Keep polling until the status is "Valid/Complete" or "Inv... | [
"def check_submission(submission):\n desc = sfn.describe_execution(executionArn=submission.execution_arn)\n status = desc[\"status\"]\n result = None\n if status == \"SUCCEEDED\":\n sfn_output = js... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
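The `wait_for_valid_status` body above cuts off before the retry wiring. Below is a minimal, standard-library sketch of the same poll-until-terminal pattern; the status field, terminal states, and timings are assumptions, not the original code:

```python
import json
import time
import urllib.request

def poll_until_terminal(envelope_url, interval=10, timeout=600):
    """Poll the envelope URL until its status is terminal or we time out."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        with urllib.request.urlopen(envelope_url) as resp:  # raises on HTTP errors
            status = json.load(resp).get("submissionState")
        if status in ("Valid", "Complete", "Invalid"):
            return status
        time.sleep(interval)  # not terminal yet: keep polling
    raise TimeoutError(f"{envelope_url} never reached a terminal state")
```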
Overrides handle so that the environ is set. | def handle(self):
self.environ = self.server._environ.copy()
BaseHTTPRequestHandler.handle(self) | [
"def _config(\n self,\n environ,\n start_response,\n set_response\n ):\n self._environ = environ\n self._start_response = start_response\n self._set_response = set_response\n # set default headers\n self.set_headers('content-type', \"application/json... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
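A self-contained sketch of the pattern in `handle` above — the server object carries shared state that each request handler copies before dispatching; all names here are illustrative:

```python
from http.server import BaseHTTPRequestHandler, HTTPServer

class EnvHandler(BaseHTTPRequestHandler):
    def handle(self):
        # Take a per-request copy of the server-wide environ before dispatch.
        self.environ = self.server._environ.copy()
        super().handle()

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(repr(self.environ).encode())

server = HTTPServer(("localhost", 8000), EnvHandler)
server._environ = {"APP_MODE": "demo"}  # shared state the handlers copy
# server.serve_forever()  # uncomment to actually serve requests
```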
Create a multi-selector form. The form consists of a label for each selector, either taken from a list of labels or else built from a prefix and a number. The buttons are labeled with the selector state. | def __init__(self, num_selectors,
label_text = [],
label_template = "Channel",
button_text = [],
button_template = "Port",
buttons = 1,
title="MultiSwitch"):
super(MultiSelectorForm, self)._... | [
"def select_labels(self) -> List[Label]:",
"def make_choose_control(field_name,\n included_label,\n included_items,\n excluded_label,\n excluded_items,\n item_to_text=str,\n it... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Pop up a 1xN selector when a button is pressed. | def popup(self, index, dummy):
mylogger.debug("multi-selector form popup(%d) invoked",index)
self.dialog = Selector_Form(index)
mylogger.debug("dialog is type %s", type(self.dialog))
self.dialog.setupUi(self.button_text, label_default="Port", cols=2)
self.dialog.setWindowTitle("IF "+str(index))
... | [
"def buttons(self, state):\n pass",
"def selectEveryNth(self, n): \n\t\tif not n:\n\t\t\tfor i in range(len(self.buttonList)):\n\t\t\t\tself.selectedFrames[i] = 0\n\t\t\t\tself.setButtonState(self.buttonList[i], 0)\n\t\t\treturn\n\t\tfor i, btn in enumerate(self.buttonList):\n\t\t\tif not (i) % n:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the state of a selector in a group. This will update the selector button text if a new state is provided; otherwise it opens a window to allow the user to select a new state. If a state is obtained either way, the button text is set to that of the new state. Otherwise the state is -1 and the text "Unknown". | def update_selector(self, index, new_state=-1):
mylogger.debug("update_selector invoked for switch %d",index)
if new_state > -1:
self.state[index] = new_state
else:
try:
self.state[index] = self.dialog.state
self.dialog.close()
except AttributeError:
# program has n... | [
"def setGroupingActive( self, state ):\n self.uiGroupBTN.setChecked(state)",
"def update_state(self):\n state = self.bridge.get_group(self.group_id)\n\n logger.debug(\"group state: %s\", pformat(state))\n\n self._on = state['state']['all_on']\n if self._on or state['action']['br... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve data from self.sources into self.directory / 'raw' and validate each file against its checksum. | def retrieve(self):
target_dir = self.directory / "raw"
os.makedirs(target_dir, exist_ok=True) # create directory if it doesn't exist
for url, filename, md5_checksum in self.sources:
if utils.is_url(url):
processing_fn = partial(
utils.fetch_url, ... | [
"def check_raws(self, _location, _date, _date_raw_data):\n try:\n # raw file names\n _raws = [_s for _s in _date_raw_data if re.match(re.escape(self.id), _s) is not None]\n # deleted?! unset pipelined as well\n if len(_raws) == 0:\n self.db_entry['ra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
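A minimal sketch of the download-then-verify step implied by `retrieve` above; the helper mirrors the pattern only and is not the project's actual `utils` module:

```python
import hashlib

def md5_matches(path, expected_md5):
    """Stream the file through MD5 and compare against the recorded checksum."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):  # read in 8 KiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_md5
```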
Load polling data for UK General Elections. | def load_polling_data(self):
polls = {}
for geo in self.geos:
poll_df = pd.read_csv(
self.directory / "raw" / f"general_election-{geo}-polls.csv", parse_dates=["to"]
).sort_values("to")
poll_df.columns = utils.sanitise(
poll_df.columns,... | [
"def load_poll_data():\n polls = []\n \n with open('./cogs/polls.json', 'r', encoding='utf-8') as poll_file:\n try:\n polls = json.load(poll_file)\n except json.JSONDecodeError:\n pass\n return polls",
"def fetch_data(self):\r\n print(\"Fetching Data from USG... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a straight average across each pollster's final poll in the last week prior to election day. Repeats for regions if regional polling is available. | def get_regional_and_national_poll_of_polls(self, polls):
election_day = self.now_date
one_week_before = election_day - pd.Timedelta(days=7)
one_month_before = election_day - pd.Timedelta(days=30)
# Use single last poll from each pollster in final week of polling then average out
... | [
"def average_quarterly(table):\n quarterly_pollutant = {'1Q 2013': [], '2Q 2013': [], '3Q 2013': [], '4Q 2013': [],\n '1Q 2014': [], '2Q 2014': [], '3Q 2014': [], '4Q 2014': [],\n '1Q 2015': [], '2Q 2015': [], '3Q 2015': [], '4Q 2015': [],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
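A hedged pandas sketch of the rule described above — keep each pollster's single latest poll in the final week, then take a straight average. Long-format columns `pollster`, `party`, `to`, and `voteshare` are assumptions:

```python
import pandas as pd

def poll_of_polls(poll_df, election_day):
    week_start = election_day - pd.Timedelta(days=7)
    final_week = poll_df[(poll_df["to"] > week_start) & (poll_df["to"] <= election_day)]
    # One row per pollster and party: each pollster's latest poll only.
    latest = final_week.sort_values("to").groupby(["pollster", "party"]).tail(1)
    return latest.groupby("party")["voteshare"].mean()
```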
Merge national polling, and geo-level polling if available, into the results dataframe. | def combine_results_and_polls(results, polls):
# Merge into previous election's results to calculate swing
results = (
results.merge(
right=polls.query('geo == "uk"')[["party", "voteshare"]].rename(
columns={"voteshare": "national_polls"}
)... | [
"def load_polling_data(self):\n polls = {}\n for geo in self.geos:\n poll_df = pd.read_csv(\n self.directory / \"raw\" / f\"general_election-{geo}-polls.csv\", parse_dates=[\"to\"]\n ).sort_values(\"to\")\n poll_df.columns = utils.sanitise(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assumes df has `ons_id` and `party` columns. | def calculate_winners(df, voteshare_col):
return (
df.sort_values(voteshare_col, ascending=False)
.groupby("ons_id")
.head(1)[["ons_id", "party"]]
.set_index("ons_id")
.party
) | [
"def add_ons_column(df,dataset):\n x = df['postcode'].values.tolist()\n ons_results = get_data(x,dataset) \n ons_df = pd.DataFrame(ons_results).drop_duplicates()\n return pd.merge(df,ons_df,on=\"postcode\",how=\"inner\")",
"def get_full_vote_info(votes_df):\n vote_counts = votes_df.groupby(['vot... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
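A small usage sketch for `calculate_winners` above, with made-up constituencies:

```python
import pandas as pd

df = pd.DataFrame({
    "ons_id":    ["E001", "E001", "E002", "E002"],
    "party":     ["con",  "lab",  "con",  "lab"],
    "voteshare": [0.45,   0.40,   0.38,   0.51],
})
print(calculate_winners(df, "voteshare"))
# ons_id
# E002    lab
# E001    con
# Name: party, dtype: object
```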
Process results data from consecutive UK General Elections (e.g. 2010 and 2015) into a single model-ready dataset ready for predicting the later (e.g. 2015) election. | def process(self):
processed_directory = self.directory / "processed"
os.makedirs(processed_directory, exist_ok=True) # create directory if it doesn't exist
# Import general election results & polling data
results_dict = self.load_results_data()
polls_full = self.load_polling_d... | [
"def preprocess_by_country_all_years(training_set, submit_rows_index, startyear=1972):\n\n # Rename columns to make indexing easier\n info_cols = training_set.iloc[:, -3:]\n training_set = training_set.iloc[:, :-3]\n training_set = training_set.rename(lambda x: int(x.split(' ')[0]), axis=1)\n trainin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set dialog proxies from the proxies dict. | def set_proxies(self, proxies):
if proxies:
protocols = ["http", "https", "ftp", "socks"]
for protocol in protocols:
entry_id = protocol + "_proxy_entry"
entry_widget = self.ui.get_object(entry_id)
port_id = protocol + "_proxy_port"
... | [
"def set_proxy(self, host, port):\n self.proxy = {\n 'host': host,\n 'port': port\n }",
"def setproxy(self,proxytype=None,addr=None,port=None,rdns=True,username=None,password=None):\r\n self.__proxy = (proxytype,addr,port,rdns,username,password)",
"def update_all_proxy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a weighted box for a set of boxes. | def get_weighted_box(boxes, conf_type='avg'):
box = np.zeros(6, dtype=np.float32)
conf = 0
conf_list = []
for b in boxes:
box[2:] += (b[1] * b[2:])
conf += b[1]
conf_list.append(b[1])
box[0] = boxes[0][0]
if conf_type == 'avg':
box[1] = conf / len(boxes)
elif... | [
"def scale_bbox(self, boxes, old_width, new_width):\n boxes = copy.deepcopy(boxes)\n scale_percent = new_width / old_width\n for b in boxes:\n b.xmin = int(b.xmin * scale_percent)\n b.ymin = int(b.ymin * scale_percent)\n b.xmax = int(b.xmax * scale_percent)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
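The `get_weighted_box` body above is cut off inside the confidence branch. As a hedged reading, the usual weighted-boxes-fusion step normalises the accumulated coordinates by the total confidence; a self-contained sketch under that assumption (labels assumed numeric):

```python
import numpy as np

def weighted_box(boxes, conf_type="avg"):
    """boxes: list of [label, confidence, x1, y1, x2, y2] entries."""
    box = np.zeros(6, dtype=np.float32)
    conf = sum(float(b[1]) for b in boxes)
    for b in boxes:
        box[2:] += float(b[1]) * np.asarray(b[2:], dtype=np.float32)  # weight coords
    box[0] = boxes[0][0]  # numeric label taken from the first box in the cluster
    box[1] = conf / len(boxes) if conf_type == "avg" else max(float(b[1]) for b in boxes)
    box[2:] /= conf  # normalise by total confidence
    return box
```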