| query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list of strings, 19–20 items) | metadata (dict) |
|---|---|---|---|
Formats the location values, separating keys, values, and k/v pairs >>> l = Location(42.1, 23.5, "test") | def format_geocommit(self, keyval_separator, entry_separator):
end = entry_separator
sep = keyval_separator
msg = "lat" + sep + str(self.lat) + end
msg += "long" + sep + str(self.long) + end
for attr in self.optional_keys:
if hasattr(self, attr):
... | [
"def coords_dict_to_coords_string(coords):\n longitude, latitude = None, None\n for k,v in coords.items():\n if \"at\" in k:\n latitude = v\n if \"ong\" in k:\n longitude = v\n if not longitude and latitude:\n print(\"Unable to identify longitude and latitude keys... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
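The `format_geocommit` document above is cut off inside the loop over `optional_keys`. A minimal sketch of how the loop plausibly completes, assuming each optional attribute present on the object is appended with the same `key<sep>value<end>` pattern; the loop body and the `getattr` use are assumptions, not the verified original:

```python
def format_geocommit(self, keyval_separator, entry_separator):
    end = entry_separator
    sep = keyval_separator
    msg = "lat" + sep + str(self.lat) + end
    msg += "long" + sep + str(self.long) + end
    for attr in self.optional_keys:
        if hasattr(self, attr):
            # optional keys follow the same "key<sep>value<end>" pattern
            msg += attr + sep + str(getattr(self, attr)) + end
    return msg
```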
Formats the location using the long geocommit format >>> l = Location(42.1, 23.5, "test") >>> l.format_long_geocommit() | def format_long_geocommit(self):
geocommit = "geocommit (1.0)\n"
geocommit += self.format_geocommit(": ", "\n")
geocommit += "\n\n"
return geocommit | [
"def format_short_geocommit(self):\r\n geocommit = \"geocommit(1.0): \"\r\n geocommit += self.format_geocommit(\" \", \", \")\r\n geocommit += \";\"\r\n\r\n return geocommit",
"def longitude_string(self):\n if self.coord_format == 'dd':\n formatted_longitude = self.lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Formats the location using the short geocommit format >>> l = Location(42.1, 23.5, "test") >>> l.format_short_geocommit() | def format_short_geocommit(self):
geocommit = "geocommit(1.0): "
geocommit += self.format_geocommit(" ", ", ")
geocommit += ";"
return geocommit | [
"def format_long_geocommit(self):\r\n geocommit = \"geocommit (1.0)\\n\"\r\n geocommit += self.format_geocommit(\": \", \"\\n\")\r\n geocommit += \"\\n\\n\"\r\n\r\n return geocommit",
"def from_short_format(data):\r\n m = re.search(\"geocommit\\(1\\.0\\): ((?:[a-zA-Z0-9_-]+ [^,;... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a string in short format to create an instance of the class. >>> l = Location.from_short_format( | def from_short_format(data):
    m = re.search(r"geocommit\(1\.0\): ((?:[a-zA-Z0-9_-]+ [^,;]+, )*)([a-zA-Z0-9_-]+ [^,;]+);", data)
if m is None:
return None
values = m.group(1) + m.group(2)
data = dict()
    for keyval in re.split(r",\s+", values):
ke... | [
"def parse(location):\r\n s = strip_unit(normalize(location))\r\n logger.debug('parse: normalized and stripped %r to %r' % (location, s))\r\n tokens = token_split(s)\r\n len_tokens = len(tokens)\r\n result_list = []\r\n\r\n for token_types in address_combinations():\r\n if len(token_types) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
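The parsing loop in `from_short_format` is truncated mid-statement. A hedged reconstruction of the remainder: each `key value` pair is split on its first space and collected into a dict (the real code presumably builds a `Location` from that dict, but its constructor is not shown here):

```python
import re

def from_short_format(data):
    m = re.search(r"geocommit\(1\.0\): ((?:[a-zA-Z0-9_-]+ [^,;]+, )*)"
                  r"([a-zA-Z0-9_-]+ [^,;]+);", data)
    if m is None:
        return None
    values = m.group(1) + m.group(2)
    parsed = dict()
    for keyval in re.split(r",\s+", values):
        key, val = keyval.split(" ", 1)  # split "lat 42.1" on the first space
        parsed[key] = val
    return parsed  # assumed: the original likely constructs a Location here
```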
Creates a JSON request string for location information from google. The access points are a map from mac addresses to access point information dicts. >>> wlp = WifiLocationProvider() >>> wlp.request_dict()["wifi_towers"] | def request_dict(self):
ap_map = self.get_access_points()
if not ap_map:
return None
request = dict()
request["version"] = "1.1.0"
request["host"] = "localhost"
request["request_address"] = True
request["address_language"] = "en_GB"
... | [
"def google(self):\r\n prefix ='https://maps.googleapis.com/maps/api/staticmap?center='\r\n middle = '&zoom=14&size=400x400&markers='\r\n suffix = '&key=AIzaSyD5nqmDGFH1SUZxJAYVtFHP7RNjjFE9CHg'\r\n marker = '+'.join(self.placeToSearch) # marker in google format, no space but + separator\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract features from points. | def extract_feat(self, points, img_metas=None):
voxels, num_points, coors = self.voxelize(points)
voxel_features = self.voxel_encoder(voxels, num_points, coors)
batch_size = coors[-1, 0].item() + 1
x = self.middle_encoder(voxel_features, coors, batch_size)
x = self.backbone(x)
... | [
"def extract_feat(self, points, img, img_metas):\n img_feats = self.extract_img_feat(img, img_metas)\n pts_feats = self.extract_pts_feat(points, img_feats, img_metas)\n return (img_feats, pts_feats)",
"def features_from_points(self):\n\n filtered_data = self.load_filtered_data()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply hard voxelization to points. | def voxelize(self, points):
voxels, coors, num_points = [], [], []
for res in points:
res_voxels, res_coors, res_num_points = self.voxel_layer(res)
voxels.append(res_voxels)
coors.append(res_coors)
num_points.append(res_num_points)
voxels = torch.c... | [
"def voxelize(self, points):\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
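The `voxelize` document stops right where the per-sample results are concatenated. A sketch of the conventional completion (the MMDetection3D-style pattern), in which each coordinate tensor is left-padded with its batch index before concatenation; writing it as a free function is an assumption:

```python
import torch
import torch.nn.functional as F

def batch_voxel_outputs(voxels, coors, num_points):
    # Stack per-sample tensors; tag every coordinate row with its batch
    # index so the middle encoder can tell samples apart.
    voxels = torch.cat(voxels, dim=0)
    num_points = torch.cat(num_points, dim=0)
    coors_batch = []
    for i, coor in enumerate(coors):
        coor_pad = F.pad(coor, (1, 0), mode="constant", value=i)
        coors_batch.append(coor_pad)
    coors = torch.cat(coors_batch, dim=0)
    return voxels, num_points, coors
```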
Load the grid data from the sample earth_relief file. | def fixture_grid():
return load_earth_relief(registration="pixel") | [
"def fixture_grid():\n return load_earth_relief(registration=\"gridline\")",
"def load_grid(self):\n if not hasattr(self, 'grid_filename'):\n raise AttributeError(\"Grid filename is not defined!\")\n self.grid = np.loadtxt(self.grid_filename, \n delimiter ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
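`fixture_grid` has lost its decorator in extraction; in PyGMT's test suite it would plausibly be registered as a module-scoped fixture named `grid` (the scope and name are assumptions inferred from the `grid` parameter of the tests below):

```python
import pytest
from pygmt.datasets import load_earth_relief

@pytest.fixture(scope="module", name="grid")
def fixture_grid():
    # Pixel-registered sample grid shared by the grdfilter tests
    return load_earth_relief(registration="pixel")
```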
grdfilter an input DataArray, and output as DataArray. | def test_grdfilter_dataarray_in_dataarray_out(grid):
result = grdfilter(grid=grid, filter="g600", distance="4")
# check information of the output grid
assert isinstance(result, xr.DataArray)
assert result.coords["lat"].data.min() == -89.5
assert result.coords["lat"].data.max() == 89.5
assert res... | [
"def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
grdfilter an input DataArray, and output to a grid file. | def test_grdfilter_dataarray_in_file_out(grid):
with GMTTempFile(suffix=".nc") as tmpfile:
result = grdfilter(grid, outgrid=tmpfile.name, filter="g600", distance="4")
assert result is None # grdfilter returns None if output to a file
result = grdinfo(tmpfile.name, per_column=True)
a... | [
"def test_grdfilter_file_in_dataarray_out():\n outgrid = grdfilter(\n \"@earth_relief_01d\", region=\"0/180/0/90\", filter=\"g600\", distance=\"4\"\n )\n assert isinstance(outgrid, xr.DataArray)\n assert outgrid.gmt.registration == 1 # Pixel registration\n assert outgrid.gmt.gtype == 1 # Geo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
grdfilter an input grid file, and output as DataArray. | def test_grdfilter_file_in_dataarray_out():
outgrid = grdfilter(
"@earth_relief_01d", region="0/180/0/90", filter="g600", distance="4"
)
assert isinstance(outgrid, xr.DataArray)
assert outgrid.gmt.registration == 1 # Pixel registration
assert outgrid.gmt.gtype == 1 # Geographic type
# ... | [
"def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
grdfilter an input grid file, and output to a grid file. | def test_grdfilter_file_in_file_out():
with GMTTempFile(suffix=".nc") as tmpfile:
result = grdfilter(
"@earth_relief_01d",
outgrid=tmpfile.name,
region=[0, 180, 0, 90],
filter="g600",
distance="4",
)
assert result is None # return ... | [
"def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that grdfilter fails correctly. | def test_grdfilter_fails():
with pytest.raises(GMTInvalidInput):
grdfilter(np.arange(10).reshape((5, 2))) | [
"def test_filter_errors(self):\n\n with self.assertRaises(ValueError):\n self.test_table.filter()\n\n with self.assertRaises(ValueError):\n self.test_table.filter(mode='wrongmode', Property='Property')",
"def test_no_filter(self):\n self.assertRaises(IndexError, svo.Filt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate Pydantic Model files given the Postman Collection input file. | def generate_models(input_file):
if not os.path.exists(input_file):
console.print(
f":pile_of_poo: [bold red]No file found at the given path:[/bold red] [i yellow]{input_file}[/i yellow]"
)
exit(1)
# TODO: Add try/catch for other possible errors
collection = postman.load... | [
"def build_it(swagger_file: str):\n load_stable(swagger_file)\n prep_package(model_package)\n write_modules(model_package)",
"def parse_file(self, file_path, filename):\n try:\n f = open(file_path, \"r\")\n print(f\"{file_path}:\")\n\n # Returns JSON object as a di... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that update_status creates a correctly formatted url. Compares the url created by update_status to correct_url | def test_update_status(self):
content_url = 'https://api.github.com'
status = 'success'
token = '123'
correct_url = 'https://123:x-oauth-basic@api.github.com/'
post_req = update_status(content_url, status, token)
self.assertEqual(correct_url, post_req.url)
"""
... | [
"def _checkServiceURL(self, serviceName, options):\n url = self._getURL(serviceName, options)\n system = options['System']\n module = options['Module']\n self.log.info(\"Checking URLs for %s/%s\" % (system, module))\n urlsConfigPath = os.path.join('/Systems', system, self.setup, 'URLs', module)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper method for making a request to the Blockstore REST API | def api_request(method, url, **kwargs):
if not settings.BLOCKSTORE_API_AUTH_TOKEN:
raise ImproperlyConfigured("Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.")
kwargs.setdefault('headers', {})['Authorization'] = f"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}"
response = requests.reques... | [
"def GetBlock(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _call(self, method, endpoint, content=None, params=None):\n\t\tparams = params or {}\n\t\tcontent = conte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given data about a Collection returned by any blockstore REST API, convert it to a Collection instance. | def _collection_from_response(data):
return Collection(uuid=UUID(data['uuid']), title=data['title']) | [
"def get_collection():",
"def get_collection(self, collection):\n return self.client[self.DATABASE][collection]",
"def _translate_to_collection(\n self,\n collection,\n recursive=False,\n run_conditions=[],\n resource_conditions=[],\n variety_conditions=[],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given data about a Bundle returned by any blockstore REST API, convert it to a Bundle instance. | def _bundle_from_response(data):
return Bundle(
uuid=UUID(data['uuid']),
title=data['title'],
description=data['description'],
slug=data['slug'],
# drafts: Convert from a dict of URLs to a dict of UUIDs:
drafts={draft_name: UUID(url.split('/')[-1]) for (draft_name, ur... | [
"def bundle(self):\n return dict(bundle=self.data['bundle'])",
"def build_bundle(self, obj=None, data=None, request=None, objects_saved=None):\n\n return Bundle(\n obj=obj,\n data=data,\n request=request,\n objects_saved=objects_saved\n )",
"def b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given data about a Draft returned by any blockstore REST API, convert it to a Draft instance. | def _draft_from_response(data):
return Draft(
uuid=UUID(data['uuid']),
bundle_uuid=UUID(data['bundle_uuid']),
name=data['name'],
updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),
files={
path: DraftFile(path=path, **file)
for path, ... | [
"def get(self, oauth, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n return d.type.marshal_draft(d.get_draft(draft_id))",
"def article2draft(article):\n draft = Draft(\n article._content,\n article.metadata,\n article.settings,\n article... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new bundle. Note that description is currently required. | def create_bundle(collection_uuid, slug, title="New Bundle", description=""):
result = api_request('post', api_url('bundles'), json={
"collection_uuid": str(collection_uuid),
"slug": slug,
"title": title,
"description": description,
})
return _bundle_from_response(result) | [
"def create_bundle(self):\n self._highest_bundle_id += 1\n bundle = Bundle(document=self, bundle_id=str(self._highest_bundle_id))\n self.bundles.append(bundle)\n bundle.number = len(self.bundles)\n return bundle",
"def create_bundle_info(self, template):\n # We don't supp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a bundle's title, description, slug, or collection. | def update_bundle(bundle_uuid, **fields):
assert isinstance(bundle_uuid, UUID)
data = {}
# Most validation will be done by Blockstore, so we don't worry too much about data validation
for str_field in ("title", "description", "slug"):
if str_field in fields:
data[str_field] = fields.... | [
"def update_in_place(self, request, original_bundle, new_data):\r\n\r\n # TODO: Is this the place to use MongoDB atomic operations to update the document?\r\n\r\n from tastypie.utils import dict_strip_unicode_keys\r\n original_bundle.data.update(**dict_strip_unicode_keys(new_data))\r\n\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the specified draft, removing any staged changes/files/deletes. Does not return any value. | def delete_draft(draft_uuid):
api_request('delete', api_url('drafts', str(draft_uuid))) | [
"def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })",
"def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return Ht... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the details of the specified bundle version | def get_bundle_version(bundle_uuid, version_number):
if version_number == 0:
return None
version_url = api_url('bundle_versions', str(bundle_uuid) + ',' + str(version_number))
return api_request('get', version_url) | [
"def fetch_version(bundle_id, store):\n info_url = \"http://itunes.apple.com/\" + store + \"/lookup?bundleId=\" + bundle_id\n info = requests.get(info_url).json()\n return info['results'][0]['version']",
"def version_info(): \n return VERSION_s",
"def _get_via_app_bundle(self, path: pathl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a list of the files in the specified bundle version | def get_bundle_version_files(bundle_uuid, version_number):
if version_number == 0:
return []
version_info = get_bundle_version(bundle_uuid, version_number)
return [BundleFile(path=path, **file_metadata) for path, file_metadata in version_info["snapshot"]["files"].items()] | [
"def get_versioned_files():\n output = run(['bzr', 'ls', '-VR'])\n return output.splitlines()",
"def get_version_files(self, package, version):\n with self._conn.begin():\n return {\n row.filename\n for row in self._conn.execute(\n \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a dictionary of the links in the specified bundle version | def get_bundle_version_links(bundle_uuid, version_number):
if version_number == 0:
return {}
version_info = get_bundle_version(bundle_uuid, version_number)
return {
name: LinkDetails(
name=name,
direct=LinkReference(**link["direct"]),
indirect=[LinkReferen... | [
"def get_all_bundle_hrefs():\n\n print '*** Printing all available bundle hrefs...'\n common.bundle_list_map(print_href)",
"def get_bundle_links(bundle_uuid, use_draft=None):\n bundle = get_bundle(bundle_uuid)\n if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-tes... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a dict of all the files in the specified bundle. Returns a dict where the keys are the paths (strings) and the values are BundleFile or DraftFile tuples. | def get_bundle_files_dict(bundle_uuid, use_draft=None):
bundle = get_bundle(bundle_uuid)
if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object
return get_draft(draft_uuid).files
... | [
"def get_bundle_files(bundle_uuid, use_draft=None):\n return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating",
"def get_bundle_version_files(bundle_uuid, version_number):\n if version_number == 0:\n return []\n version_info = get_bu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an iterator over all the files in the specified bundle or draft. | def get_bundle_files(bundle_uuid, use_draft=None):
return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating | [
"def get_files(self, block):\n \n raise NotImplementedError('get_files')",
"def getFiles(self, getContent=False):\n for index, file in enumerate(self.files):\n if getContent:\n logger.debug(\n \"get file {} from service {}\".format(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a dict of all the links in the specified bundle. Returns a dict where the keys are the link names (strings) and the values are LinkDetails or DraftLinkDetails tuples. | def get_bundle_links(bundle_uuid, use_draft=None):
bundle = get_bundle(bundle_uuid)
if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object
return get_draft(draft_uuid).links
e... | [
"def get_bundle_version_links(bundle_uuid, version_number):\n if version_number == 0:\n return {}\n version_info = get_bundle_version(bundle_uuid, version_number)\n return {\n name: LinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indire... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create or overwrite the file at 'path' in the specified draft with the given contents. To delete a file, pass contents=None. If you don't know the draft's UUID, look it up using get_or_create_bundle_draft(). Does not return anything. | def write_draft_file(draft_uuid, path, contents):
api_request('patch', api_url('drafts', str(draft_uuid)), json={
'files': {
path: encode_str_for_draft(contents) if contents is not None else None,
},
}) | [
"def delete_file_contents(path):\n with open(path,'w'):\n pass",
"def create_file(path: Path, content: str) -> None:\n path.touch()\n with path.open(\"w\") as f:\n f.write(content)",
"def create_draft(metadata_path, publish, user, owners, vanity_pid):\n recid = None\n identity = get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
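`write_draft_file` calls a helper `encode_str_for_draft` that is not shown anywhere in this section. A plausible sketch, assuming Blockstore drafts expect base64-encoded file contents (the encoding scheme is an assumption):

```python
import base64

def encode_str_for_draft(contents):
    # Hypothetical helper: accept str or bytes, return base64 text
    if isinstance(contents, str):
        contents = contents.encode("utf-8")
    return base64.b64encode(contents).decode("ascii")
```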
Create or replace the link with the given name in the specified draft so that it points to the specified bundle version. To delete a link, pass bundle_uuid=None, version=None. If you don't know the draft's UUID, look it up using get_or_create_bundle_draft(). Does not return anything. | def set_draft_link(draft_uuid, link_name, bundle_uuid, version):
api_request('patch', api_url('drafts', str(draft_uuid)), json={
'links': {
link_name: {"bundle_uuid": str(bundle_uuid), "version": version} if bundle_uuid is not None else None,
},
}) | [
"def add_link_to_bundle(request, bundle_id):\n\n # ensure bundle exists\n bundle = get_object_or_404(Bundle, id=bundle_id)\n\n # get/create link for given url\n url = request.data.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationEr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure that the given Blockstore URL is accessible from the end user's browser. | def force_browser_url(blockstore_file_url):
# Hack: on some devstacks, we must necessarily use different URLs for
# accessing Blockstore file data from within and outside of docker
# containers, but Blockstore has no way of knowing which case any particular
# request is for. So it always returns a URL s... | [
"def _validate_url(self, url):\n return",
"def provide_url_validity(self, item):\n return True",
"def is_blocked(url):\n base_url = url[(url.find(\"://\") + 3):] # Strip any protocol\n base_url = base_url[: base_url.find(\"/\")] # Strip path\n result = BlockedDomain.objects(url__startswit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the forward pass for the tanh activation function. | def tanh_forward(self, x):
#############################################################################
# TODO: Implement the tanh forward pass. #
#############################################################################
out = np.tanh(x)
... | [
"def tanh():\r\n\r\n\treturn activations.tanh",
"def forward_hidden_activation(self, X):\n return np.tanh(X)",
"def test_tanh_activation(self):\n self.assertEqual([0.099668, 0.099668], list(\n af.TanH().output(np.array([0.1, 0.1]))))\n self.assertEqual([0.990066, 0.990066], list(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the forward pass of a rectified linear unit (ReLU). | def relu_forward(self, x):
#out = None
#############################################################################
# TODO: Implement the ReLU forward pass. #
#############################################################################
out = n... | [
"def forward(self, x):\n # Flatten\n x = x.view(-1, 32*32*3)\n\n # FC->ReLU\n x = F.relu(self.fc1(x))\n if self.apply_drop:\n x = self.fc_drop1(x)\n \n # FC->ReLU\n x = F.relu(self.fc2(x))\n if self.apply_drop:\n x = self.fc_drop2(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes the backward pass for a layer of rectified linear units (ReLUs). | def relu_backward(self, dUpper, cache):
x = cache
#############################################################################
# TODO: Implement the ReLU backward pass. #
#############################################################################
... | [
"def backward_pass(self, accum_grad):\n raise NotImplementedError()",
"def conv_relu_backward(dout, cache):\n conv_cache, relu_cache = cache\n da = layers.relu_backward(dout, relu_cache)\n dx, dw, db = layers.conv_backward_fast(da, conv_cache)\n return dx, dw, db",
"def l_model_backward(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
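All three activation documents above are truncated inside their TODO banners. A minimal self-contained sketch of the ReLU forward/backward pair they describe, written as free functions (the cache layout is assumed):

```python
import numpy as np

def relu_forward(x):
    out = np.maximum(0, x)   # zero out negative activations
    cache = x                # keep the input for the backward pass
    return out, cache

def relu_backward(dout, cache):
    x = cache
    dx = dout * (x > 0)      # gradient passes only where the input was positive
    return dx
```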
returns count of sequences in given fasta file(s). The input_fasta_files is a list of fasta filepaths | def get_sequence_count(input_fasta_files):
# Correction for the case that only one file passed
if type(input_fasta_files)==str:
input_fasta_files=[input_fasta_files]
count=0
for n in input_fasta_files:
fasta_f=open(n,'U')
for label,seq in MinimalFastaParser(fasta_f)... | [
"def count_seqs_in_filepaths(fasta_filepaths, seq_counter=count_seqs):\r\n total = 0\r\n counts = []\r\n inaccessible_filepaths = []\r\n # iterate over the input files\r\n for fasta_filepath in fasta_filepaths:\r\n # if the file is actually fastq, use the fastq parser.\r\n # otherwise u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds list of primer objects from initial_primers | def construct_primers(initial_primers):
primers=[]
for n in initial_primers:
primers.append(ProspectivePrimer(n[0],n[1],initial_primers[n]))
return primers | [
"def initialize_priors(self):\n assert self.components is not None, 'Cannot initialize priors when components is None.'\n priors = [x.initialize_priors() for x in self.components]\n return priors",
"def filter_primers(self, primers):\n obj = PDPGenomeAmplicons(self.name + \"_filtered\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
convert DNA codes to numeric values for bitwise comparisons; returns a numeric list corresponding to the nucleotide sequence | def convert_to_numeric(sequence):
int_mapped_seq=[]
DNA_to_numeric = get_DNA_to_numeric()
for n in sequence:
int_mapped_seq.append(DNA_to_numeric[n])
return int_mapped_seq | [
"def dna_to_number(dna):\n return list(map(int, dna.translate(str.maketrans(\"ATCG\", \"0123\"))))",
"def nucleotide_numbering():\n nucleotide_to_number = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n number_to_nucleotide = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}\n return nucleotide_to_number, number_to_nucleotide"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
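`convert_to_numeric` depends on `get_DNA_to_numeric`, which is not shown. A hedged sketch of the usual IUPAC bitmask encoding that makes the advertised bitwise comparisons work; the exact mapping in the source is an assumption:

```python
def get_DNA_to_numeric():
    # One bit per canonical base; degenerate codes are bitwise ORs of
    # their members, so (seq_bits & primer_bits) != 0 tests compatibility.
    mapping = {"A": 1, "C": 2, "G": 4, "T": 8}
    mapping.update({
        "R": mapping["A"] | mapping["G"],  # puRine
        "Y": mapping["C"] | mapping["T"],  # pYrimidine
        "N": 15,                           # any base
    })
    return mapping
```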
returns a corrected unaligned index based on aligned index | def get_corrected_index(seq,
aligned_index):
# Counts the number of nucleotides in aligned sequence, returns
    # count of nucleotides occurring before aligned index reached
slice_seq=seq[0:aligned_index]
# If different gap characters used, may need to modify this
# In curre... | [
"def _get_aligned_index(alignment: List[tuple], index: int):\n aligned_index = 0\n idx = 0\n\n while idx < index:\n if alignment[aligned_index][0] != EPS:\n idx += 1\n aligned_index += 1\n while alignment[aligned_index][0] == EPS:\n aligned_index += 1\n return aligned_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
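The body of `get_corrected_index` is cut off exactly where gaps are handled. A compact equivalent under the assumption that `-` and `.` are the only gap characters (the source itself warns this may need modification for other gap conventions):

```python
def get_corrected_index(seq, aligned_index):
    # Unaligned index = count of non-gap characters before aligned_index
    return sum(1 for c in seq[:aligned_index] if c not in "-.")
```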
Appends upstream and downstream sequence information for primer hit. Because some sequences may be hit near the 5' or 3' end of a sequence read, it is necessary to append N's to the upstream or downstream region. This makes both visual inspection of the primers easier and allows for alignment objects to be loaded given a ... | def append_primer_hit(primer,
label,
hit_index,
region_slice,
overall_length,
unaligned_seq,
primer_len):
primer.match_count+=1
primer.labels.append(label.split()[0])
... | [
"def primer_start_fix(self):\r\n #TODO this function will not be used anymore, remove?\r\n if self.type in [\"forward_primer\", \"reverse_primer\", \"PCR_product\"]:\r\n self.start += 1\r\n if self.type == \"region\" and self.source == \"Primer3\":\r\n # this is the region... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
searches through integer mapped sequence to find specific matches. This function does not append data from sequences; rather, its purpose is to eliminate nonspecific primers before the sensitive primers (along with the associated sequence data) are built. | def find_specific_primer_matches(primers,
integer_mapped_seq,
deletion_threshold,
seq_count,
sequence_length,
label,
unali... | [
"def handle_seq(seq, barcode_map, result_dict):\n for i in range(len(seq)):\n for barcode in barcode_map.keys():\n possible_match = seq[i: i + len(barcode)]\n if possible_match == barcode:\n result_dict[barcode][i] += 1",
"def find_match(line,dic):\n seqid = line[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterates list of primer objects, calculates percent matches | def calculate_percent_match(primers,
seq_count,
exclude_seq_count=1):
# Calculate percent of sequences that are 'hit' by each primer
for n in range(len(primers)):
# Calculate percent perfect match
primers[n].percent_match=float(primers[n].m... | [
"def score(self):\n scores = {}\n all_matches_finished = True\n for match in self.match_set.all():\n if not match.finished:\n all_matched_finished = False\n continue\n match.score()\n for competitor in match.competitor_set.all():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Appends standard aligned index value to ProspectivePrimer objects | def append_std_aligned_index(primers,
standard_index_seq,
region_slice):
for n in primers:
n.std_index = True
standard_unaligned_index = get_corrected_index(standard_index_seq,
n.aligned_index)
# 5' for forward primer wo... | [
"def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reinde... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CASSANDRA-9871: Test that we can replace a node that is shut down gracefully. | def replace_shutdown_node_test(self):
self._replace_node_test(gently=True) | [
"def testShutdownNode1(self):\n self._testDeleteNode('shutdown')",
"def test_node_graceful_shutdown(self, proc_info, controller_node):\n launch_testing.asserts.assertExitCodes(proc_info, process=controller_node)",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.clus... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
When starting a node from a clean slate with the same address as an existing down node, the node should error out even when auto_bootstrap = false (or the node is a seed) and tell the user to use replace_address. CASSANDRA-10134 | def fail_without_replace_test(self):
debug("Starting cluster with 3 nodes.")
cluster = self.cluster
cluster.populate(3)
node1, node2, node3 = cluster.nodelist()
cluster.seeds.remove(node3)
NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
if DISABLE_VNODES:
... | [
"def reboot(self, node):",
"def cmd_nodeinit(args):\n\n if args.nodekey_secretname is None:\n raise Error(\"--nodekey-secretname is required\")\n\n conf = load_config(args)\n\n token = args.token or get_workload_token()\n genesis = get_blob(token, conf.bucket, conf.genesis_blob, isjson=True)\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
To handle situations such as failed disk in a JBOD, it may be desirable to replace a node without bootstrapping. In such scenarios best practice advice has been to wipe the node's system keyspace data, set the initial tokens via cassandra.yaml, startup without bootstrap and then repair. Starting the node as a replaceme... | def unsafe_replace_test(self):
debug('Starting cluster with 3 nodes.')
cluster = self.cluster
cluster.populate(3)
cluster.set_batch_commitlog(enabled=True)
node1, node2, node3 = cluster.nodelist()
cluster.seeds.remove(node3)
NUM_TOKENS = os.environ.get('NUM_TOKENS... | [
"def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_V... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that replace fails when there are insufficient replicas. CASSANDRA-11848 | def replace_with_insufficient_replicas_test(self):
debug("Starting cluster with 3 nodes.")
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
if DISABLE_VNODES:
num_tokens = 1
else:
# a little hacky but gre... | [
"def test_redis_increase_replica_count_usual_case():",
"def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)",
"def unsafe_replace_test(self):\n debug('Starting cluster with 3 nodes.')\n cluster = self.cluster\n cluster.populate(3)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that multi-DC replace works when rf=1 on each DC | def multi_dc_replace_with_rf1_test(self):
cluster = self.cluster
cluster.populate([1, 1])
cluster.start()
node1, node2 = cluster.nodelist()
node1 = cluster.nodes['node1']
yaml_config = """
# Create the keyspace and table
keyspace: keyspace1
keyspa... | [
"def test_data_source_soaps_id_replace_post(self):\n pass",
"def test_replace_namespaced_replica_set(self):\n pass",
"def test_replace_groups(self):\n pass",
"def test_replace_group(self):\n pass",
"def test_replace_identity(self):\n pass",
"def test_replace_namespaced_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize our `Finitefield` object with a given `prime` number | def __init__(self, prime):
if prime != 0: # Check if prime is different from zero
self.prime = prime # Assign it
else:
        raise ValueError("prime must be non-zero") # Raise an error if prime is zero | [
"def __init__(s, p):\n Zmod.__init__(s, p)\n if s.element_class != FiniteFieldElement:\n raise ArithmeticError(\"Invalid Prime : %d\" % p)\n s.p = p",
"def __init__(self, prime, server):\n self.N = prime\n self.g = 2\n self.k = 3\n self.server = server",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtain equivalence class of a certain number. | def equivalence(self, n):
return n % self.prime | [
"def classify_number(number):\n\tget_divisors = divisors(number)\n\tsum_divisors = sum_numbers(get_divisors)\n\t\n\tif sum_divisors > number:\n\t\treturn \"%d is an abundant number\" % number\n\telif sum_divisors == number:\n\t\treturn \"%d is a perfect number\" % number\n\telif sum_divisors < number:\n\t\treturn \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Obtain this finite field's `prime` number. | def get_prime(self):
return self.prime | [
"def prime(self):\n return self.__p",
"def get_prime(self):\n if(not self._constructed): raise EGCSUnconstructedStateError()\n return self._prime",
"def is_prime_field(cls) -> bool:\n return cls._degree == 1",
"def rf_prime(FpFmZ, alpha):\n return rf_prime2(FpFmZ, alpha)[0]",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
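A short usage sketch tying the three `Finitefield` rows together; the class name and methods come from the snippets, and the expected values follow directly from `n % prime`:

```python
f = Finitefield(7)
assert f.get_prime() == 7
assert f.equivalence(10) == 3   # 10 ≡ 3 (mod 7)
assert f.equivalence(-1) == 6   # Python's % yields a non-negative residue
```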
Returns True if node i is a leaf, i.e. if i has no children | def is_leaf(self, i):
return len(self.children[i]) == 0 or len(self.pq[i]) == 0 | [
"def is_leaf(self):\n if len(self.children) == 0: #If the Node has no children, it's a leaf\n return True\n else:\n return False",
"def is_leaf(self) -> bool:\r\n return self.weight > 0 and self.subtrees == []",
"def isLeaf(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gives the children of node i that has elements elems. In this version, it grabs all 2-partitions if they are not already there and caches them in children[i]. | def get_children(self, i, elems):
# if len(elems) == 1:
# return []
# elif self.explored[i]:
# return self.children[i]
# else:
self.children[i], self.children_elems[i] = self._get_children(list(elems)) # all_two_partitions(list(elems))
# self.update_... | [
"def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]",
"def childrenIter(self, preload = WITH_CONTENT_AND_TAGS):\n return self.kitab.nodeChildrenIter(self.idNum, preload)",
"def get_children_elements(self):\n\n pass",
"def children_recursiv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the node corresponding to the given elements, creating a new id if needed. | def record_node(self, elements: frozenset) -> int:
logging.debug('get node id from elements %s', str(elements))
if elements not in self.elems2node:
logging.debug('get node id from elements %s. new node! %s', str(elements), self.next_id)
logging.debug('Clusters =%s ', str(self.clu... | [
"def get_by_id( elements, id ):\n for e in elements:\n if e.get('id',None) == id:\n return e\n ret = get_by_id(e,id)\n if ret is not None:\n return ret\n return None",
"def update_node_id(node: Element) -> None:\n new_ids: list[str] = []\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Push RSPECs to Jira | def push_rspecs(host, auth, rspecs):
for rspec in rspecs:
description = rspec["fields"]["description"]
click.echo(f"Pushing {rspec['key']} ", err=True)
data = {
"update": {
"description": [
{
"set": description
... | [
"def jira_main(config):\n # set up the counters\n counters = Counters()\n # prepare for getting issues\n authenticate = (config.username, config.password)\n server = config.server\n api = config.jira_rest_api\n #\n # Get the list of projects. We'll process issues for each project\n proj_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve metadata describing an arrayset artifact. | def get_model_arrayset_metadata(database, model, aid, arrays=None, statistics=None, unique=None):
if isinstance(arrays, str):
arrays = slycat.hyperchunks.parse(arrays)
if isinstance(statistics, str):
statistics = slycat.hyperchunks.parse(statistics)
if isinstance(unique, str):
unique... | [
"def get_assets_metadata(self):\n return # osid.Metadata",
"def get_assets_metadata(self):\n return Metadata(**settings.METADATA['asset_ids'])",
"def get_assessments_metadata(self):\n return # osid.Metadata",
"def GetMetadata(self):\n return self.dict['meta']",
"def _getAllMeta(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start a new model array set artifact. | def put_model_arrayset(database, model, aid, input=False):
model = database.get('model',model["_id"])
slycat.web.server.update_model(database, model, message="Starting array set %s." % (aid))
storage = uuid.uuid4().hex
with slycat.web.server.hdf5.lock:
with slycat.web.server.hdf5.create(storage)... | [
"def autoCreateSet(self, layer):\n \n pass",
"def __init__(self, start_bag=None):\n self.da = DynamicArray()\n\n # populate bag with initial values (if provided)\n # before using this feature, implement add() method\n if start_bag is not None:\n for value in start_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write data to an arrayset artifact. | def put_model_arrayset_data(database, model, aid, hyperchunks, data):
# cherrypy.log.error("put_model_arrayset_data called with: {}".format(aid))
if isinstance(hyperchunks, str):
hyperchunks = slycat.hyperchunks.parse(hyperchunks)
data = iter(data)
slycat.web.server.update_model(database, model... | [
"def test_write_element(self):\n dt = np.dtype('(3,)f8')\n dset = self.f.create_dataset('x', (10,), dtype=dt)\n\n data = np.array([1,2,3.0])\n dset[4] = data\n\n out = dset[4]\n self.assertTrue(np.all(out == data))",
"def put_model_arrayset(database, model, aid, input=Fal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a model parameter in the couch database | def delete_model_parameter(database, model, aid):
with get_model_lock(model["_id"]):
del model["artifact:%s" % aid]
del model["artifact-types"][aid]
database.save(model) | [
"def delete_field(model, *arg):\n return model._pw_index_.delete_field(*arg)",
"def delete(self, model):\n pass",
"def delete_parameter(request, parameter, **_kwargs):\n pass",
"def delete(self, model):\n self._isinstance(model)\n model.key.delete()",
"async def rm_object(model, col... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a cached remote session for the given host. | def create_session(hostname, username, password):
return slycat.web.server.remote.create_session(hostname, username, password, None) | [
"def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the response_url and clean it to make sure that we are not being spoofed | def response_url():
current_url = urlparse(cherrypy.url()) # gets current location on the server
try:
location = cherrypy.request.json["location"]
if parse_qs(urlparse(location['href']).query)['from']: # get from query href
cleaned_url = parse_qs(urlparse(location['href']).query)['... | [
"def clean_url(self):\n\t\treturn clean_unique(self, 'url')",
"def rawurl(self)->URL:\n req = self.request\n while req.response != None:\n req = req.response.req\n return req.url",
"def normalize_resource_url(resource_url):\n ...",
"def _clean_url(self, url):\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
try to delete any outdated sessions for the user if they have the cookie for them | def clean_up_old_session(user_name=None):
cherrypy.log.error("cleaning all sessions for %s" % user_name)
if "slycatauth" in cherrypy.request.cookie:
try:
# cherrypy.log.error("found old session trying to delete it ")
sid = cherrypy.request.cookie["slycatauth"].value
c... | [
"def userLogout(self):\n cherrypy.lib.sessions.expire()",
"def _delete_session(self):\r\n if hasattr(self, \"session\"):\r\n sessiondata = self._get()\r\n # delete from datastore\r\n if sessiondata is not None:\r\n for sd in sessiondata:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check to see if the session user is equal to the apache user raise 403 and delete the session if they are not equal | def check_user(session_user, apache_user, sid):
if session_user != apache_user:
cherrypy.log.error("session_user::%s is not equal to apache_user::%s in standard auth"
"deleting session and throwing 403 error to the browser" % (session_user, apache_user))
# force a lock so ... | [
"def test_unauthorized_mod(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.modify_user(user=existing_user_id, password=id(self), code=403)",
"def authenticated_403(self):\n if self.get_current_user() is None:\n raise web.HTTPError(403)",
"def authenticated_403(self):\n if self.c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
checks that the connection is HTTPS and then returns the user's remote IP | def check_https_get_remote_ip():
if not (cherrypy.request.scheme == "https" or cherrypy.request.headers.get("x-forwarded-proto") == "https"):
cherrypy.log.error("slycat-standard-authentication.py authenticate",
"cherrypy.HTTPError 403 secure connection required.")
rai... | [
"def getRemoteHost():",
"def remote_ip(self) -> str:\n if self._remote_ip is None:\n raise RuntimeError(\"not connected\")\n return self._remote_ip",
"def get_ip():\n rel_ip = None\n main_ip = unit_private_ip() if (\n not config.get('host') or (config.get('host') == \"none\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method that displays the original and blurred images | def displayImages(self):
plt.figure(figsize=(8,6))
plt.subplot(1,2,1)
plt.imshow( self.original_image, cmap="gray")
plt.title("Original Image")
plt.subplot(1,2,2)
plt.imshow( self.blurred_image, cmap="gray")
plt.title("Blurred Image") | [
"def show_images(self):\n\t\tself.im.show()\n\t\tself.kmeans_colorset_im.show()",
"def blurImage(self):\n\n print (\"--Blurring Main Image--\")\n self.blurButton.setDown(True)\n im = Image.open(self.ActivePhoto)\n blurred_image = im.filter(ImageFilter.GaussianBlur(1))\n blurred_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
paste a file or directory that has been previously copied | def paste(location):
copyData = settings.getDataFile()
if not location:
location = "."
try:
data = pickle.load(open(copyData, "rb"))
speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
except:
speech.fail("It doesn't look like you've copied anything yet.")
speech.fail("Type 'hallie ... | [
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def _copy_file ( self, source, dest ):\n return",
"def cut_paste(src_path, dst_path):\n shutil.move(src_path, dst_path)\n return True",
"def cp_file(src, dest):\n if not os.path.exists(dest):\n shutil.copyfile(src, dest)",
"def f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Display list of bookmarks for any given user | def user_list(request, user_name):
bookmarks = get_list_or_404(Bookmark.objects.all().filter(human__username=user_name))
return render(request, 'urly_bird/any_user_list.html', {'bookmarks': bookmarks}) | [
"def user_bookmark_list(request):\r\n queryset = Bookmark.objects.all()\r\n queryset = queryset.filter(user=request.user)\r\n return bookmark_list(request, queryset=queryset)",
"def getbookmarks(userid):\n\tif str(userid) == \"\":\n\t\tuserid = input(\"Bookmarks, userid? [return]=me: \")\n\tif userid == ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts cones in the GUIs frame of reference to cones in the lidar's frame of reference and gets those in the lidar's field of view Sets detected_cones with only cones within the lidar's field of view. Sorts the cones by angle starting at 135 degrees. | def lidarScan(self):
# Get cones seen by lidar
lidar_coords = []
for point in self.gui_points:
# Convert from gui frame to lidar frame
x = (point[0] - self.lidar_pos[0])*scaling_factor
y = (self.lidar_pos[1] - point[1])*scaling_factor
# Convert po... | [
"def pedestrians_in_field(self, vision_angle):\n\n rotatedNeighList = []\n i = -1\n # rotate all the neigbours facing either up or down\n \n for neigh in self.neighbours:\n i = i + 1\n\n if isinstance(neigh, Pedestrian):\n rotatedNeighList.appe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add more connection endpoints. Connection may have many endpoints, mixing protocols and types. | def addEndpoints(self, endpoints):
self.endpoints.extend(endpoints)
self._connectOrBind(endpoints) | [
"def add_endpoints(self, endpoints: Sequence):\n for url_rule, endpoint, *endpoint_attrs in endpoints:\n self.add_endpoint(url_rule, endpoint, **(endpoint_attrs or {}))",
"def add_endpoints(self, endpoints: Sequence):\n for endpoint, *endpoint_attrs in endpoints:\n self.add_end... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read multipart in nonblocking manner, returning a complete message or raising an exception (when no more messages are available). | def _readMultipart(self):
while True:
self.recv_parts.append(self.socket.recv(constants.NOBLOCK))
if not self.socket_get(constants.RCVMORE):
result, self.recv_parts = self.recv_parts, []
return result | [
"def recv_multipart(self, flags=0, copy=True, track=False):\r\n if flags & NOBLOCK:\r\n return _Socket_recv_multipart(self, flags, copy, track)\r\n\r\n # acquire lock here so the subsequent calls to recv for the\r\n # message parts after the first don't block\r\n with self._ev... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect and/or bind socket to endpoints. | def _connectOrBind(self, endpoints):
for endpoint in endpoints:
if endpoint.type == ZmqEndpointType.connect:
self.socket.connect(endpoint.address)
elif endpoint.type == ZmqEndpointType.bind:
self.socket.bind(endpoint.address)
else:
... | [
"def connect(self) -> None:\n self.s.connect((self.ip, self.port))",
"def connect(self):\n self._socket.connect((self._ip, self._port))",
"def bind_socket(self):\n try:\n self.socket.bind((self.host, self.port))\n self.socket.listen(5)\n except socket.error as e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
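The `else` branch of `_connectOrBind` is truncated at the end of the snippet. A hedged reconstruction of the full method, assuming the unknown-type case simply raises (consistent with the two handled cases):

```python
def _connectOrBind(self, endpoints):
    for endpoint in endpoints:
        if endpoint.type == ZmqEndpointType.connect:
            self.socket.connect(endpoint.address)
        elif endpoint.type == ZmqEndpointType.bind:
            self.socket.bind(endpoint.address)
        else:
            # assumption: any other endpoint type is a programming error
            raise AssertionError("Unknown endpoint type %r" % (endpoint.type,))
```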
get a single word's WordNet POS (Part-of-Speech) tag. | def get_wordnet_pos(self, word):
# token = word_tokenize(word)
base_tag = pos_tag([word])[0][1][:2]
return self.pos_tag_dict.get(base_tag, wordnet.NOUN) | [
"def findPOS(word):\r\n\t\r\n lisPOS = list(wordtags[word])\r\n if \"ADJ\" in lisPOS:\r\n return \"ADJECTIVE\"\r\n if \"ADV\" in lisPOS:\r\n return \"ADVERB\"\r\n if \"NOUN\" in lisPOS:\r\n return \"NOUN\"",
"def pos_tag_wordnet(t2,text, pos_tag_type=\"pos_tag\"):\n pos_tagged_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
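`get_wordnet_pos` looks up two-letter Penn Treebank prefixes in `self.pos_tag_dict`, which is never shown. A plausible mapping, assumed here but conventional for NLTK lemmatization pipelines:

```python
from nltk.corpus import wordnet

pos_tag_dict = {
    "JJ": wordnet.ADJ,   # adjectives
    "NN": wordnet.NOUN,  # nouns
    "VB": wordnet.VERB,  # verbs
    "RB": wordnet.ADV,   # adverbs
}
# e.g. pos_tag(["running"])[0][1][:2] == "VB", which maps to wordnet.VERB
```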
Cleans a single review (simplifies it as much as possible) | def clean_review(self, text):
text = text.lower() # lowercase capital letters
if self.remove_stopwords:
text = self.remove_stopwords_f(text, keep_neg_words=True)
text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)
# text = re.sub('[^a-z... | [
"def cleaned_up(review):\n # Remove the Beer Buddy suffix.\n suffix = '---Rated via Beer Buddy for iPhone'\n if review.endswith(suffix):\n review = review[:len(review) - len(suffix)]\n\n # Remove multiple spaces.\n review = ' '.join(review.split()) + '\\n\\n'\n\n # Delete the review if it's... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cleans a single resume (resume text) | def clean_resume(self, text):
text = text.lower() # lowercase capital letters
text = re.sub(r'(http|www)\S+\s*', '', text) # remove URLs
text = re.sub(r'\S+@\S+\s*', '', text) # remove emails
text = re.sub(r'@\S+\s*', '', text) # remove mentions
text = re.sub(r'#\S+\s*', '',... | [
"def clean_text2(text2, project_key):",
"def clean_text(text):\n # Replace newlines by space. We want only one doc vector.\n text = text.replace('\\n', ' ').lower()\n # Remove URLs\n #text = re.sub(r\"http\\S+\", \"\", text)\n # Expand contractions: you're to you are and so on.\n text = contract... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Euclidean distance; the squared Euclidean distance is more frequently used | def euc_dist(self, squared=True): | [
"def euclidean_dist(x, y):\n n = x.size(0)\n m = y.size(0)\n d = x.size(1)\n if d != y.size(1):\n raise Exception\n x = x.unsqueeze(1).expand(n, m, d)\n y = y.unsqueeze(0).expand(n, m, d)\n return torch.pow(x - y, 2).sum(2)",
"def get_sq_euclidean_dist(spatial_distances, rgb_distances)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
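Only the signature of `euc_dist` survives. A free-function sketch of what the docstring describes, assuming NumPy inputs of matching shape; skipping the square root is the standard reason the squared form is preferred when only the ordering of distances matters:

```python
import numpy as np

def euc_dist(a, b, squared=True):
    # Sum of squared coordinate differences along the last axis
    d = np.sum((np.asarray(a) - np.asarray(b)) ** 2, axis=-1)
    return d if squared else np.sqrt(d)
```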
Test elementwise for fill values and return result as a boolean array. | def isfillvalue(a):
a = numpy.asarray(a)
if a.dtype.kind == 'i':
mask = a == -999999999
elif a.dtype.kind == 'f':
mask = numpy.isnan(a)
elif a.dtype.kind == 'S':
mask = a == ''
else:
raise ValueError('Fill value not known for dtype %s' % a.dtype)
return mask | [
"def _get_bool_array(self, nelements, masklist=None):\n self.assertTrue(nelements>0, \"Internal error. Length of array should be positive value\")\n if masklist is None: masklist=[]\n ret_array = numpy.array([ False for idx in xrange(nelements) ])\n for irange in xrange(len(masklist)):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
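A quick doctest-style check of `isfillvalue` on two of the dtype kinds it handles (outputs follow from the visible branches):

```python
>>> import numpy
>>> isfillvalue(numpy.array([1.5, numpy.nan]))
array([False,  True])
>>> isfillvalue(numpy.array([1, -999999999]))
array([False,  True])
```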
Return the start/stop times in milliseconds since 1/1/1970 | def as_millis(self):
return int(ntplib.ntp_to_system_time(self.start) * 1000), int(ntplib.ntp_to_system_time(self.stop) * 1000) | [
"def millis():\n return time.time()*1000 - START_TIME_MS",
"def calc_time_diff_50MHz(start, stop):\n return (stop-start)*2e-8",
"def get_time_ms():\n return int(round(time.time() * 1000))",
"def elapsed_micros(start: int, /) -> int:",
"def elapsed_micros(start):\n pass",
"def get_time(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function to recursively check if two dicts are equal | def dict_equal(d1, d2):
if isinstance(d1, dict) and isinstance(d2, dict):
# check keysets
if set(d1) != set(d2):
return False
# otherwise loop through all the keys and check if the dicts and items are equal
return all((dict_equal(d1[key], d2[key]) for key in d1))
# ... | [
"def dicts_all_equal(d1, d2):\n if not dicts_keys_equal(d1, d2):\n return False\n return all(dicts_values_equal(d1, d2))",
"def equivalent_dicts(_a, _b):\n for _key in _a.keys():\n if _a[_key] != _b[_key]:\n return False\n return True",
"def dicts_equal(lhs, rhs):\n if le... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
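A couple of doctest-style checks for `dict_equal`, consistent with the visible recursion (the non-dict base case is truncated but presumably falls back to `==`):

```python
>>> dict_equal({"a": {"b": 1}}, {"a": {"b": 1}})
True
>>> dict_equal({"a": 1}, {"a": 1, "b": 2})   # differing keysets
False
```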
Quantify misfit with some example data | def test_default_quantify_misfit(tmpdir):
preprocess = Default(syn_data_format="ascii", obs_data_format="ascii",
unit_output="disp", misfit="waveform",
adjoint="waveform", path_preprocess=tmpdir,
path_solver=TEST_SOLVER, source_prefix="SOURC... | [
"def test_Gfit_good_data(self):\n # example from p. 699, Sokal and Rohlf (1995)\n obs = [63, 31, 28, 12, 39, 16, 40, 12]\n exp = [\n 67.78125,\n 22.59375,\n 22.59375,\n 7.53125,\n 45.18750,\n 15.06250,\n 45.18750,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the Pyaflowa preprocess class can quantify misfit over the course of a few evaluations (a line search) and run its finalization task. Essentially an integration test checking that the entire preprocessing module works as a whole | def test_pyaflowa_line_search(tmpdir):
pyaflowa = Pyaflowa(
workdir=tmpdir,
path_specfem_data=os.path.join(TEST_SOLVER, "mainsolver", "DATA"),
path_output=os.path.join(tmpdir, "output"),
path_solver=TEST_SOLVER, source_prefix="SOURCE", ntask=2,
data_case="synthetic", componen... | [
"def test_predictor():",
"def test_main(self):\n kwargs = {'i': '../test_data/eps_example.out', 'o': 'fep_test.out', 't': 350, 'n': 4, 'b': -1, 'c': 1, 'noplot': True, 'e': 0.8, 'slope_only': False}\n # There's a necessary random component here that can potentially cause a RuntimError to be raised, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
dataList item renderer for Posts on the Bulletin Board. | def cms_post_list_layout(list_id, item_id, resource, rfields, record):
record_id = record["cms_post.id"]
#item_class = "thumbnail"
T = current.T
db = current.db
s3db = current.s3db
settings = current.deployment_settings
permit = current.auth.s3_has_permission
raw = record._row
dat... | [
"def renderable_items(self, post_data):\n\n renderable_items = [self._renderable_item_for_data_element(element) for element in post_data]\n for item_transformer in PostProcessor._RENDERABLE_ITEM_TRANSFORMERS:\n renderable_items = item_transformer(renderable_items)\n return renderable_items",
"def re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Count need lines per district and status (top 5 districts) for all open Events | def needs_by_district(cls):
T = current.T
db = current.db
s3db = current.s3db
table = s3db.need_line
ntable = s3db.need_need
etable = s3db.event_event
ltable = s3db.event_event_need
status = table.status
number = table.id.count()
locatio... | [
"def needs_by_district(cls):\n\n T = current.T\n\n db = current.db\n s3db = current.s3db\n\n table = s3db.req_need_line\n ntable = s3db.req_need\n\n left = ntable.on(ntable.id == table.need_id)\n\n status = table.status\n number = table.id.count()\n loc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
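The row above groups need lines by district and status and keeps only the five busiest districts. A generic sketch of that top-N grouping with collections.Counter, using hypothetical (district, status) tuples in place of the web2py query:

from collections import Counter

# Hypothetical flat records: one (district, status) pair per need line.
lines = [("North", "new"), ("North", "done"), ("East", "new"),
         ("North", "new"), ("West", "new"), ("East", "done")]

# Top 5 districts by total line count.
top5 = [d for d, _ in Counter(d for d, _ in lines).most_common(5)]

# Per-district, per-status counts restricted to those districts.
by_status = Counter((d, s) for d, s in lines if d in top5)
print(top5, dict(by_status))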
Write the design to the Specctra format | def write(self, design, filename):
self._convert(design)
with open(filename, "w") as f:
f.write(self._to_string(self.pcb.compose())) | [
"def write_spec(self):\n outfile = self.name + '.ospec'\n self.spec_file = outfile\n self.update_spec()\n self.append_log('# $> write_spec()\\n'\n '# >>> visgen spec: {0}\\n'\n .format(self.spec_file))\n with open(outfile, 'w') as f:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a pin into an outline | def _convert_pin_to_outline(self, pin):
pcbshape = specctraobj.Path()
pcbshape.layer_id = 'Front'
pcbshape.aperture_width = self._from_pixels(1)
pcbshape.vertex.append(self._from_pixels((pin.p1.x, pin.p1.y)))
pcbshape.vertex.append(self._from_pixels((pin.p2.x, pin.p2.y)))
... | [
"def draw_pin(self, pin, xform):\n # TODO special pin characteristics (inverted, clock)?\n line = [xform.chain(p) for p in (pin.p1, pin.p2)]\n self.canvas.line([(p.x, p.y) for p in line],\n fill=self.options.style['part'])",
"def draw_pins():\n\n pass",
"def get_o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Specctra does not have arcs, so convert them to qarcs | def _get_arc_qarcs(self, arc):
min_angle = min(arc.start_angle, arc.end_angle)
max_angle = max(arc.start_angle, arc.end_angle)
def make_point(angle):
""" Make a point """
opp = math.sin(angle * math.pi) * arc.radius
adj = math.cos(angle * math.pi) * ... | [
"def test_circuit_qasm(self):\n qr1 = QuantumRegister(1, \"qr1\")\n qr2 = QuantumRegister(2, \"qr2\")\n cr = ClassicalRegister(3, \"cr\")\n qc = QuantumCircuit(qr1, qr2, cr)\n qc.p(0.3, qr1[0])\n qc.u(0.3, 0.2, 0.1, qr2[1])\n qc.s(qr2[1])\n qc.sdg(qr2[1])\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
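The opp/adj trigonometry in make_point above places a vertex on the arc's circle; judging by the angle * math.pi factor, angles appear to be stored in units of pi. A small sketch under that assumption:

import math

def point_on_circle(cx, cy, radius, angle):
    # angle is in units of pi (0.5 == 90 degrees), matching `angle * math.pi`.
    return (cx + math.cos(angle * math.pi) * radius,
            cy + math.sin(angle * math.pi) * radius)

print(point_on_circle(0, 0, 10, 0.0))   # (10.0, 0.0)
print(point_on_circle(0, 0, 10, 0.25))  # approximately (7.07, 7.07)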
Convert points to paths | def _points_to_paths(self, points):
prev = points[0]
result = []
for point in points[1:]:
path = specctraobj.Path()
path.aperture_width = self._from_pixels(1)
path.vertex.append(prev)
path.vertex.append(point)
result.append(path)
... | [
"def _from_2d_points(points: list[Vec2]) -> path.Path2d:\n path2d = path.Path2d(points[0])\n for point in points[1:]:\n path2d.line_to(point)\n return path2d",
"def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
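_points_to_paths is the classic pairwise (sliding-window) pattern: n points become n - 1 two-vertex segments. A dependency-free sketch with plain tuples standing in for the Specctra path objects:

def points_to_segments(points):
    # zip(points, points[1:]) pairs each point with its successor.
    return [(a, b) for a, b in zip(points, points[1:])]

print(points_to_segments([(0, 0), (1, 0), (1, 1)]))
# [((0, 0), (1, 0)), ((1, 0), (1, 1))]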
Returns the metric used in the search | def metric(self):
return self.__metric | [
"def get_metric(self) -> Dict[str, Any]:\n pass",
"def _get_metric(self, key):\n # type: (_TagNameType) -> Optional[NumericType]\n return self._dd_span.get_metric(key)",
"def metric_name(self):\n pass",
"def metric_names():\n return ['total', 'exe', 'fetch']",
"def queryMe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select an account and set it as the current 'working' account. Calling this method also clears the Batch Queue, if it isn't empty | def SelectAccount(self, nickname):
self.ClearBatchQueue()
if nickname in self.accounts:
self.current_account = self.accounts[nickname]
self.client = self.current_account.client
return True
else:
return False | [
"def selected_account(self, selected_account):\n\n self._selected_account = selected_account",
"def set_current_account(account, dry_run):\n cmd = ['gcloud', 'config', 'set', 'core/account', account]\n print('Run:', cmd, file=sys.stderr)\n if dry_run:\n return None\n return subprocess.ch... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clear the batch queue | def ClearBatchQueue(self):
self.batch_queue = gdata.contacts.data.ContactsFeed() | [
"def clear(self):\n\n self.queue = []",
"def clearQueueAll():",
"def clear_queue(self):\n self.queue = deque()",
"def reset_queue(self):\n self.cmd_queue.clear()",
"def clearQueue(self):\n \n print \"Clearing the queue of async queries.\"\n self.openjobs.clear()",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lazily get the first contact group's Atom Id | def GetFirstGroupId(self):
return self.client.GetGroups().entry[0].id.text | [
"def customer_group_get_one(group_id):\n return customer_group_get(group_id)",
"def getAtomSetId(self):\n\n if self.atomSetName:\n \n return [self.chain,self.seqId,self.atomSetName,self]\n \n else:\n \n return None",
"def min_group_id(self) -> int:\n\n group_ids = np.asarray... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a contact from the selected account | def RemoveContact(self, contact):
self.client.Delete(contact) | [
"def del_contact(self):\n widget = self.ui.listWidgetContactList\n contact = self.sender().property('id')\n self.client.del_contact(contact)\n index = self.client.contact_list.index(contact)\n widget.takeItem(index + 1)\n self.client.contact_list.remove(contact)",
"def re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove all contacts from the selected account | def RemoveAll(self):
contacts = self.GetContactList()
for contact in contacts:
self.BatchEnqueue('delete', contact)
self.ExecuteBatchQueue() | [
"def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()",
"def clear_contacts(self):\n self.session.query(self.Contacts).delete()",
"def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy all contacts from one account to another. This method does not check for duplicates | def CopyContacts(self, from_nickname, to_nickname):
self.SelectAccount(from_nickname)
contacts = self.GetContactList()
self.SelectAccount(to_nickname)
for contact in contacts:
self.BatchEnqueue('create', contact)
self.ExecuteBatchQueue() | [
"def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Move all contacts from one account to another. This method does not check for duplicates | def MoveContacts(self, from_nickname, to_nickname):
self.SelectAccount(from_nickname)
contacts = self.GetContactList()
# Copy contacts -before- deleting
self.SelectAccount(to_nickname)
for contact in contacts:
self.BatchEnqueue('create', contact)
self.ExecuteBatchQueue()
# Then delete
self.Sele... | [
"def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
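MoveContacts above deliberately copies before deleting, so a failure partway through loses nothing. A hedged sketch of the same ordering with in-memory lists standing in for the gdata batch queue:

def move_all(src, dst):
    # Snapshot the source first: never mutate a list while iterating it.
    snapshot = list(src)
    # Copy first; only delete once the copy step has completed.
    dst.extend(snapshot)
    for item in snapshot:
        src.remove(item)

a, b = ["alice", "bob"], []
move_all(a, b)
print(a, b)  # [] ['alice', 'bob']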
Perform a multiway sync between given accounts | def MultiWaySync(self, accounts):
cleaned_contacts = []
contacts = []
for account in accounts:
self.SelectAccount(account)
contacts.extend(self.GetContactList())
duplicates, originals = ceFindDuplicates(contacts)
merged, todelete = ceMergeDuplicates(duplicates)
cleaned_contacts.extend(origina... | [
"def remote_sync(self):\r\n #sync remotely \r\n pass",
"def update_samba():\n\n for server in Server.objects.filter(samba_management=True).exclude(ssh_connection_string_from_gestion=None).exclude(ssh_connection_string_from_gestion='').all():\n\n # Sync users\n # Get user list\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
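MultiWaySync pools every account's contacts, then splits them into duplicate groups and unique originals. ceFindDuplicates' internals are not shown, so the grouping key here is an assumption; a sketch of that split keyed on email:

from collections import defaultdict

def find_duplicates(items, key):
    groups = defaultdict(list)
    for item in items:
        groups[key(item)].append(item)
    # Groups with more than one member are duplicate sets.
    duplicates = [g for g in groups.values() if len(g) > 1]
    originals = [g[0] for g in groups.values() if len(g) == 1]
    return duplicates, originals

contacts = [{"email": "a@x"}, {"email": "b@x"}, {"email": "a@x"}]
dups, originals = find_duplicates(contacts, key=lambda c: c["email"])
print(len(dups), len(originals))  # 1 1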
Runs `det experiment describe` CLI command on a finished experiment. Will raise an exception if `det experiment describe` encounters a traceback failure. | def run_describe_cli_tests(experiment_id: int) -> None:
# "det experiment describe" without metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
... | [
"def build_describe(self, tree):\n assert isinstance(tree, sql_dialects.ast.Describe)\n raise NotImplementedError()",
"def do_main_function() -> None:\n args = options_parse()\n logger = get_logger(\"DESCRIBE application\", args)\n logger.info(\"DESCRIBE function is started \")\n if args... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
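The CLI tests above work because subprocess.check_call raises CalledProcessError on any non-zero exit status, turning a command invocation into an assertion. A minimal sketch of the pattern, substituting the current interpreter for the det binary:

import subprocess
import sys
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    # Raises subprocess.CalledProcessError if the command fails.
    subprocess.check_call([sys.executable, "--version"], cwd=tmpdir)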
Runs list-related CLI commands on a finished experiment. Will raise an exception if the CLI command encounters a traceback failure. | def run_list_cli_tests(experiment_id: int) -> None:
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-trials", str(experiment_id)]
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-checkpoints", str(experiment_id)]
)
s... | [
"def run(self, cmd_list):\n raise NotImplementedError",
"def test_pre_cli_list(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-l\"))\n assert \"test.yml\" in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)",
"def main_list(args)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle all options in the arguments. This function returns a dictionary containing 'input_pkg' and 'output_pkg' keywords. | def handle_arguments():
result = {'input_pkg':'', 'output_pkg':''}
try:
args = sys.argv[1:]
optlist = gnu_getopt(args, 'h', ['help'])
except GetoptError:
        print('Error when parsing arguments.')
more_informations()
if len(sys.argv) < 2:
        print('No input file.')
... | [
"def handle_arguments():\n result = {'input_pkg': '', 'output_pkg': ''}\n\n try:\n args = sys.argv[1:]\n optlist = gnu_getopt(args, 'h', ['help'])\n except GetoptError:\n print('Error when parsing arguments.')\n more_informations()\n\n if len(sys.argv) < 2:\n print('No... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
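handle_arguments above uses the getopt-era idiom; a modern equivalent sketch with argparse (the positional input/output layout is an assumption, since the original body is truncated):

import argparse

def handle_arguments(argv=None):
    # argparse provides -h/--help and argument-error handling for free.
    parser = argparse.ArgumentParser(description="Convert a package.")
    parser.add_argument("input_pkg", help="path to the input package")
    parser.add_argument("output_pkg", nargs="?", default="",
                        help="path to the output package (optional)")
    args = parser.parse_args(argv)
    return {"input_pkg": args.input_pkg, "output_pkg": args.output_pkg}

print(handle_arguments(["in.pkg", "out.pkg"]))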
Get a message to speak on first load of the skill. Useful for post-install setup instructions. | def get_intro_message(self):
self.speak_dialog("thank.you")
return None | [
"def on_launch():\n attributes=\"LAUNCH\"\n speech_message = SAYAS_INTERJECT + \"g'day.\" + SAYAS + BREAKSTRONG + WELCOME_MESSAGE\n return response( attributes,response_ssml_text(speech_message, False))",
"def at_before_say(self, message, **kwargs):\n return message",
"def get_phrase(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle conversation. This method gets a peek at utterances before the normal intent handling process after a skill has been invoked once. To use, override the converse() method and return True to indicate that the utterance has been handled. | def converse(self, utterances, lang="en-us"):
    # check if the game was abandoned mid-conversation and we should clean it up
self.maybe_end_game()
if self.playing:
ut = utterances[0]
# if self will trigger do nothing and let intents handle it
if self.will_trigger(ut)... | [
"def handle_converse_request(message):\n skill_id = int(message.data[\"skill_id\"])\n utterances = message.data[\"utterances\"]\n lang = message.data[\"lang\"]\n global ws, loaded_skills\n # loop trough skills list and call converse for skill with skill_id\n for skill in loaded_skills:\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a git commit message template for cases currently assigned to you. | def do_jira_case_commit_message(self, arg):
cases = [(issue.key, issue.fields.summary, self.jira_url() + "/browse/" + issue.key) for issue in self.get_open_issues()]
msg = """
--------------------------------------------------------------------
[{}] {}
<msg>
{}
------------------------... | [
"def create_template(conf):\n template = (\n '#!/bin/bash\\n'\n 'set -e\\n'\n 'REPO={0}\\n'\n 'git init $REPO\\n'\n 'cd $REPO\\n'\n 'touch decoy{1}\\n'\n 'git add decoy{1}\\n'\n '{2}\\n'\n )\n\n if conf['ssh']:\n template = ''.join([template,\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
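The commit-message helper above interpolates each issue's key, summary, and browse URL into a delimited template block. A small sketch with hypothetical issue tuples in place of the live JIRA client:

import textwrap

def commit_template(cases):
    # cases: (key, summary, url) tuples, one template block per open case.
    block = textwrap.dedent("""\
        --------------------------------------------------------------------
        [{key}] {summary}
        <msg>
        {url}
        --------------------------------------------------------------------
    """)
    return "\n".join(block.format(key=k, summary=s, url=u) for k, s, u in cases)

print(commit_template([("PROJ-1", "Fix login", "https://jira.example/browse/PROJ-1")]))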