| query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, lengths 19-20) | metadata (dict) |
|---|---|---|---|
Returns aggregate result for mean or population standard deviation. diagnosis: integer in [1,6] or string in ['nci', 'mci', 'ad', 'other', 'na']; stat: "mean" or "std_pop". | def get_gene_stat(entrez_id, diagnosis, stat, psql_conn):
NCI = [1]
MCI = [2,3]
AD = [4,5]
other = [6]
diagnosis_arr = ['nci', 'mci', 'ad', 'other', 'na']
if type(diagnosis) is int:
if diagnosis in NCI:
diagnosis = 'nci'
elif diagnosis in MCI:
diagnosis =... | [
"def extract_mean_and_std(entry):\n try:\n # First split according to whitespace, such that next, we are\n # dealing with two tuples.\n mean, std = entry.split(maxsplit=1)\n except ValueError:\n return np.nan, np.nan\n\n try:\n # This should always work for valid entries;... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a comment to an existing note in the database. | def add_comment(session, text, note, user):
check_permission(session, PermissionType.COMMENT, user, note)
comment = Comment(body=text, note_id=note.id, owner_id=user.id)
session.add(comment) | [
"def add_note(self,note):\n q=\"insert into note(msg) values('%s')\"%(note.get_msg())\n try:\n NoteDB.cursor.execute(q)\n NoteDB.db.commit()\n except Exception as e:\n print(e)\n NoteDB.db.rollback()\n raise",
"def add_note(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to create a binary raster mask from polygons in a given geojson file, so as to label the pixels in the image as either background or target. | def training_mask_generation(input_image_filename, input_geojson_filename, labels):
with rasterio.open(input_image_filename) as f:
metadata = f.profile
mask = np.zeros((metadata['height'], metadata['width'], len(labels)))
xres = metadata['transform'][0]
ulx = metadata['tra... | [
"def binary_mask_to_polygon(binary_mask, tolerance):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
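As an aside, the mask-generation entry above can be reproduced compactly with `rasterio.features.rasterize`; the sketch below is an assumption about the approach, not the dataset's truncated implementation, and the geometry, grid size, and transform are hypothetical.

```python
from rasterio import features
from rasterio.transform import from_origin

# Hypothetical polygon and grid; the dataset's function reads these
# from an image file and a geojson file instead.
polygons = [{"type": "Polygon",
             "coordinates": [[(2, 2), (2, 8), (8, 8), (8, 2), (2, 2)]]}]
transform = from_origin(0, 10, 1, 1)  # ulx=0, uly=10, 1-unit pixels
mask = features.rasterize(polygons, out_shape=(10, 10),
                          transform=transform, fill=0, default_value=1)
print(mask.sum())  # number of pixels labelled as target
```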
This function is used to convert image files and their respective polygon training masks into numpy arrays, so as to facilitate their use for model training. | def training_data_generation(DATA_DIR, img_height_size, img_width_size, perc, buff, label_list):
if perc < 0 or perc > 1:
raise ValueError('Please input a number between 0 and 1 (inclusive) for perc.')
if buff < 0 or buff > 1:
raise ValueError('Please input a number between 0... | [
"def create_input(path):\n folder = path\n files = os.listdir(folder)\n x = []\n y = []\n image_paths = []\n scaler = MinMaxScaler(feature_range=(-0.1, 1.175))\n #noramlized as in LeCun, makes the mean input roughly 0 and the variance roughly 1.\n #This accelerates learning.\n for i, ima... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a directed graph is projective/non-crossing. A directed graph is projective/non-crossing with respect to some node ordering iff the arcs can be drawn without any crossing on the upper half-plane formed by placing the nodes along a line according to the ordering. | def is_projective(G: nx.DiGraph) -> bool:
return len(list(G.edges)) == len(list(get_projective_edges(G))) | [
"def is_dag(self):\n return len(self.sccs) == len(self.vertices)",
"def is_Bipartite(graph):\r\n if len(get_nodes(graph)) < 2:\r\n return False\r\n return True if paint(graph, 2) else False",
"def is_unibipartite(graph):\n src, dst, _ = graph.edges()\n return set(src.tonumpy()).isdisjo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get projective edges of a directed graph. An edge is projective iff it doesn't cross another edge when drawn on the upper half-plane formed by placing the nodes along a line according to the node ordering. | def get_projective_edges(G: nx.DiGraph) -> Iterator[Tuple[Any, Any]]:
for head, depd in G.edges:
left, right = min(head, depd), max(head, depd)
for head2, depd2 in G.edges:
if head == head2 and depd == depd2: # identical edge
continue
left2, right2 = min(head... | [
"def get_proj_edges(edges: Collection[Tuple[int, int]]) -> Iterator[Tuple[int, int]]:\n adj_set: dict = defaultdict(set)\n for u, v in edges:\n adj_set[u].add(v)\n\n def dfs(root: int) -> Set[int]:\n stack, seen = [root], set()\n while stack:\n u = stack.pop()\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
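For reference, the crossing condition that both projectivity entries above rely on reduces to a strict interleaving of arc endpoints; a minimal sketch, assuming integer node positions:

```python
def edges_cross(e1, e2):
    # Two arcs drawn on the upper half-plane cross iff their endpoints
    # strictly interleave: l1 < l2 < r1 < r2 (or the symmetric case).
    l1, r1 = min(e1), max(e1)
    l2, r2 = min(e2), max(e2)
    return l1 < l2 < r1 < r2 or l2 < l1 < r2 < r1

print(edges_cross((1, 4), (2, 5)))  # True: the arcs interleave
print(edges_cross((1, 4), (2, 3)))  # False: one arc nests inside the other
```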
Split a pathname into drive/UNC sharepoint and relative path specifiers. Returns a 2-tuple (drive_or_unc, path); either part may be empty. If you assign result = splitdrive(p), it is always true that result[0] + result[1] == p. | def splitdrive(p):
if len(p) >= 2:
if isinstance(p, bytes):
sep = b'/'
altsep = b'/'
colon = b':'
else:
sep = '/'
altsep = '/'
colon = ':'
normp = p.replace(altsep, sep)
if (normp[0:2] == sep*2) and (normp[2:3] !... | [
"def splitdrive(path):\r\n # Algorithm based on CPython's ntpath.splitdrive and ntpath.isabs.\r\n if path[1:2] == ':' and path[0].lower() in 'abcdefghijklmnopqrstuvwxyz' \\\r\n and (path[2:] == '' or path[2] in '/\\\\'):\r\n return path[:2], path[2:]\r\n return '', path",
"def splitdriv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
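The UNC branch hinted at in the truncated `splitdrive` document above follows CPython's `ntpath` logic; a minimal sketch using '/' as the only separator (an assumption taken from the snippet's `sep`/`altsep` values):

```python
def splitdrive_unc(p: str):
    # '//host/mount/rest' -> ('//host/mount', '/rest'); otherwise no drive.
    if p[0:2] == "//" and p[2:3] != "/":
        index = p.find("/", 2)            # end of the host name
        if index == -1:
            return "", p
        index2 = p.find("/", index + 1)   # end of the share name
        if index2 == index + 1:           # '//host//x' is not a valid UNC path
            return "", p
        if index2 == -1:
            index2 = len(p)
        return p[:index2], p[index2:]
    return "", p

print(splitdrive_unc("//host/mount/rest"))  # ('//host/mount', '/rest')
```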
Retrieve the mappings from the (short) UUID to the MTURK ID, and the MTURK ID to the assignment ID. Returned as a tuple in that order. | def get_uuid_mturk_mapping():
my_hit = None
for hit in mtk.get_all_hits():
if hit.Title == api_secrets['mt_hit_title']:
my_hit = hit
uuid_mapping = {}
mturk_mapping = {}
if my_hit is not None:
id = my_hit.HITId
for assignment in mtk.get_assignments(id, page_size=... | [
"def _get_mapping_info_with_mpio(self):\n map_chl = {\n 'slot_a': []\n }\n if self._model_type == 'R':\n map_chl['slot_b'] = []\n\n # MPIO: Map all the channels specified in conf file\n # If MCS groups exist, only map to the minimum channel id per group\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests whether question content is bleached. | def test_bleaching(self):
q = QuestionFactory(content="<unbleached>Cupcakes are the best</unbleached>")
url = reverse("question-detail", args=[q.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
assert "<unbleached>" not in res.data["content"] | [
"def test_bleaching(self):\n a = AnswerFactory(content=\"<unbleached>Cupcakes are the best</unbleached>\")\n url = reverse(\"answer-detail\", args=[a.id])\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n assert \"<unbleached>\" not in res.data[\"content\"]",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that questions created via the API are autotagged. | def test_auto_tagging(self):
TagFactory(name="desktop")
q = QuestionFactory()
self.client.force_authenticate(user=q.creator)
tags_eq(q, [])
res = self.client.post(
reverse("question-set-metadata", args=[q.id]),
content_type="application/json",
... | [
"def test_get_specific_question(self):\n self.token = self.get_token()\n head = {'Content-Type': 'application/json', 'Authorization': 'JWT {}'.format(self.token)}\n\n self.test_client().post('/api/v1/questions', \\\n data=json.dumps(self.question), headers=head)\n\n question = sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests whether answer content is bleached. | def test_bleaching(self):
a = AnswerFactory(content="<unbleached>Cupcakes are the best</unbleached>")
url = reverse("answer-detail", args=[a.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
assert "<unbleached>" not in res.data["content"] | [
"def test_bleaching(self):\n q = QuestionFactory(content=\"<unbleached>Cupcakes are the best</unbleached>\")\n url = reverse(\"question-detail\", args=[q.id])\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n assert \"<unbleached>\" not in res.data[\"content\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The entry method. If no price update was made today, run the first update; otherwise just schedule the next update for midnight. | async def run(self):
last_update = await self._get_last_update()
if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():
await self._update_prices()
else:
self._schedule_next_update() | [
"def _cron(self):\n while True:\n self.check_update()\n sleep(60)",
"async def _update_prices(self):\n async with self._pg.transaction() as db_conn:\n price_update_id = await self._create_price_update_record(db_conn)\n flights = await self._updater.get_che... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update prices and schedule a new update for midnight. | async def _update_prices(self):
async with self._pg.transaction() as db_conn:
price_update_id = await self._create_price_update_record(db_conn)
flights = await self._updater.get_cheapest_flights()
flights_saved = await self._save_flights(db_conn, flights, price_update_id)
... | [
"async def run(self):\n last_update = await self._get_last_update()\n if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():\n await self._update_prices()\n else:\n self._schedule_next_update()",
"def _cron(self):\n while True... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
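The "schedule for midnight" step shared by the two price-update entries above amounts to computing the delay until the next UTC midnight; a sketch under that assumption (the dataset's actual scheduler code is truncated):

```python
import datetime

def seconds_until_utc_midnight(now=None):
    # Delay until the next UTC midnight, when the next update should run.
    now = now or datetime.datetime.utcnow()
    midnight = datetime.datetime.combine(
        now.date() + datetime.timedelta(days=1), datetime.time.min)
    return (midnight - now).total_seconds()

print(f"next update in {seconds_until_utc_midnight():.0f}s")
```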
Return the latest package release from PyPI. | def parse_latest(name):
response = requests.get(f"https://pypi.org/pypi/{name}/json")
response.raise_for_status()
releases = [parse(v) for v in response.json()["releases"]]
return max(v for v in releases if not v.is_prerelease) | [
"def get_latest_released_version(self) -> str:\n\n version = Specfile.get_upstream_version(\n versioneer=None,\n package_name=self.package_config.downstream_package_name,\n category=None,\n )\n logger.info(f\"Version in upstream registries is {version!r}.\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
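The selection step in `parse_latest` above can be checked offline with `packaging.version` and hypothetical release strings:

```python
from packaging.version import parse

# Hypothetical release keys as returned by the PyPI JSON API.
releases = [parse(v) for v in ("1.0", "2.0rc1", "1.5")]
print(max(v for v in releases if not v.is_prerelease))  # 1.5
```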
Show the setup form to the user. | async def _show_setup_form(self, errors=None):
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
vol.Required... | [
"async def _show_setup_form(self, errors=None):\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(CONF_HOST): str,\n vol.Required(CONF_PORT, default=DEFAULT_PORT): int,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return self's color number. | def get_color_number(self) -> typing.SupportsInt:
return self.__color | [
"def __int__(self):\n return self.color",
"def get_color_code(self):\n if self.color == 'r':\n return (254, 0, 0)\n else:\n return (0, 0, 0)",
"def getNum(self) -> \"int32_t\":\n return _coin.SoGLColorIndexElement_getNum(self)",
"def getNum(self) -> \"int32_t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect to the SFTP server and run the correct SFTP action depending on a switch statement. Directories have been added as defaults just to show functionality; they should probably be moved when the logic becomes more mature. | def sftp_conn(action):
with pysftp.Connection(
os.getenv("MYHOST"),
username=os.getenv("THISUSER"),
password=os.getenv("SPASSWORD"),
) as sftp:
sftp.chdir(os.getenv("begin_path"))
render_sftp_action(sftp, action)
# list directory upon completion
post_actio... | [
"def sftp_connect():\n try:\n # Open a transport\n transport = paramiko.Transport(\n (db.ac_config_1[5], int(db.ac_config_1[6])))\n # Auth\n transport.connect(\n username=db.ac_config_1[7], password=db.ac_config_1[8])\n # Go!\n sftp = paramiko.SFTPC... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A new case window will open in a new GUI. | def new_case(self):
self.dialog = NewCase(self) | [
"def open_new_window(self):\n handles = self.driver.window_handles\n for handle in handles:\n if handle != self.driver.current_window_handle:\n self.driver.switch_to.window(handle)\n # current_windows = self.driver.current_window_handle\n # all_handles = self.dr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse the tokenized chemical equation. | def parse(expression, token_list, options, mexp_protected_header_enabled=False, mexp_protected_header_prefix="X"):
# Wrap the interface option.
if_opt = _interface_opt.OptionWrapper(options)
# Get the language ID.
lang_id = _l10n_opt.OptionWrapper(options).get_language_id()
# Initialize an emp... | [
"def parse_equation_terms(equation: str):\n\n def replace_type(term, new_type):\n if term.type == Type.VARIABLE:\n term = term._replace(type=new_type)\n return term\n\n left, right = equation.split('=', maxsplit=1)\n\n lhs_terms = [replace_type(t, Type.ENDOGENOUS) for t in parse_te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Discovery Request Builder aids in generating a discovery request for this protocol | def discovery_request_builder(self) -> PlcDiscoveryRequestBuilder:
raise PlcNotImplementedException(f"Not implemented for {self.protocol_name}") | [
"def create_discover_payload(self):\n discoverRequest = ET.Element(\"discoverRequest\")\n type = ET.SubElement(discoverRequest, \"type\")\n type.text = self._module.paramgram[\"type\"]\n if self._module.paramgram[\"root_ip\"] and self._module.paramgram[\"type\"] == \"SmartScan\":\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the RMSE metric for keras models during training | def rmse(y_true, y_pred):
# root mean squared error (rmse) for regression
# axis=-1
# print(K.int_shape(y_pred))
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=0)) | [
"def test_rmse(\n models: Tuple,\n X_test: pd.DataFrame,\n y_test: pd.Series,\n ) -> float:\n rmse = 0\n for model in models:\n y_pred = model.predict(X_test)\n rmse += np.sqrt(mean_squared_error(y_test, y_pred))\n return rmse / len(models)",
"def RMSE(y_true, y_pred... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
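A plain-NumPy equivalent of the Keras RMSE metric above, useful for checking the `axis=0` averaging outside a training loop (a sketch, not part of the dataset):

```python
import numpy as np

def rmse_np(y_true, y_pred):
    # Same formula as the Keras metric: sqrt(mean((pred - true)^2, axis=0)).
    return np.sqrt(np.mean(np.square(y_pred - y_true), axis=0))

print(rmse_np(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 5.0])))  # ~1.1547
```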
Strips the tensor name to reflect the op name. | def format_tensor_name(name):
if name.startswith("^"):
name_old = name
name = name.strip("^")
log.warning("Changing \"{}\" to \"{}\"".format(name_old, name))
return name.split(":")[0]
# return name | [
"def _remove_scope_in_op_name(self, op_name):\n name_splits = op_name.split('.')\n node_type = name_splits[0]\n if node_type.isdigit() and self.node_map.get(node_type) is not None:\n name_splits = self.node_map[node_type].split('.')\n name_splits[1] = name_splits[1].split('_')... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a node list into an adjacency matrix. | def convert_node_list_to_adj_mat(node_list):
idx_table = {}
ord_table = {}
def add_node(table, node):
if node.name in table:
return
for subnode_name in node.input:
# Not sure if this will fix the variable
# assign issue.
# sbn = subnode_name.strip("^")\
sbn = subnode_name
... | [
"def adjacency_list_to_matrix(adj_list):\n n_nodes = len(adj_list)\n M = np.zeros(shape=(n_nodes, n_nodes))\n for vertex, lst in adj_list.iteritems():\n for v in lst:\n M[vertex][v] = 1\n return M",
"def matrix_adjacency_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
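The node-list conversion above is easier to see on a plain dict; a minimal sketch assuming each node maps to the names of its inputs, with the `^` control-dependency prefix stripped as in the `format_tensor_name` entry:

```python
import numpy as np

def adj_matrix(nodes):
    # nodes: {name: [input names]}; returns M with M[i, j] = 1 for edge i -> j.
    idx = {name: i for i, name in enumerate(nodes)}
    mat = np.zeros((len(nodes), len(nodes)), dtype=int)
    for name, inputs in nodes.items():
        for inp in inputs:
            mat[idx[inp.lstrip("^")], idx[name]] = 1
    return mat

print(adj_matrix({"a": [], "b": ["a"], "c": ["a", "^b"]}))
```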
Gets the path cover in string format. | def get_path_cover_str(node_list, src, dst):
return get_path_cover_str_list(node_list, src, [dst]) | [
"def cover_path(self):\n return None",
"def cover(self):\n cp = self.cover_path\n return open(cp, 'rb') if cp and path.isfile(cp) else None",
"def __str__(self):\r\n return self._path",
"def get_full_path(self) -> str:\r\n return self.location + \"\\\\\" + self.filename + \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function performs oversampling on flow stacks. 1. corner cropping + center cropping (x5) 2. horizontal flipping (x2) | def flow_stack_oversample(flow_stack, crop_dims):
im_shape = np.array(flow_stack.shape[1:])
stack_depth = flow_stack.shape[0]
crop_dims = np.array(crop_dims)
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
h_center_offset = (im_shape[0] - crop_dims[0])/2... | [
"def multi_crop(path_in, path_out, input_shape=(1292, 968), target_shape=(644, 644), bottom_right=False,\n random_crop=0):\n\n print('Starting multi_crop')\n # Create the folder that will hold all images:\n if os.path.exists(path_out):\n shutil.rmtree(path_out, ignore_errors=True)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the measurements that would be output by a pipeline. This function calls Pipeline.get_measurement_columns() to get the measurements that would be output by a pipeline. This can be used in a workflow tool or LIMS to find the outputs of a pipeline without running it. For instance, someone might want to integrate Cel... | def print_measurements(options):
if options.pipeline_filename is None:
raise ValueError("Can't print measurements, no pipeline file")
pipeline = Pipeline()
def callback(pipeline, event):
if isinstance(event, LoadException):
raise ValueError("Failed to load %s" % options.pipeli... | [
"def print_pipeline_and_problem(pipeline: dict, problem: str):\n logger.info(\"Pipeline:\")\n logger.info(get_list_vertically(primitive_list_from_pipeline_object(pipeline)))\n logger.info(\"on problem {} \\n\\n\".format(problem))",
"def print_measurements (self, results):\n print \"\"\n tab... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the image set groups for this pipeline. This function outputs a JSON string to the console composed of a list of the groups in the pipeline image set. Each element of the list is a two-tuple whose first element is a key/value dictionary of the group's key and the second is a tuple of the image numbers in the group. | def print_groups(filename):
path = os.path.expanduser(filename)
m = Measurements(filename=path, mode="r")
metadata_tags = m.get_grouping_tags()
groupings = m.get_groupings(metadata_tags)
json.dump(groupings, sys.stdout) | [
"def print_groups(self):\n\n text = ''\n\n # print out a starting message, and print headers.\n # print('printing groups')\n text += self.print_header()\n\n # print out the row numbers and the contents of the\n # rows, with the values in m represented by groups\n # a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the commands needed to run the given batch data file headless. filename: the name of a Batch_data.h5 file. The file should group image sets. The output assumes that the executable, "CellProfiler", can be used to run the command from the shell. Alternatively, the output could be | def get_batch_commands(filename, n_per_job=1):
path = os.path.expanduser(filename)
m = Measurements(filename=path, mode="r")
image_numbers = m.get_image_numbers()
if m.has_feature(IMAGE, GROUP_NUMBER):
group_numbers = m[
IMAGE, GROUP_NUMBER, image_numbers,
]
group... | [
"def main():\n\n # file-specific constants\n section_header = 'Python Scikit-learn Models'\n table_header_list = ['Model Name', 'Model Description', 'Data Name',\n 'Data Description', 'Performance Metric 1',\n 'Performance Metric 2']\n\n # determine output... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run a CellProfiler pipeline in headless mode | def run_pipeline_headless(options, args):
if options.first_image_set is not None:
if not options.first_image_set.isdigit():
raise ValueError("The --first-image-set option takes a numeric argument")
else:
image_set_start = int(options.first_image_set)
else:
image_s... | [
"async def _instantiate_browser(self, headless: bool) -> None:\n self.browser = await pyppeteer.launch(headless=headless)",
"def setup_chromedriver():\n global chrome_options\n global driver\n print(time() + \"[ INFO ] Starting Chromedriver...\")\n chrome_options = Options()\n if args.withou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run the upgrade process for the given module. Raises an exception on errors; the caller must handle it and exit cleanly. Expects that the db has been initialized already via a call to init_database() or similar. | def upgrade_db(code_versions: dict, db_versions: dict, upgrade_module):
# Load the module for upgrade (provides the upgrade routines etc
module = upgrade_module
versions_tuple = needs_upgrade(code_versions, db_versions)
if versions_tuple:
code_db_version = versions_tuple[0]
running_db_v... | [
"def upgrade(config, module, version, module_args, bdb, file):\n if module_args is None:\n module_args = click.prompt('New Custom Module Args', default='' )\n\n upgraded_module = deploy_module(config, module, version, file)\n #print(\"Module UID: {}\".format(upgraded_module[\"uid\"]))\n bdb_modul... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the mtx file list for which ivectors need to be extracted. | def get_mtx_list_for_extraction(extract_list):
ext = os.path.basename(extract_list).split(".")[-1]
mtx_list = []
if ext == "mtx":
mtx_list = [extract_list]
else:
mtx_list = utils.read_simple_flist(extract_list)
return mtx_list | [
"def get_files(evolution_model):\n \n modeldir = os.path.join(basedir, '../Models')\n \n if evolution_model == 'mist':\n filename = 'MIST_v1.2_vvcrit0.0_feh_*.fits'\n \n elif evolution_model == 'yapsi':\n filename = 'YaPSI_feh_*.fits'\n \n else:\n # default to MIST if models not recog... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create optimizers for SMM model | def create_optimizers(model, config):
if config['optim'] == 'adagrad':
torch_optim = torch.optim.Adagrad
else:
torch_optim = torch.optim.Adam
opt_t = torch_optim([model.T, model.m], lr=config['eta_t'])
opt_q = torch_optim([model.Q], lr=config['eta_q'])
optims = {'Q': opt_q, 'T': o... | [
"def create_sgd_optimizers_fn(datasets, model, learning_rate, momentum=0.9, weight_decay=0, nesterov=False, scheduler_fn=None, per_step_scheduler_fn=None):\n optimizer_fn = functools.partial(\n torch.optim.SGD,\n lr=learning_rate,\n momentum=momentum,\n weight_decay=weight_decay,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract posterior distribution of ivectors using existing model | def extract_ivector_posteriors(args):
# -- configuration --
cfg_f = os.path.dirname(os.path.realpath(args.model_f)) + "/config.json"
config = json.load(open(cfg_f, 'r'))
os.makedirs(config['tmp_dir'] + 'ivecs/', exist_ok=True)
config['xtr_done'] = 0
config['xtr_iters'] = args.xtr
config['n... | [
"def posterior(self): \n # create a grid over which we will calculate the likelihood\n self.p_grid = np.linspace(0, 1, num = self.g)\n # calculate the probability of observing the data\n self.likelihood = stats.binom.pmf(self.k,self.n,p = self.p_grid)\n # multiply with prior\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Launch filtering, sorting and paging to output results. | def run(self):
# count before filtering
self.cardinality = self.query.count()
# the term entered in the datatable's search box
self.filtering()
# field chosen to sort on
self.sorting()
# pages have a 'start' and 'length' attributes
self.paging()
... | [
"def main():\n entries = get_feed_entries()\n while True:\n try:\n search_term = input('Search for (q for exit): ').lower()\n except EOFError:\n break\n\n if search_term == '':\n print('Please provide a search term')\n\n if search_term != '' and sea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructor for the Fetch Subset object | def __init__(self, no_of_subset: int = None, subset_list: list = []):
self.no_of_subset = no_of_subset
self.subset_list = subset_list
self.getSubsetIndex() | [
"def get_subset(self, subset: Subset) -> \"DatasetEntity\":\n dataset = DatasetEntity(\n items=[item for item in self._items if item.subset == subset],\n purpose=self.purpose,\n )\n return dataset",
"def __init__(self, collection):\n self.collection = collection",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will generate the index if no_of_subset is passed, or will pass through the subset_list if no_of_subset is None. | def getSubsetIndex(self, **parameter_list: dict):
if not self.subset_list:
self.subset_list = random.sample(range(2, 21), self.no_of_subset)
return self.subset_list | [
"def __init__(self, no_of_subset: int = None, subset_list: list = []):\n self.no_of_subset = no_of_subset\n self.subset_list = subset_list\n self.getSubsetIndex()",
"def get_subset_index(subset):\n subset_idx = '_'.join(sorted(set(str(i) for i in subset)))\n return subset_idx",
"def _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will read the required data from the inpatient file, concatenate them, and return the dataframe. | def fetchFromInpatientDataset(self) -> pd.DataFrame:
dataframe_list = []
for i in self.subset_list:
data_inpatient_claims = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip",
parse_dates=[
"CLM_FROM... | [
"def fetchFromOutpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_outpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Outpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\"CLM_FROM_D... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will read the required data from the Prescription Drug Events file, concatenate them, and return the dataframe. | def fetchFromPrescriptionDrugEventsDataset(self) -> pd.DataFrame:
dataframe_list = []
for i in self.subset_list:
data_prescription_drug_event = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_{i}.zip",
parse_dates=["S... | [
"def load_data(self):\n self.event_df = pd.DataFrame({'Time': [0.1, 0.2, 0.3, 0.4, 0.5],\n '1_sig': [1, 2, 3, 4, 5],\n '2_sig': [2, 5, 6, 7, 9]})",
"def read_clickstream_data() -> pd.DataFrame:\n df1 = read_data_with_columns(r'dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will read the required data from the outpatient file, concatenate them, and return the dataframe. | def fetchFromOutpatientDataset(self) -> pd.DataFrame:
dataframe_list = []
for i in self.subset_list:
data_outpatient_claims = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Outpatient_Claims_Sample_{i}.zip",
parse_dates=["CLM_FROM_DT", "CLM_THRU_DT... | [
"def fetchFromInpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_inpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will read the required data from the beneficiary summary file, concatenate them, and return the dataframe. | def fetchFromBeneficiaryDataset(self, year: int = 2008) -> pd.DataFrame:
assert year in [2008, 2009, 2010], "Incorrect Year Given"
dataframe_list = []
for i in self.subset_list:
data_beneficiary_summary = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_{year}_Beneficiar... | [
"def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Deman... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will read the required data from the carrier claims file, concatenate them, and return the dataframe. | def fetchFromCarrierClaimsDataset(self, claim_type: str = "A") -> pd.DataFrame:
assert claim_type in ["A", "B"], "Incorrect Claim Type Given"
dataframe_list = []
for i in self.subset_list:
data_carrier_claims = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_201... | [
"def fetchFromInpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_inpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the observation env depending on the observation space type. If the observation space (i.e. akro.Image, gym.spaces.Box) is an image, wrap the input of shape (W, H, 3) for PyTorch as (N, 3, W, H). | def _initialize_obs_env(self, env):
obs_shape = env.observation_space.shape
if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:
env = TransposeImage(env)
return env | [
"def _init_observation_space(self):\n # Get the observation space from the raw environment\n # NOTE assumes a raw MiniGrid space which is a dictionary whose\n # 'image' entry contains the actual image-encoding observation\n raw_obs_space = self.env.observation_space['image']\n\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the series object for the last series run by this user on this system. | def load_last_series(pav_cfg, errfile: TextIO) -> Union[series.TestSeries, None]:
try:
series_id = series.load_user_series_id(pav_cfg)
except series.TestSeriesError as err:
output.fprint("Failed to find last series: {}".format(err.args[0]), file=errfile)
return None
try:
re... | [
"def load_or_fetch_series(self, symbol: str):\n try:\n df = self.load(symbol)\n except (KeyError, FileNotFoundError):\n df = self.refresh()\n return df[self.time_series]",
"def get_series(self):\n return self.series",
"def latestsltrain(self):\n return se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of SeriesInfo objects based on the args.series attribute. When args.series is empty, default to the 'last' series started by the user on this system. If 'all' is given, search all series (with a default current user/system/1-day filter) and additionally filtered by args attributes provided via filters.add_se... | def arg_filtered_series(pav_cfg: config.PavConfig, args: argparse.Namespace,
verbose: TextIO = None) -> List[series.SeriesInfo]:
limit = getattr(args, 'limit', filters.SERIES_FILTER_DEFAULTS['limit'])
verbose = verbose or io.StringIO()
if not args.series:
args.series = ['la... | [
"def get_series():\n\n return Series.query.all()",
"def get_series_list(self):\n series_list = self.dal.get_series()\n return make_response(True, series_list)",
"def get_series(self):\n return self.series",
"def seriesInfo(seriesName):\n token = authenticate()\n authorization = {... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the given files which contain a list of tests (removing comments) and return a list of test names. | def read_test_files(pav_cfg, files: List[str]) -> List[str]:
tests = []
for path in files:
path = Path(path)
if path.name == path.as_posix() and not path.exists():
# If a plain filename is given (with not path components) and it doesn't
# exist in the CWD, check to see ... | [
"def read_tests(path: str) -> List[List[str]]:\n tests = []\n current = []\n with open(asset(path)) as file:\n for line in file:\n if line == \"---\\n\":\n tests.append(current)\n current = []\n else:\n current.append(line)\n\n te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find a collection in one of the config directories. Returns None on failure. | def get_collection_path(pav_cfg, collection) -> Union[Path, None]:
# Check if this collection exists in one of the defined config dirs
for config in pav_cfg['configs'].items():
_, config_path = config
collection_path = config_path.path / 'collections' / collection
if collection_path.exi... | [
"def find_in_collection_by_name(self, collection_or_key, name):\n if type(collection_or_key) is str:\n collection_or_key = self.graph.get_collection(collection_or_key)\n for v in collection_or_key:\n if v.name == name:\n return v\n name += ':0'\n for ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of raw test id's and series id's, return a list of paths to those tests. The keyword 'last' may also be given to get the last series run by the current user on the current machine. | def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]:
if errfile is None:
errfile = io.StringIO()
test_paths = []
for raw_id in req_tests:
if raw_id == 'last':
raw_id = series.load_user_series_id(pav_cfg, errfile)
if raw_id is None:
... | [
"def get_tests_by_id(pav_cfg, test_ids: List['str'], errfile: TextIO,\n exclude_ids: List[str] = None) -> List[TestRun]:\n\n test_ids = [str(test) for test in test_ids.copy()]\n\n if not test_ids:\n # Get the last series ran by this user\n series_id = series.load_user_series_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filter the given tests by raw id. | def _filter_tests_by_raw_id(pav_cfg, id_pairs: List[ID_Pair],
exclude_ids: List[str]) -> List[ID_Pair]:
exclude_pairs = []
for raw_id in exclude_ids:
if '.' in raw_id:
label, ex_id = raw_id.split('.', 1)
else:
label = 'main'
ex_id... | [
"def test_list_filter_id(self):\n # create reports\n models.Report.objects.create(customer=self.customer, start_date=date(2019, 1, 1), end_date=date(2019, 1, 31))\n report_2 = models.Report.objects.create(customer=self.customer, start_date=date(2019, 2, 1), end_date=date(2019, 2, 28))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of paths to test run directories, return the corresponding list of tests. | def get_tests_by_paths(pav_cfg, test_paths: List[Path], errfile: TextIO,
exclude_ids: List[str] = None) -> List[TestRun]:
test_pairs = [] # type: List[ID_Pair]
for test_path in test_paths:
if not test_path.exists():
output.fprint(sys.stdout, "No test at path: {}".fo... | [
"def find(port, paths):\n gather_start_time = time.time()\n paths_to_walk = set()\n # if paths is empty, provide a pre-defined list.\n if paths:\n _log.debug(\"Gathering tests from: %s relative to %s\" % (paths, port.layout_tests_dir()))\n for path in paths:\n # If there's an * ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a list of raw test id's and series id's into a list of test objects. | def get_tests_by_id(pav_cfg, test_ids: List['str'], errfile: TextIO,
exclude_ids: List[str] = None) -> List[TestRun]:
test_ids = [str(test) for test in test_ids.copy()]
if not test_ids:
# Get the last series ran by this user
series_id = series.load_user_series_id(pav_cfg)
... | [
"def sids(test_songs):\r\n return [s.sid for s in test_songs]",
"def make_vid_list():\n tests = []\n\n for i in range(1, 5):\n with open(f'data/vid_props/test{i}.csv') as f:\n test = [Video.from_text(l) for l in f.readlines()[1:]]\n tests.append(test)\n\n return tests",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate the name for the test set based on the test input to the run command. | def get_testset_name(pav_cfg, tests: List['str'], files: List['str']):
# Expected Behavior:
# pav run foo - 'foo'
# pav run bar.a bar.b bar.c - 'bar.*'
# pav run -f some_file - 'file:some_file'
# pav run baz.a baz.b foo - 'baz.*,foo'
# pav run foo bar baz bla... | [
"def data_set_name(self) -> str:\n return pulumi.get(self, \"data_set_name\")",
"def test_name(self) -> None:\n return self._test_name",
"def generate_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"generate_name\")",
"def setup_name(self):\n return self._setup_name",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a string query to get data from a lane and from a list of days. The query collects info about cars with Spanish OBU only. | def get_sql_query(day_load, lindex):
sz = "select N_Mensaxe_C, N_Estacion_C, N_Via_C, D_Data_C,\
T_Hora_C, Sz_Chave_C, N_Orixen_X, N_Destino_X,\
N_Pago_X, N_Obu_Validez_In, N_Obu_Pago, N_Obu_Estacion,\
D_Obu_Data, T_Obu_Time, N_Obu_Via_Entrada, indice\n\
from... | [
"def query_city():\n\n try: \n locations = pd.read_sql(\"\"\"\n SELECT DISTINCT(event_city)\n FROM ticket_sales;\n \"\"\",\n con=engine)\n \n # removes enclosing brackets of dataframe elements using list slicing and trans... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a probability forecast. | def rem_predicted(self, value):
for bin_ in sorted(self.bins):
if value <= bin_:
self.bins[bin_]['predicted'] -= value/100.0
break | [
"def test_remove_prediction(self):\n # setup\n predictions = self.context.get_predictions_for_test(2, self.session)\n self.session.add_all(predictions)\n self.session.commit()\n\n self.session.query(context.Prediction).filter(\n context.Prediction.timestamp == predictio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uses viterbi algorithm to find most likely tags for the given inputs. If constraints are applied, disallows all other transitions. | def viterbi_tags(self,
logits: torch.Tensor,
mask: torch.Tensor) -> List[Tuple[List[int], float]]:
_, max_seq_length, num_tags = logits.size()
# Get the tensors out of the variables
logits, mask = logits.data, mask.data
# Augment transitions ma... | [
"def viterbi_tags(self, logits: torch.Tensor, mask: torch.Tensor, logits_batch_first=False) ->List[Tuple[List[int], float]]:\n if not logits_batch_first:\n logits = logits.transpose(0, 1).contiguous()\n mask = mask.transpose(0, 1).contiguous()\n _, max_seq_length, num_tags = logi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts the tag ids to the actual tags. ``output_dict["tags"]`` is a list of lists of tag_ids, so we use an ugly nested list comprehension. | def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
output_dict["tags"] = [
[self.vocab.get_token_from_index(tag, namespace=self.label_namespace)
for tag in instance_tags]
for instance_tags in output_dict["tags"]
]
... | [
"def format_input(dict_pics):\n pictures = dict_pics.values()\n\n pictures_per_tags = defaultdict(set)\n\n for pic in pictures:\n for tag in pic.tags:\n pictures_per_tags[tag].add(pic.id)\n\n pictures_per_tags = dict(pictures_per_tags)\n return pictures_per_tags",
"def getTags(tag... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function takes in a list of images, along with a name for the output and downloads them into a directory. | def download_image(imageList, name, ddir):
for i, image in enumerate(imageList):
wget.download(image, out= ddir + str(name + '_' +str(i)) + '.jpg') | [
"def download_images(img_urls, dest_dir):\n #print dest_dir, img_urls\n try:\n full_path = os.path.abspath( dest_dir )\n except:\n print '*Directory error:', dirname\n sys.exit(1)\n #print 'full_path: ', full_path\n try:\n if not os.path.exists(full_path) :\n #print 'making directory:', full_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the allocation weight from current usage | def _get_allocation_weight(self, usage):
batch_size = usage.shape[0]
sorted_usage, idx = torch.sort(usage, dim=2)
_, rev_idx = torch.sort(idx, dim=2)
ones = Variable(sorted_usage.data.new(batch_size, 1, 1).fill_(1))
acc_prod_usage = torch.cumprod(
torch.cat((ones, so... | [
"def get_weight(self) -> float:\n return 0",
"def total_weight(self):\n return self.weight_fun(self.graph, self.path)",
"def calc_relative_weight(self):\n relative_weight = self.weight\n for agent in self.agents:\n if relative_weight > 0:\n relative_weight -... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the new temporal link and precedence | def _update_temporal_link_and_precedence(self, state,
transpose_write_weight):
num_cells = state.write_weight.shape[2]
grid_sum = (transpose_write_weight.repeat(1, 1, num_cells) +
state.write_weight.repeat(1, num_cells, 1))
grid_s... | [
"def periodic_link(self):\n\n if(self.gauge=='periodic'):\n return self.link(self.N-2,0)\n if(self.gauge=='relative'):\n return self.link(self.N-2,0,correct_wc=True)",
"def _update_links_tons(self, new_path, od):\n\n old_links = od.links\n new_links = new_path.links\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the usage vector in state | def _update_usage(self, interface, state):
prev_read_weights = state.read_weights
retention_vector = torch.prod(
1 - interface.free_gates * prev_read_weights,
dim=1).unsqueeze(dim=1)
usage = state.usage
usage = ((usage + state.write_weight - usage * state.write_w... | [
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorUS_update(self, *args)",
"def usage_vec(self, f_t, rw_prev, ww_prev, u_prev):\n # psi is the 1xN retention vector\n psi = np.ones_like(rw_prev) - f_t * rw_prev\n psi = np.prod(psi, axis=0)\n # u is the usage vector\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform one read/write on the memory. Computes the weights from the interface emitted by the controller. | def forward(self, interface, state):
state = self._update_write_weight(interface, state)
write_vector = interface.write_vector
erase_vector = interface.erase_vector
transpose_write_weight = torch.transpose(state.write_weight, 1, 2)
memory = state.memory
memory *= 1 - to... | [
"def __update(self, weights, datasets):\n # acquire write lock\n self.read_write.acquire()\n\n while self.readers > 0:\n self.read_write.wait()\n\n self.weights = utility.averageParam(\n (self.weights, self.datasets),\n (weights, datasets)\n )\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct an ibis table from a pandas DataFrame. | def from_dataframe(
self,
df: pd.DataFrame,
name: str = "df",
client: BasePandasBackend | None = None,
) -> ir.Table:
if client is None:
return self.connect({name: df}).table(name)
client.dictionary[name] = df
return client.table(name) | [
"def table_creator(table_name, dataframe, codex=False, id_col=None):\n\n # reformat column names\n dataframe.columns = dataframe.columns.str.lower()\n dataframe.columns = dataframe.columns.str.replace(' ', '_')\n\n # insert 'ids' column at the first column position\n if codex:\n id_col_name_st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the lr to the initial lr decayed by 10 every d epochs | def adjust_learning_rate(optimizer,epoch,model_options,d):
lr = model_options['learning_rate']*(0.1**(epoch//d))
print 'Learning rate: ', lr
for param_group in optimizer.param_groups:
param_group['lr'] = lr | [
"def lr_decay(self):\n\t\tself.lr = self.lr * self.gamma",
"def adjust_learning_rate(self, epoch):\r\n self.lr_current = self.lr_initial * (0.1 ** (epoch // 30))\r\n for param_group in self.optimizer.param_groups:\r\n param_group['lr'] = self.lr_current",
"def adjust_learning_rate(self,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
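The decay rule in the entry above is a plain step schedule; its behaviour is easy to tabulate with a hypothetical base rate and period:

```python
lr0, d = 0.01, 30  # hypothetical initial learning rate and decay period
for epoch in (0, 29, 30, 60, 90):
    # lr stays at lr0 until epoch d, then drops by 10x every d epochs.
    print(epoch, lr0 * 0.1 ** (epoch // d))
```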
Copy a symlink from the file source to the file destination or directory. Check whether the file destination is a file or a directory; unlink if a file already exists. | def copy_symlink(file_src, file_dst, new_file):
if os.path.isfile(file_dst):
if os.path.exists(file_dst):
os.unlink(file_dst)
os.symlink(file_src, file_dst)
elif os.path.isdir(file_dst):
if os.path.exists(new_file):
os.unlink(new_file)
os.symlink(file_src,... | [
"def copy_and_symlink(source, destination):\n copyfile(source, destination)\n os.remove(source)\n os.symlink(destination, source)",
"def copy_hardlink(file_src, file_dst, new_file):\n if os.path.isfile(file_dst):\n if os.path.exists(file_dst):\n os.unlink(file_dst)\n os.link(f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy a hardlink from the file source to the file destination or directory. Check whether the file destination is a file or a directory; unlink if a file already exists. | def copy_hardlink(file_src, file_dst, new_file):
if os.path.isfile(file_dst):
if os.path.exists(file_dst):
os.unlink(file_dst)
os.link(file_src, file_dst)
elif os.path.isdir(file_dst):
if os.path.exists(new_file):
os.unlink(new_file)
os.link(file_src, new_... | [
"def hardlink(src, dest):\n if exists(dest):\n delete(dest)\n\n try:\n from os import link\n link(compat_path(src), compat_path(dest))\n except (AttributeError, OSError, ImportError):\n return copy(src, dest)\n log(2, \"Hardlink file '{src}' to '{dest}'.\", src=src, dest=dest... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the differing positions between the file source and the file destination; generate an error if the file source doesn't grant read rights. | def get_diff_position(file_src, file_dst):
diff_position = -1
compare_diff = []
try:
with open(file_src, "r") as src, open(file_dst, "r") as dst:
content_src = src.read()
content_dst = dst.read()
d = difflib.Differ()
compare_diff = list(d.compare(conte... | [
"def fpdiff(filename1,filename2,relative_error,small):\n\n import math\n import gzip\n \n #Open the files\n\n #If the first file is a gzipped file, open it via the gzip module\n try:\n if(filename1.find(\".gz\") != -1):\n F1=gzip.open(filename1)\n #Otherwise open as normal\n else:\n F1=open(filename1);\n #If ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Allow a customer to sign up in the application. username and password are properties of the UserLogin class. first_name, last_name, phone_num, email_address, and payment are all properties of the Customer class. When this method is called, the request goes to the Customer class to create the customer. At the same time, the user login class is i... | def signupForApplication(self, username, password,
first_name, last_name, phone_num,
email_address, payment=None):
try:
self._customer = Customer.signup(
username, password,
first_name, last_name,
phone_num, ema... | [
"def create_new_user(self):\n name = get_param('What is your name?', self.screen)\n address = get_param('What is your street address?', self.screen)\n city = get_param('What city do you live in?', self.screen)\n state = get_param('What state do you live in?', self.screen)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scheme of SiPM positions (the numbers are the SiPM charges):
1 1 1
1 6 1
1 1 1
1 5 0
1 1 1
This test is meant to fail if either 1) in the case of an empty masked channel list, the actual threshold in the number of SiPMs around the hottest one turns out to be different from msipm, or 2) the masked channel is not taken proper... | def test_masked_channels():
xs = np.array([0, 0, 0, 1, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2])
ys = np.array([0, 1, 2, 0, 1, 2, 0, 2, 3, 4, 3, 4, 3, 4])
qs = np.array([1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 6, 1, 1, 1])
pos = np.stack((xs, ys), axis=1)
masked_pos = np.array([(2, 1)])
... | [
"def test_choi_is_block_positive():\n mat = choi()\n np.testing.assert_equal(is_block_positive(mat, rtol=0.001), True)\n np.testing.assert_equal(is_block_positive(mat, k=2, rtol=0.001), False)",
"def make_hsrl_mask_simple(qc_mask,molecular_counts,mol_lost_level,i2a_molecular_counts=None):\n\n # np.set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Trivial rootfinding function, sets ``gout[0] = y[0] - g_data``. >>> from pysundials import cvode >>> import ctypes >>> gout = cvode.NVector([0.0]) >>> g_data = ctypes.c_float(2.5) >>> g_rtfn_y(0, [0], gout, ctypes.byref(g_data)), gout (0, [-2.5]) >>> g_rtfn_y(3, [5], gout, ctypes.byref(g_data)), gout (0, [2.5]) | def g_rtfn_y(t, y, gout, g_data):
import ctypes
gout[0] = y[0] - ctypes.cast(g_data,
ctypes.POINTER(ctypes.c_float)).contents.value
return 0 | [
"def obfn_gvar(self):\n\n if self.opt['gEvalY']:\n return self.Y\n else:\n return self.cnst_A(None, self.Xf) - self.cnst_c()",
"def runge_kutt(y0, x, step):\n y = [y0]\n for i in range(1, len(x)):\n k1 = f(x[i - 1], y[i - 1])\n k2 = f(x[i - 1] + step / 2, y[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Exponential growth equation. >>> t, y, ydot, f_data = 0, [2], [0], None >>> exp_growth(t, y, ydot, f_data); ydot [2] >>> exp_growth(t, y, ydot, f_data, r=3); ydot [6] | def exp_growth(t, y, ydot, f_data, r=1):
ydot[0] = r * y[0] | [
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def logistic_growth(t, y, ydot, f_data, r=1, K=1):\r\n ydot[0] = r * y[0] * (1 - y[0] / K)",
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solution to exponential growth equation. >>> from numpy import arange >>> exp_growth_sol(arange(4), 1).round(2) array([ 1. , 2.72, 7.39, 20.09]) | def exp_growth_sol(t, y0, r=1):
from numpy import exp
return y0 * exp(r * t) | [
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solution to nonsmooth growth example. | def nonsmooth_growth_sol(t, y0):
import numpy as np
t1 = t[t < 1]
t2 = t[t >= 1]
y1 = y0 * np.exp(t1)
y2 = y0 * np.exp(1) / np.exp(t2 - 1)
return np.r_[y1, y2] | [
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def _use_growth_formula(self, min_value, max_value, scale):\n value = ((self.current_level - 1) / (self.max_level - 1)) ** scale\n value *= (max_value - min_value)\n value += min_value\n return value",
"def growth(self, p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Logistic growth equation. >>> t, y, ydot, f_data = 0, [2], [0], None >>> logistic_growth(t, y, ydot, f_data); ydot [2] >>> logistic_growth(t, y, ydot, f_data, r=3); ydot [6] | def logistic_growth(t, y, ydot, f_data, r=1, K=1):
ydot[0] = r * y[0] * (1 - y[0] / K) | [
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def const_growth(t, y, ydot, f_data, k=1):\r\n ydot[0] = k",
"def exp_growth_sol(t, y0,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solution to logistic growth equation. >>> from numpy import arange >>> logistic_growth_sol(arange(4), 0.1).round(2) array([ 0.1 , 0.23, 0.45, 0.69]) | def logistic_growth_sol(t, y0, r=1, K=1):
from numpy import exp
ert = exp(r * t)
return K * y0 * ert / (K + y0 * (ert - 1)) | [
"def logistic_growth(t, y, ydot, f_data, r=1, K=1):\r\n ydot[0] = r * y[0] * (1 - y[0] / K)",
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def compute_growth(f, t, period, start, stop, g_scale=80... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constant growth equation. >>> t, y, ydot, f_data = 5, [0], [0], None >>> const_growth(t, y, ydot, f_data); ydot [1] | def const_growth(t, y, ydot, f_data, k=1):
ydot[0] = k | [
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def logistic_growth(t, y, ydot, f_data, r=1, K=1):\r\n ydot[0] = r * y[0] * (1 - y[0] / K)",
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Solution to constant growth equation. >>> from numpy import arange >>> const_growth_sol(arange(4), 0).round(2) array([0, 1, 2, 3]) | def const_growth_sol(t, y0, k=1):
return y0 + k * t | [
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def const_growth(t, y, ydot, f_data, k=1):\r\n ydot[0] = k",
"def cal_growth_rate(x, column1, column2, default, jump_value=0):\n if x[column2] == 0:\n return default\n elif x[column2] == jump_value:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Little helper to print out progress numbers in proper format. Nothing gets printed if ``self.quiet`` is ``True``. | def _print_progress(self, progress_num, end="\r"):
# Print out status
if not self.quiet:
print("{}{:>15}".format(self._progress_msg, progress_num),
end=end, file=sys.stderr)
sys.stderr.flush() | [
"def PrintProgress(self):\n ratio = 100*self.progressBar['value'] / self.progressBar['maximum']\n s = '\\033[1K\\r['\n n = math.floor(ratio)\n s += '=' * n\n if n < 100:\n s += '>' + '.'*(100-n-1)\n s += '] {:6.2f} %'.format(ratio)\n print(s, end='')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split a feature-value pair separated by a colon into a tuple. Also do safe_float conversion on the value. | def _pair_to_tuple(pair, feat_map):
name, value = pair.split(':')
if feat_map is not None:
name = feat_map[name]
value = safe_float(value)
return (name, value) | [
"def _parse_value(self,value):\n value = value.strip()\n if not value:\n return None\n\n # assume that values containing spaces are lists of values\n if len(value.split()) > 1:\n return [self._parse_value(vv) for vv in value.split()]\n\n try:\n # s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A replacement for string.split that won't split delimiters enclosed in quotes. | def split_with_quotes(s, delimiter=' ', quote_char="'", escape_char='\\'):
if PY2:
delimiter = delimiter.encode()
quote_char = quote_char.encode()
escape_char = escape_char.encode()
return next(csv.reader([s], delimiter=delimiter, quotechar=quote_char,
... | [
"def _split_escape(str):\n return [_remove_escapes(x) for x in re.split(r\"(?<!\\\\)\\.\", str)]",
"def smart_split(string : str, delim=',', quotes='\"'):\n if len(quotes)==2 and isinstance(quotes,(list,tuple)):\n start,end=quotes\n quote_parts = []\n i = 0\n last = 0\n in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
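The `csv.reader` trick in `split_with_quotes` above behaves as follows on a hypothetical input:

```python
import csv

row = next(csv.reader(["a 'b c' d"], delimiter=" ", quotechar="'"))
print(row)  # ['a', 'b c', 'd'] -- the quoted span is not split
```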
Returns a ProofStatus for the current state. | def evaluate(self) -> ProofStatus:
if not self.done:
# The board is not yet done.
return ProofStatus.Unknown
# The board has ended, so we must be able to either Prove or Disprove this node.
# Player OR has connected three, indicating this node is proven.
if self.... | [
"def get_status(self) -> Status:\n if not self.solver_called:\n return Status.unsolved\n return pulp_to_malloovia_status(self.pulp_problem.status)",
"def get_status():\n## if n_latches() == 0:\n## return check_sat()\n status = prob_status() #interrogates ABC for the current s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
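The record above references a ProofStatus enum that is not shown; a minimal sketch of what it presumably looks like (the member names come from the document, the values are assumptions):

from enum import Enum

class ProofStatus(Enum):
    Unknown = 0    # game not over, or not yet evaluated
    Proven = 1     # player OR has won (e.g. connected three)
    Disproven = 2  # player OR can no longer win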
Sets the allocated of this ApimailboxesResources. | def allocated(self, allocated):
self._allocated = allocated | [
"def allocatable(self, value: typing.Union[\"VolumeNodeResources\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n VolumeNodeResources,\n VolumeNodeResources().from_dict(value),\n )\n self._properties[\"allocatable\"] = value",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the unit of this ApimailboxesResources. | def unit(self, unit):
self._unit = unit | [
"def backdoor_set_unit(self, unit):\n\n self.units = Units(unit)",
"def setUnit(self,unit):\n if not isinstance(unit, str):\n raise TypeError, utils.mapping(_(\"Unit ($1) must be a string: $2\"),\n (str(unit), self.__code))\n self.__unit = unit",
"def set_bunit(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a list of all powers of 2 less than or equal to the decimal input | def generate_powers_of_two(decimal_input):
current_power = 1
result = []
while current_power <= decimal_input:
result.insert(0, current_power)
current_power = current_power * 2
return result | [
"def power_list(numbers):\r\n powered_numbers = [\r\n n ** x\r\n for x,\r\n n in enumerate(numbers)\r\n ]\r\n\r\n return powered_numbers",
"def powers(self):\n return [1]",
"def powers_of_two(n):\n\tnum = 1\n\twhile num <= n:\n\t\tyield num\n\t\tnum *= 2",
"def calc_power(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
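The function from the record above, restated with two example calls showing that prepending via insert(0, ...) yields a descending list:

def generate_powers_of_two(decimal_input):
    current_power = 1
    result = []
    while current_power <= decimal_input:
        result.insert(0, current_power)  # prepend, so the list is descending
        current_power = current_power * 2
    return result

print(generate_powers_of_two(10))  # [8, 4, 2, 1]
print(generate_powers_of_two(1))   # [1]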
Recursively determines the output binary and the largest gap. | def determine_binary_and_gap(current_target, binary_number_in_progress, largest_gap_yet,
current_size_of_gap, is_gap_right_now, powers_of_two):
current_power_of_two = powers_of_two.pop(0)
if current_target >= current_power_of_two:
current_target = current_target - current_po... | [
"def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setva... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
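The recursive document above is truncated; an iterative sketch of the same binary-and-largest-gap idea (my own formulation, not the record's code):

def binary_and_gap(n):
    # Build the binary string and track the longest run of zeros
    # that sits between two ones.
    binary = bin(n)[2:]
    largest, current, seen_one = 0, 0, False
    for bit in binary:
        if bit == '1':
            if seen_one:
                largest = max(largest, current)
            seen_one, current = True, 0
        else:
            current += 1
    return binary, largest

print(binary_and_gap(529))  # ('1000010001', 4)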
Read queries from file | def ReadQuery(filename):
try:
with codecs.open(filename, 'rb', encoding='utf-8') as f:
            return f.readlines()
    except IOError:
        print("Query file not found!")
        return None | [
"def query_from_file(*file_path: str):\n conn, cur = DbManager.get_db()\n queries = read_file(*file_path).split(\"-----\")\n for query in queries:\n cur.execute(query)\n conn.commit()\n cur.close()\n conn.close()",
"def getSqls(file):\n if isinstance(file, i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Close gateway. Do some cleanup before leaving. | def close(self):
        for listener in self._listeners.values():
            listener.close()
self._log.info("Exiting gateway...")
logging.shutdown() | [
"def shutdown_gateway(self):\n self.dtn_receive.end_run()\n self.http_action.end_run()\n return",
"async def close(self):\n tasks = []\n for gateway in self.gateways:\n task = Task(gateway.close(), KOKORO)\n tasks.append(task)\n \n await WaitT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a matrix, or 2D list. Row numbers represent from nodes, column numbers represent to nodes. Store the edge values in each spot, and a 0 if no edge exists. | def get_adjacency_matrix(self):
#initialize an empty 2D list
length = len(self.nodes)
matrix = [x[:] for x in [[0]*length]*length]
for edge in self.edges:
fromIndex = self.nodes.index(edge.node_from)
toIndex = self.nodes.index(edge.node_to)
ma... | [
"def get_adjacency_matrix(self):\n l = len(self.nodes) + 1\n edgeArray = np.zeros( (l,l), dtype=np.int)\n #print edgeArray\n for edge in self.edges:\n edgeArray[edge.node_from.value][edge.node_to.value] = edge.value\n return edgeArray.tolist()",
"def get_adjacency_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
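The record is truncated at the assignment into the matrix; a self-contained sketch with minimal stand-in Node/Edge/Graph classes (the stand-in class layout is an assumption):

class Node:
    def __init__(self, value):
        self.value = value

class Edge:
    def __init__(self, value, node_from, node_to):
        self.value, self.node_from, self.node_to = value, node_from, node_to

class Graph:
    def __init__(self, nodes, edges):
        self.nodes, self.edges = nodes, edges

    def get_adjacency_matrix(self):
        length = len(self.nodes)
        # Copy each row so the rows are independent lists.
        matrix = [row[:] for row in [[0] * length] * length]
        for edge in self.edges:
            from_i = self.nodes.index(edge.node_from)
            to_i = self.nodes.index(edge.node_to)
            matrix[from_i][to_i] = edge.value
        return matrix

a, b = Node(0), Node(1)
g = Graph([a, b], [Edge(5, a, b)])
print(g.get_adjacency_matrix())  # [[0, 5], [0, 0]]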
This Lambda function is triggered by AWS S3 when any file is uploaded to the media bucket, and it tries to create thumbnails in the target bucket at the desired sizes. | def lambda_handler(event, context, size=(256, 256)):
s3_client = boto3.client('s3')
for record in event['Records']:
bucket_name = record['s3']['bucket']['name']
object_key = record['s3']['object']['key']
local_key = '/tmp/{}'.format(object_key)
s3_client.download_file(bucket_name... | [
"def lambda_handler(event, context):\n\n\n record_gen = fetch_record(event)\n image_dir = os.environ.get(\"IMAGE_DIR\", \"/tmp\")\n\n client = boto3.client(\"s3\", endpoint_url=os.environ.get(\"S3_ENDPOINT\", None))\n\n try:\n for bucket, objkey in record_gen:\n # downalod\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
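The record above is truncated before the resize step; a sketch of the thumbnail part, assuming Pillow and a hypothetical make_thumbnail helper (uploading the result back to the target bucket with s3_client.upload_file would follow):

from PIL import Image

def make_thumbnail(local_key, size=(256, 256)):
    # Pillow's thumbnail() resizes in place, preserving aspect ratio;
    # the output format is inferred from the file extension on save.
    with Image.open(local_key) as img:
        img.thumbnail(size)
        thumb_key = local_key.replace('/tmp/', '/tmp/thumb-')
        img.save(thumb_key)
    return thumb_key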
noveNastaveni can be either 'cs' or 'en' | def zmenLocale(uziv, noveNastaveni):
if noveNastaveni in ['cs','en']:
cultureDict = {'cs':['cs', 'CZ'], 'en':['en', 'US']}
print ("Zmena jazyka uzivatele na: ", noveNastaveni)
uziv.update(culture=cultureDict[noveNastaveni][0], region=cultureDict[noveNastaveni][1])... | [
"def club_locale(club: Club):\n if club.federation.startswith(\"V\"):\n return \"nl\"\n if club.federation.startswith(\"F\"):\n return \"fr\"\n if club.federation.startswith(\"D\"):\n return \"de\"\n return \"nl\"",
"def test_language_translation_translate_deu_to_eng(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
insert_route. Add routing table entry for a specific vRouter | def insert_route(self, match_vRouter_number,
match_ipv4address,
action_dest_mac,
action_egress_port):
entry = shell.TableEntry("MyIngress.ipv4NextHopLPM")(
action="MyIngress.ipv4Forward")
entry.match["vRouterNumber"] = str(match... | [
"def _insert(self, router, distanceVector):\r\n if router not in self.routingTable:\r\n self.routingTable[router] = {}\r\n\r\n dv = self.routingTable[router]\r\n\r\n for destinationRouter, distance, nextHopRouter in distanceVector:\r\n if destinationRouter not in dv:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
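Both table-programming records here follow the p4runtime-shell pattern; the truncated bodies presumably end by submitting the entry with insert(). A minimal sketch with placeholder match/action values (the field and parameter names beyond those shown in the records are assumptions):

import p4runtime_sh.shell as shell

# Assumes shell.setup(...) has already connected to the P4Runtime target.
entry = shell.TableEntry("MyIngress.ipv4NextHopLPM")(
    action="MyIngress.ipv4Forward")
entry.match["vRouterNumber"] = "1"                # placeholder vRouter id
entry.match["hdr.ipv4.dstAddr"] = "10.0.0.0/24"   # placeholder prefix
entry.action["dstAddr"] = "08:00:00:00:01:00"     # placeholder next-hop MAC
entry.action["port"] = "1"                        # placeholder egress port
entry.insert()                                    # submit to the switch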
insert_vRouter_port_mapping. Assign ingress port to vRouter | def insert_vRouter_port_mapping(self, match_ingress_port, action_vRouter_number):
entry = shell.TableEntry("MyIngress.vRouterNumberMatching")(
action="MyIngress.setVSwitchNumber")
entry.match["standard_metadata.ingress_port"] = str(match_ingress_port)
entry.action["vRouterNumberFrom... | [
"def insert_route(self, match_vRouter_number,\n match_ipv4address,\n action_dest_mac,\n action_egress_port):\n\n entry = shell.TableEntry(\"MyIngress.ipv4NextHopLPM\")(\n action=\"MyIngress.ipv4Forward\")\n entry.match[\"vRouterNum... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
send_bf_shell_commands opens a telnet connection to the p4 target and sends the contents of the provided file to it. | def send_bf_shell_commands(self, telnet_port, port_config_fd):
logging.info("Connecting to {}:{}".format(self.target_ip, telnet_port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.target_ip, telnet_port))
s_fd = s.makefile('rw')
logging.info("Submiting ... | [
"def echo_file_to_transport(self,\n source_file,\n destination_path,\n port=0,\n bytes_per_echo=50):",
"def send_file(self, file, to):\n SendFile.file = file\n SendFile.to = to\n Se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a string is an IP address or not; assume it is a domain otherwise. | def isip(str):
try:
IP(str)
except ValueError:
return False
return True | [
"def _is_ip_address(str):\n try:\n return IPv4Address(str)\n except AddressValueError:\n try:\n return IPv6Address(str)\n except AddressValueError:\n return False",
"def _isip(s):\n if re.search(\"[^0-9.]\", s.strip()): # Try to matc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
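A self-contained usage sketch of the record above, assuming the IPy package it relies on (the parameter is renamed from str to avoid shadowing the builtin):

from IPy import IP

def isip(s):
    # IP() raises ValueError on anything that is not a valid address.
    try:
        IP(s)
    except ValueError:
        return False
    return True

print(isip('192.168.1.1'))  # True
print(isip('example.com'))  # False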
Parse scope to expand IP ranges | def genScope(scope_file):
scope = []
try:
with open(scope_file, 'r') as preparse:
for i in preparse:
# Check if there is a -
# Ex: 192.168.1.1-50 becomes 192.168.1.1,192.168.1.50
i = i.rstrip()
if "-" in i:
print(green("[+] {} is a range - expanding...".format(i.rstrip())))
i = i.rstrip... | [
"def ParseInterfaceRanges(self):\n ranges = Session.ExecCommand(\"show configuration interfaces | display set | match interface-range\")\n for line in [l.lower().strip() for l in ranges.splitlines()] :\n try:\n words = line.split(\" \")\n if \"interface-range\" in line :\n if \" me... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
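The expansion loop in the record above is cut off; a hypothetical expand_range helper sketching the same '192.168.1.1-50' expansion:

def expand_range(entry):
    # '192.168.1.1-50' -> ['192.168.1.1', ..., '192.168.1.50']
    base, last_octet = entry.rsplit('.', 1)
    start, stop = last_octet.split('-')
    return ['{}.{}'.format(base, i) for i in range(int(start), int(stop) + 1)]

print(expand_range('192.168.1.1-50')[:2])   # ['192.168.1.1', '192.168.1.2']
print(len(expand_range('192.168.1.1-50')))  # 50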
Perform an RDAP lookup for an IP address | def runRDAP(domain_ip):
try:
rdapwho = IPWhois(domain_ip)
results = rdapwho.lookup_rdap(depth=1)
return results
except Exception as e:
print(red("[!] Failed to collect RDAP information for {}!").format(domain_ip))
print(red("[!] Error: {}".format(e))) | [
"def lookup_ip(self, ip: str) -> Result(str, Exception):\n\t\ttry:\n\t\t\treturn Result(self.log[ip], None)\n\t\texcept Exception as e:\n\t\t\treturn Result(None, e)",
"def __query_from_dns(ip_address):\n try:\n return socket.gethostbyaddr(ip_address)[0]\n except socket.gaierror:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
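A usage sketch for the record above, assuming the ipwhois package it imports IPWhois from (the printed values are illustrative; 'asn' and 'network' are documented keys of the RDAP result dict):

from ipwhois import IPWhois

results = IPWhois('8.8.8.8').lookup_rdap(depth=1)
print(results['asn'])              # autonomous system number, e.g. '15169'
print(results['network']['name'])  # registered network name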
Collect various domain information (whois, DNS, RDAP) for the target domain. | def collectDomainInfo(domain, report, verbose):
domain_name = domain
domain_ip = socket.gethostbyname(domain)
try:
report.write("\n---Info for {}---\n".format(domain))
# If entry is a domain, then run whois and try to get the IP address
# Note: IP may return different results because domain may resolve to a lo... | [
"def _record_domain_info(self, a_domain, a_tld, a_file, switch=True):\n\t\texceptions = []\n\t\tdomain_ctypos = self._generate_ctypos_for_domain(a_domain)\n\t\t#first we grab all the content we can via loading up the url\n\t\ttry:\n\t\t\twpg = WebPageInfoGetter(a_domain)\n\t\t\twpg.setUpGetter(a_domain)\n\t\texcept... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collect reputation data from URLVoid for the target domain; an API key is required. | def urlVoidLookup(domain, report):
if not isip(domain):
try:
if URLVOID_API_KEY is not None:
print(green("[+] Checking reputation with URLVoid"))
report.write("\n---URLVOID Results---\n")
url = "http://api.urlvoid.com/api1000/{}/host/{}".format(URLVOID_API_KEY,domain)
response = requests.get(url)
... | [
"def url(self):\n return \"http://www.reddit.com/r/getmotivated.json?limit=500\"",
"def reputation(self):\n return self._reputation",
"def get_data_from_reaper(self):\n url = 'http://reaper:3300'\n source = requests.get(url)\n self.all_rate = source.json()",
"def process_request... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve the ith element of S (Section 3.1) | def elt(S, i):
if i == 0:
return core.first(S)
else:
return elt(core.rest(S), i - 1) | [
"def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp",
"def list_get(s, i):\n if i == 0:\n return s('first')\n else:\n return list_get(s('second'), i-1)",
"def __getitem__(self, i):\n return self._data[i]",
"def __getitem__(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
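The `core` module used by this and the following sequence records is not shown; list-based stand-ins plus a demo (the stand-in definitions are assumptions):

from types import SimpleNamespace

# List-based stand-ins for the unnamed `core` module.
core = SimpleNamespace(
    first=lambda S: S[0],
    rest=lambda S: S[1:],
    prefix=lambda x, S: [x] + S,
)

def elt(S, i):
    if i == 0:
        return core.first(S)
    return elt(core.rest(S), i - 1)

print(elt(['a', 'b', 'c'], 2))  # 'c'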
Catenate sequence S with sequence T (Section 3.2) | def cat(S, T):
if not S:
return T
elif not T:
return S
else:
return core.prefix(core.first(S), cat(core.rest(S), T)) | [
"def concat(seqs): # real signature unknown; restored from __doc__\n pass",
"def seqreverseaux(S, T):\n if not S:\n return T\n else:\n return seqreverseaux(core.rest(S), core.prefix(core.first(S), T))",
"def concatv(*seqs): # real signature unknown; restored from __doc__\n pass",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
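With the same list-based stand-ins for `core`, cat concatenates by repeatedly prefixing elements of S onto T:

from types import SimpleNamespace

core = SimpleNamespace(first=lambda S: S[0], rest=lambda S: S[1:],
                       prefix=lambda x, S: [x] + S)

def cat(S, T):
    if not S:
        return T
    if not T:
        return S
    return core.prefix(core.first(S), cat(core.rest(S), T))

print(cat([1, 2], [3, 4]))  # [1, 2, 3, 4]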
The indexed substitution of x for the ith element of S (Exercise 3.23) | def indsubst(x, i, S):
if not S:
return [x]
elif i == 0:
return core.prefix(x, core.rest(S))
elif i > 0:
return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S))) | [
"def subst1st(x, y, S):\n if core.first(S) == y:\n return indsubst(x, 0, S)\n else:\n return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))",
"def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The first indexed substitution of the value y with x in sequence S (Exercise 3.28) | def subst1st(x, y, S):
if core.first(S) == y:
return indsubst(x, 0, S)
else:
return core.prefix(core.first(S), subst1st(x, y, core.rest(S))) | [
"def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
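A self-contained demo of the two substitution records, again with list-based stand-ins for `core` (an assumption): subst1st scans for the first occurrence of y and hands off to indsubst at index 0.

from types import SimpleNamespace

core = SimpleNamespace(first=lambda S: S[0], rest=lambda S: S[1:],
                       prefix=lambda x, S: [x] + S)

def indsubst(x, i, S):
    if not S:
        return [x]
    if i == 0:
        return core.prefix(x, core.rest(S))
    return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S)))

def subst1st(x, y, S):
    if core.first(S) == y:
        return indsubst(x, 0, S)
    return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))

print(indsubst('X', 1, ['a', 'b', 'c']))    # ['a', 'X', 'c']
print(subst1st('X', 'b', ['a', 'b', 'b']))  # ['a', 'X', 'b']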