| query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
expect l to be a mutation function. | def test_mutation_function(l):
g = graphs.RandomGNP(20, .5)
mutant_graph = l(g)
#print l.__name__
#print mutant_graph.order()
assert mutant_graph.order() == 20 | [
"def mutate(self, individual):\n self.mutation_fun.mutate(individual)",
"def satisfiesF(L):\r\n # Your function implementation here\r\n\r\n # Create a copy of L to iterate over\r\n newList = L[:]\r\n\r\n # Iterate over the copy of the list newList, make any changes to L\r\n for i in newList:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for finding earliest and latest times for a segment in mapped production data | def test_find_segments_and_latest_time(self):
segment_times = overlap_handler.find_time_period_per_segment(self.mapped_data)
self.assertEqual(len(segment_times), 4)
correct_result = {
str(self.mapped_data[1]["segment"]): {"earliest_time": self.mapped_data[0]["time"],
... | [
"def overlap(event_sdt, event_edt):\n#sdt = start date time \n#edt = end date time \n event_sd = event_sdt.date()\n event_ed = event_edt.date()\n event_st = event_sdt.time()\n event_et = event_edt.time()\n desired_sd= arrow.get(flask.session['begin_date']).date()\n desired_ed = arrow.get(flask.ses... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for deleting overlapped production data | def test_delete_overlapped_prod_data(self):
overlap_handler.handle_prod_data_overlap(self.mapped_data)
prod_data = ProductionData.objects.all()
self.assertEqual(len(prod_data), 1) | [
"def test_remove_outdated_prod_data(self):\n filtered_prod_data = overlap_handler.remove_outdated_prod_data(self.segment_times, self.mapped_data)\n\n # Outdated by a large margin\n self.assertNotIn(self.mapped_data[0], filtered_prod_data)\n self.assertNotIn(self.mapped_data[1], filtered_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test for filtering out outdated production data based on the data already in db | def test_remove_outdated_prod_data(self):
filtered_prod_data = overlap_handler.remove_outdated_prod_data(self.segment_times, self.mapped_data)
# Outdated by a large margin
self.assertNotIn(self.mapped_data[0], filtered_prod_data)
self.assertNotIn(self.mapped_data[1], filtered_prod_data)... | [
"def live(self):\n return super(TestDataManager, self).get_query_set().filter(test_flag='l')",
"def test_no_update_fresh_data_bulk(self):\n w0 = Weather.objects.get(pk=6)\n w1 = Weather.objects.get(pk=7)\n w2 = Weather.objects.get(pk=8)\n\n w0.last_modified = self.CURRENT_TIME\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function takes a TensorFlow dataset and splits it into train, validation, and test sets. If only a train_frac is specified, the function will return a train set and a test set. A train set will always be returned unless the fractions of the validation and test sets sum to 1. | def train_val_test_split(dataset, train_frac=0, val_frac=0,
test_frac=0):
DATASET_LENGTH = len(list(dataset.as_numpy_iterator()))
assert(train_frac or test_frac or val_frac),\
"specify at least one of the fractions"
assert(train_frac + test_frac + val_frac <= 1),\
... | [
"def _split_train_validation_test(\n self, validation_frac: float, test_frac: float\n ) -> None:\n LOGGER.info(\"Splitting the train, validation and test data\")\n assert 0 < validation_frac + test_frac < 1\n assert validation_frac >= 0 and test_frac >= 0\n test_val_frac = vali... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
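The splitting logic in the row above is truncated; below is a minimal sketch of the take/skip idiom such a function typically builds on (function and variable names are mine, not the original implementation):

```python
import tensorflow as tf

def split_dataset(dataset, train_frac=0.8, val_frac=0.1):
    # Count elements the same way the truncated function does; this
    # requires a finite dataset and exhausts one full pass over it.
    n = len(list(dataset.as_numpy_iterator()))
    n_train = int(n * train_frac)
    n_val = int(n * val_frac)
    train_ds = dataset.take(n_train)
    val_ds = dataset.skip(n_train).take(n_val)
    test_ds = dataset.skip(n_train + n_val)  # whatever remains
    return train_ds, val_ds, test_ds
```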
Generates an image tensor and label for the image found at the specified path. | def process_path(file_path, CLASS_NAMES, im_height, im_width, channels=3):
label = get_label(file_path, CLASS_NAMES)
img = tf.io.read_file(file_path)
img = decode_img(img, im_height, im_width, channels)
return img, label | [
"def obtainDataAsTensors(im_path, im_label):\n\ttransformations = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])\n\timg = Image.open(im_path)\n\timg = img.convert('RGB')\n\timg = transformations(img)\n\n\tlabel = torch.from_numpy(np.asarray(im_label).reshape([1,1]))\n\n\treturn (img, label)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Makes the dataset ready for use by a model by possibly caching and shuffling it. It will also batch the dataset. Finally, it will also activate prefetching to make the reading of data more efficient. | def prepare_for_model_use(
dataset,
cache=True,
shuffle=True,
shuffle_buffer_size=1000,
batch_size=32,
prefetch=True,
repeat=True,
):
if cache:
if isinstance(cache, str):
dataset = dataset.cache(cache)
else:
dataset = dataset.cache()
if shuff... | [
"def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == CrowdDataset.ucf_qnrf:\n self.dataset_class = UcfQnrfFullImageDataset\n self.train_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
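The cache/shuffle/batch/prefetch body above is cut off mid-function; a minimal sketch of the conventional tf.data ordering it describes (defaults are illustrative):

```python
import tensorflow as tf

def prepare(dataset, shuffle_buffer_size=1000, batch_size=32):
    # cache -> shuffle -> repeat -> batch -> prefetch: caching early avoids
    # re-reading source files every epoch, and prefetching last overlaps
    # input preprocessing with model execution.
    dataset = dataset.cache()
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    return dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
```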
Return the queue that corresponds to the given collection. | def get_queue_by_collection(self, collection):
for k, v in self.mapping.items():
if v == collection:
return k
raise KeyError | [
"def get_queue(self, name):\n queue = self.connection.get_queue(name)\n return queue",
"def queue(self):\n from .queue import Queue\n return Queue.load(self.queue_id)",
"def get_queue(self):\n if self.queue is not None:\n return self.queue\n from timon.state ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AMQP consumer function that inserts an `IncomingMessage`'s JSON-loaded body into a MongoDB collection based on the source exchange. | async def store(self, message: IncomingMessage) -> None:
async with message.process():
try:
result = await self._db[self.mapping[message.exchange]].insert_one(
json.loads(message.body)
)
self.logger.info(result)
except E... | [
"def _on_message(self, *args):\n\n parsed_message = self._parse_message(args[0])\n\n if parsed_message is None:\n logging.debug((\"Could not parse message: %s\", args[0]))\n return\n\n logging.debug(\"Received message: %s\", parsed_message)\n\n parsed_message = self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return ``wikitext`` with all wikimedia markup templates removed, where templates are identified by opening '{{' and closing '}}'. | def _remove_templates(wikitext):
pieces = []
cur_idx = 0
for s, e in _get_delimited_spans(wikitext, open_delim='{{', close_delim='}}'):
pieces.append(wikitext[cur_idx: s])
cur_idx = e
return ''.join(pieces)
# below is gensim's solution; it's slow...
# n_openings = 0
# n_closi... | [
"def removeTemplates(oldtext=u''):\n result = re.sub(\n r'{{\\s*([Uu]ncat(egori[sz]ed( image)?)?|[Nn]ocat|[Nn]eedscategory)[^}]*}}',\n u'', oldtext)\n result = re.sub(u'<!-- Remove this line once you have added categories -->',\n u'', result)\n result = re.sub(r'\\{\\{\\s*[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace internal links of the form ``[[title |...|label]]trail`` with just ``label``. | def _replace_internal_links(wikitext):
pieces = []
cur_idx = 0
for s, e in _get_delimited_spans(wikitext, '[[', ']]'):
link_trail = re_link_trails.match(wikitext, pos=e)
if link_trail is not None:
end = link_trail.end()
link_trail = link_trail.group()
else:
... | [
"def _replace_external_links(wikitext):\n pieces = []\n cur_idx = 0\n for match in re_ext_link.finditer(wikitext):\n content = match.group(1)\n space_idx = content.find(' ')\n label = content[space_idx + 1:] if space_idx > 0 else content\n pieces.append(wikitext[cur_idx: match.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace external links of the form ``[URL text]`` with just ``text`` if present or just ``URL`` if not. | def _replace_external_links(wikitext):
pieces = []
cur_idx = 0
for match in re_ext_link.finditer(wikitext):
content = match.group(1)
space_idx = content.find(' ')
label = content[space_idx + 1:] if space_idx > 0 else content
pieces.append(wikitext[cur_idx: match.start()])
... | [
"def add_hyperlinks(text: str, url: URL, arg: str) -> str:\n return re.sub(r'\\[.*?\\]', lambda m: to_hyperlink(m, url, arg), text)",
"def fix_blog_links(text):\n return re_blog_url.sub(fix_blog_link, text)",
"def _replace_internal_links(wikitext):\n pieces = []\n cur_idx = 0\n for s, e in _get_d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run worker doing all sorts of background work | def run_worker():
from asu.utils.garbagecollector import GarbageCollector
from asu.utils.boss import Boss
from asu.utils.updater import Updater
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
log.info("start garbage collector")
gaco = GarbageCollector()
gaco.... | [
"def runworker():\n app.run(debug=False)",
"def start_workers(self):\n for worker in self.workers:\n worker.start()",
"async def run(self, workerAction: WorkerAction) -> None:\n for worker in self.workers:\n await workerAction(worker)",
"def start_workeres(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build all profiles of the latest stable OpenWrt release. | def build_all():
for profile in database.get_all_profiles(
"openwrt", config.get("openwrt").get("latest")
):
target, profile = profile
params = {
"distro": "openwrt",
"version": config.get("openwrt").get("latest"),
"target": target,
"profil... | [
"def _build_profile(self):\n self.setDriver('GV7', 4)\n # This writes all the profile data files and returns our config info.\n wrote_profile = False\n try:\n config_data = write_profile(LOGGER,self.hubs)\n wrote_profile = True\n except (Exception) as err:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build image with worker package preinstalled | def build_worker():
log.info("build worker image")
packages = [
"bash",
"bzip2",
"coreutils",
"coreutils-stat",
"diffutils",
"file",
"gawk",
"gcc",
"getopt",
"git",
"libncurses",
"make",
"patch",
"per... | [
"def build_image(image=image_tag):\n local(f'docker build -t {image} . --build-arg PORT={gunicorn_port}')",
"def build_executables():\n con = Container(\"python:3.6-alpine\")\n con.execute_sh(\"apk update -q\")\n con.execute_sh(\"apk add -q git\")\n con.execute_sh(\"mkdir /source\")\n con.copy_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert package transformations into the database. | def insert_transformations(distro, version, transformations):
for package, action in transformations.items():
if not action:
# drop package
# print("drop", package)
database.insert_transformation(distro, version, package, None, None)
elif isinstance(action, str):
... | [
"def insert(self, sql):",
"def add_to_package(**context): \n datafiles = context['task_instance'].xcom_pull(task_ids='write_to_json')\n p = t4.Package()\n # Add datafiles\n for df in datafiles:\n p = p.set(\n df,\n f\"{os.getcwd()}/{df}\",\n meta=f\"Add sourc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
FloydWarshall's all-pairs shortest path algorithm. Returns an n x n shortest path lengths array and an n x n largest interior vertex array such that the (i,j) entry contains the largest vertex index among all interior vertices forming the shortest path from vertex i to vertex j. If the shortest path from vertex i to vertex j i... | def FloydWarshall(G):
# We only need 2 n x n arrays to hold shortest path values.
A0 = [[float('inf') for _ in xrange(G.numVerts)]
for _ in xrange(G.numVerts)]
A1 = copy.deepcopy(A0)
# We need 1 n x n array to hold largest interior vertex index.
IntV = [[-1 for _ in xrange(G.numVerts)]
... | [
"def shortets_path(self, graph):\r\n calc = networkx.floyd_warshall(graph)\r\n return calc",
"def all_pairs_shortest_path(adj_matrix):\n dist_matrix = deepcopy(adj_matrix)\n n_nodes = len(dist_matrix)\n for i in range(n_nodes):\n for j in range(n_nodes):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
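The FloydWarshall document above is truncated before its main loop; a compact sketch of the underlying recurrence with my own names. Recording the last vertex k that improves dist[i][j] matches the "largest interior vertex" bookkeeping the query describes, because at iteration k every interior vertex used so far is at most k:

```python
def floyd_warshall(dist):
    """dist[i][j]: edge cost or float('inf'); dist[i][i] == 0. In place."""
    n = len(dist)
    interior = [[-1] * n for _ in range(n)]  # largest interior vertex per pair
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    interior[i][j] = k
    # A negative dist[i][i] afterwards signals a negative cycle.
    return dist, interior
```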
Reconstructs all-pairs shortest paths from values returned by FloydWarshall. Returns ((tail_vertex, head_vertex), path_cost, path_list) for each pair of vertices in the graph. >>> G = Graph.loadFromFile('g0.txt', True) >>> SP, IntV = FloydWarshall(G) >>> reconstructFM(G, IntV) | def reconstructFM(G, IntV):
def SP(i, j):
# Reconstruct shortest path from vertex i to vertex j.
if i == j: return []
V = IntV[i][j]
if V == -1:
return [i, j]
else:
return SP(i, V) + SP(V, j)[1:]
for i in xrange(len(IntV)):
for j in xrang... | [
"def FloydWarshall(G):\n # We only need 2 n x n arrays to hold shortest path values.\n A0 = [[float('inf') for _ in xrange(G.numVerts)]\n for _ in xrange(G.numVerts)]\n A1 = copy.deepcopy(A0)\n # We need 1 n x n array to hold largest interior vertex index.\n IntV = [[-1 for _ in xrange(G.num... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Johnson's all-pairs shortest path algorithm. Returns an n x n shortest path lengths array and an n x n largest interior vertex array such that the (i,j) entry contains the largest vertex index among all interior vertices forming the shortest path from vertex i to vertex j. If the shortest path from vertex i to vertex j is a di... | def Johnson(G):
# Modify Graph by adding a source vertex that connects
# to all vertices in G with edge cost of 0
#print "Adding an extra source vertex ..."
Vsrc = G.numVerts
Gx = Graph(G.numVerts+1, G.numEdges+G.numVerts)
for i in xrange(G.numEdges):
v1, v2 = G.getEdge(i)
cost =... | [
"def FloydWarshall(G):\n # We only need 2 n x n arrays to hold shortest path values.\n A0 = [[float('inf') for _ in xrange(G.numVerts)]\n for _ in xrange(G.numVerts)]\n A1 = copy.deepcopy(A0)\n # We need 1 n x n array to hold largest interior vertex index.\n IntV = [[-1 for _ in xrange(G.num... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
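The truncated Johnson's implementation above is adding the zero-cost super-source used to compute Bellman-Ford potentials h; the step that follows is reweighting every edge so all costs become non-negative, after which Dijkstra can be run from each vertex. A minimal sketch of that reweighting (names are illustrative):

```python
def reweight_edges(edges, h):
    # edges: list of (u, v, cost); h: Bellman-Ford distances from the
    # added source. The triangle inequality h[v] <= h[u] + cost makes
    # every new cost >= 0; true distances are recovered afterwards via
    # d(u, v) = d'(u, v) - h[u] + h[v].
    return [(u, v, cost + h[u] - h[v]) for (u, v, cost) in edges]
```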
Instantiate a new selector, dynamically sized for the window. | def get_selector(selection):
width = max(30, (term.width/2) - 10)
xloc = min(0, (term.width/2)-width)
selector = Selector (yloc=term.height-1, xloc=xloc, width=width,
left='utf8', right='cp437')
selector.selection = selection
return selector | [
"def build_shape_selector(self):\n\n frame = self.shape_selector\n\n shapes = [f for f in S.SHAPES.values()]\n shape = self.vars[\"shape\"] = tk.StringVar()\n shape.trace(\"w\", self.update_shape_entry)\n\n # set to shape of current coin\n if self.coin:\n current... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A command-line interface for synchronising the Up neobank with the budgeting app You Need A Budget. You can use the options below to specify the respective API keys for each command, but see $ up2ynab check help for details on the recommended way of configuring them using environment variables. | def cli(ctx, up_api_token, ynab_api_token):
ctx.obj = {
"up_token": up_api_token,
"ynab_token": ynab_api_token,
"echo_manager": EchoManager(),
} | [
"def cli():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-U', '--cot_url', help='URL to CoT Destination.',\n required=True\n )\n parser.add_argument(\n '-K', '--fts_token', help='FreeTAKServer REST API Token.'\n )\n parser.add_argument(\n '-S', '--... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Your goal in this kata is to implement a difference function, which subtracts one list from another and returns the result. It should remove all values from list a, which are present in list b. array_diff([1,2],[1]) == [2] | def array_diff(a, b):
return list(filter(lambda x: x not in b, a)) | [
"def difference(a, b):\n return [x for x in a if x in a and not x in b]",
"def array_dif_v1(first_array, second_array):\n result = [element for element in first_array if not second_array.count(element)]\n\n return result",
"def shallow_diff_lists(a, b, compare):\n assert isinstance(a, list) and isin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
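The accepted solution above is O(len(a) * len(b)) because `x not in b` performs a linear scan of the list; converting b to a set first makes each membership test O(1). A sketch of that variant:

```python
def array_diff_fast(a, b):
    exclude = set(b)  # hash lookups instead of linear scans
    return [x for x in a if x not in exclude]

assert array_diff_fast([1, 2], [1]) == [2]
```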
Apply rc environment. Will download from GitHub if not found in .rc4me/. Replaces rc files in the rc4me home directory with symlinks to files located in the target repo. If the target repo does not exist in the rc4me home directory, the repo is cloned either locally or from GitHub. | def apply(ctx: Dict[str, RcManager], repo: str):
rcmanager = ctx.obj["rcmanager"]
# Init repo variables
logger.info(f"Getting and setting rc4me config: {repo}")
# Clone repo to rc4me home dir or update existing local config repo
rcmanager.fetch_repo(repo)
# Wait to relink current until after fet... | [
"def refresh_source(options):\n cd(options.source, options.dry_run)\n if options.update:\n update_existing_repo(options.dry_run)\n else:\n clone_repo(options.dry_run)",
"def select(ctx: Dict[str, RcManager]):\n # Init rc4me directory variables\n rcmanager = ctx.obj[\"rcmanager\"]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Revert to previous rc4me configuration. Removes changes from most recent rc4me command and reverts to previous configuration. | def revert(ctx: Dict[str, RcManager]):
# Init rc4me directory variables
rcmanager = ctx.obj["rcmanager"]
logger.info("Reverting rc4me config to previous configuration")
rcmanager.change_current_to_prev() | [
"def reset(ctx: Dict[str, RcManager]):\n # Init rc4me directory variables\n rcmanager = ctx.obj[\"rcmanager\"]\n logger.info(\"Restoring rc4me config to initial configuration\")\n rcmanager.change_current_to_init()",
"def restore_config():\n global sg_kwargs\n sg_kwargs = kwargs_backup.copy()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset to initial rc4me configuration. Restores the rc4me destination directory rc files to the user's initial configuration. If any files were overwritten by rc4me at any point, they will be copied back into the rc4me destination directory. | def reset(ctx: Dict[str, RcManager]):
# Init rc4me directory variables
rcmanager = ctx.obj["rcmanager"]
logger.info("Restoring rc4me config to initial configuration")
rcmanager.change_current_to_init() | [
"def revert(ctx: Dict[str, RcManager]):\n # Init rc4me directory variables\n rcmanager = ctx.obj[\"rcmanager\"]\n logger.info(\"Reverting rc4me config to previous configuration\")\n rcmanager.change_current_to_prev()",
"def btnRestoreClicked(self):\n pyzo.resetConfig()\n shutil.copyfile(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select rc4me configurations. Displays all available repos and allows the user to select one. | def select(ctx: Dict[str, RcManager]):
# Init rc4me directory variables
rcmanager = ctx.obj["rcmanager"]
my_repos = rcmanager.get_rc_repos()
# Show all dirs that aren't curr/prev
title = "Please select the repo/configuration you want to use:"
options = list(my_repos.keys())
selected, _ = pic... | [
"def get_repositories():\n\n username = userEntry.get()\n organization = orgEntry.get()\n password = passEntry.get()\n\n if username == \"\":\n messagebox.showinfo(\"Missing Username\", \"Please enter your GitHub account username in the field provided.\")\n return\n if organization == \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests utils.get_current_tenant() for correctness. | def test_get_current_tenant(public_tenant, test_tenants):
with compat.schema_context(compat.get_public_schema_name()):
tenant = utils.get_current_tenant()
assert tenant == public_tenant
tenant = test_tenants.first()
with compat.schema_context(tenant.schema_name):
current_tenant = u... | [
"def get_current_tenant():\n return getattr(_thread_locals, 'tenant', None)",
"def get_tenant(request):\n if hasattr(request, 'tenant'):\n return request.tenant\n return None",
"def test_process_get_tenant(self):\n error, out = self.process_get_tenant()\n for err in error: assert e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests duplicate public tenant error. | def test_duplicate_tenant_url(tenant_user):
with pytest.raises(ExistsError, match='Public tenant already exists'):
utils.create_public_tenant('domain.com', tenant_user.email) | [
"def test_process_create_tenant(self):\n error, out = self.process_create_tenant()\n for err in error: assert err == 0",
"def test_process_get_tenant(self):\n error, out = self.process_get_tenant()\n for err in error: assert err == 0",
"def test_process_add_system_tenant(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests utils.fix_tenant_urls() for correctness. This utility function is only applicable to django-tenant-schemas. | def test_fix_tenant_urls(public_tenant, test_tenants):
new_domain = 'new-pytest-domain.com'
if compat.TENANT_SCHEMAS:
utils.fix_tenant_urls(new_domain)
public_tenant.refresh_from_db()
assert new_domain == public_tenant.domain_url
assert new_domain in test_tenants.first().domain... | [
"def _normalize_base_url(_base_url):\n _base_url = _base_url[:-1] if _base_url.endswith('/') else _base_url\n _base_url = f\"https://{_base_url}\" if not _base_url.startswith('http') else _base_url\n return _base_url",
"def make_entity_base_url(url):\n return url if url.endswith(\"/\") else url + \"/\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrap all admin views in a transaction. | def admin_view(self, *args, **kwargs):
wrapped = super(AdminSite, self).admin_view(*args, **kwargs)
return xact.xact(wrapped) | [
"def create_transaction(self):\n from django.db import transaction\n transaction.enter_transaction_management()\n return transaction",
"def views(self):\n yield self.sql_create_view",
"def run_in_transaction(func):\n def wrapper(*args, **kwargs):\n return db.run_in_transact... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates linear kernel u.T v. | def linear_kernel(u, v):
return np.dot(u.T, v) | [
"def polynomial_kernel(u, v, p=3):\n c = 1\n return (np.dot(u.T, v) + c) ** p",
"def linear_kernel(x, y, b=1):\n return x @ y.T + b #note the @ operator for matrix multiplication",
"def linear_kernel(x1, x2):\n\n return np.dot(x1, x2)",
"def _kernel1d(self, u):\n return 1. - np.abs(u)"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates polynomial kernel with degree equal to p. | def polynomial_kernel(u, v, p=3):
c = 1
return (np.dot(u.T, v) + c) ** p | [
"def polyFeatures(X, p):\n X_poly = np.zeros((X.size, p))\n for i in range(p):\n X_poly[:, [i]] = X**(i+1)\n return X_poly",
"def poly_kernel(coeffs, length=601):\n samples = np.arange(length)\n polyvec = np.polyval(coeffs, samples)\n poly_kernel = kernel_from_kernel_stepvec(polyvec)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
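A small usage sketch of the two kernels above on toy NumPy vectors, with the arithmetic worked out (the inputs are illustrative):

```python
import numpy as np

u = np.array([1.0, 2.0])
v = np.array([3.0, 4.0])

print(np.dot(u.T, v))             # linear kernel: 1*3 + 2*4 = 11.0
print((np.dot(u.T, v) + 1) ** 3)  # polynomial kernel, p=3, c=1: 12**3 = 1728.0
```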
sentence = ["hello","how","are","you"] words = ["hi","hello","I","you","bye","thank","cool"] bog =[ 0 , 1 , 0 , 1 , 0 , 0 , 0] | def bag_of_words(tokenized_sentence, all_words):
tokenized_sentence = [stem(w) for w in tokenized_sentence]
bag = np.zeros(len(all_words), dtype=np.float32)
for idx, w in enumerate(all_words):
if w in tokenized_sentence:
bag[idx] = 1.0
return bag | [
"def bag_of_words(sentence, all_words):\n bag = np.zeros(len(all_words), dtype=np.float32)\n for (index, word) in enumerate(sentence):\n if word in all_words:\n bag[all_words.index(word)] = 1\n return bag",
"def Dummy(word,y):\n Dummy = []\n for i in y:\n if i == word:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
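Walking the query's example through the simpler of the two implementations above (the one without stemming), assuming plain string tokens:

```python
import numpy as np

sentence = ["hello", "how", "are", "you"]
words = ["hi", "hello", "I", "you", "bye", "thank", "cool"]

bag = np.zeros(len(words), dtype=np.float32)
for idx, w in enumerate(words):
    if w in sentence:
        bag[idx] = 1.0

# Only "hello" and "you" appear in the vocabulary:
assert bag.tolist() == [0, 1, 0, 1, 0, 0, 0]
```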
Get the current cache key name based on the randomly generated shortened URL (to be used in the Redis cache). | def get_cache_key(instance, extra=None):
return '%s.%s.%s' % (instance.__class__.__name__, instance.short_url, extra) if extra else '%s.%s' % (instance.__class__.__name__, instance.short_url) | [
"def cache_key(self, url):\n\n return f\"IXF-CACHE-{url}\"",
"def _get_cache_key(r: WSGIRequest, c: BaseCache) -> str:\n r = _chop_querystring(r)\n r = _chop_cookies(r)\n return get_cache_key(r, None, r.method, c)",
"def short_url(self):\n tiny_url = ''.join(random.choice(BASE_KEY) for x ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Policy label type = "HTTP | OTHERTCP" | def set_policylabeltype(self, policylabeltype):
valid_types = ('HTTP', 'OTHERTCP')
if policylabeltype and policylabeltype not in valid_types:
raise ValueError("policylabeltype must be one of %s" %
",".join(valid_types))
self.options['policylabeltype'] = p... | [
"def create_fw_policy(self,name):",
"def set_policy(self, policy):\n self._policy = 'custom'\n self._P = policy",
"def policy():\n return render_template('dashboard/policy.html', tagname = 'policy')",
"def __load_policy__(self, agent, policy):\n msg = comm.PolicyMessage(agent_id=agent.agent_id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use this API to fetch responderpolicylabel resource of given name. | def get(nitro, responderpolicylabel):
__responderpolicylabel = NSResponderPolicyLabel()
__responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())
__responderpolicylabel.get_resource(nitro)
return __responderpolicylabel | [
"def update(nitro, responderpolicylabel):\n __responderpolicylabel = NSResponderPolicyLabel()\n __responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())\n __responderpolicylabel.set_policylabeltype(responderpolicylabel.get_policylabeltype())\n __responderpolicylabel.set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use this API to fetch all configured responderpolicylabel resources. | def get_all(nitro):
__url = nitro.get_url() + NSResponderPolicyLabel.get_resourcetype()
__json_cspolicies = nitro.get(__url).get_response_field(NSResponderPolicyLabel.get_resourcetype())
__responderpolicies = []
for json_responderpolicylabel in __json_cspolicies:
__responderp... | [
"def get(nitro, responderpolicylabel):\n __responderpolicylabel = NSResponderPolicyLabel()\n __responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())\n __responderpolicylabel.get_resource(nitro)\n return __responderpolicylabel",
"def get_available_labels( self ):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use this API to add responderpolicylabel. | def add(nitro, responderpolicylabel):
__responderpolicylabel = NSResponderPolicyLabel()
__responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())
__responderpolicylabel.set_policylabeltype(responderpolicylabel.get_policylabeltype())
__responderpolicylabel.set_priority(re... | [
"def update(nitro, responderpolicylabel):\n __responderpolicylabel = NSResponderPolicyLabel()\n __responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())\n __responderpolicylabel.set_policylabeltype(responderpolicylabel.get_policylabeltype())\n __responderpolicylabel.set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use this API to delete responderpolicylabel of a given name. | def delete(nitro, responderpolicylabel):
__responderpolicylabel = NSResponderPolicyLabel()
__name = responderpolicylabel.get_labelname()
__responderpolicylabel.set_labelname(__name)
nsresponse = __responderpolicylabel.delete_resource(nitro, __name)
return nsresponse | [
"def delete_label(self, label_name: str) -> dict:\n label = self.get_labels(filter_by_name=[label_name])\n if len(label) != 1:\n raise Exception(f\"Could not find label to delete with name {label_name}\")\n id = label[0][\"id\"]\n\n return self.client.delete(id, endpoint=\"lab... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use this API to update a responderpolicylabel of a given name. | def update(nitro, responderpolicylabel):
__responderpolicylabel = NSResponderPolicyLabel()
__responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())
__responderpolicylabel.set_policylabeltype(responderpolicylabel.get_policylabeltype())
__responderpolicylabel.set_priority... | [
"def get(nitro, responderpolicylabel):\n __responderpolicylabel = NSResponderPolicyLabel()\n __responderpolicylabel.set_labelname(responderpolicylabel.get_labelname())\n __responderpolicylabel.get_resource(nitro)\n return __responderpolicylabel",
"def add(nitro, responderpolicylabel):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test getting both the calendar and event list via ajax. | def test_ajax_month_calendar_and_event_list(self):
response = self.client.get(
reverse('calendar:cal_and_list_shift'),
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertEqual(response['Content-Type'], 'application/json')
data = loads(response.content.decode('ut... | [
"def test_ajax_day_view(self):\n response = self.client.get(\n reverse(\n 'calendar:day_list',\n kwargs={'year': '2015', 'month': '2', 'day': '2'}\n ),\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n self.assertEqual(response['Conten... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test sending an ajax request to day view. | def test_ajax_day_view(self):
response = self.client.get(
reverse(
'calendar:day_list',
kwargs={'year': '2015', 'month': '2', 'day': '2'}
),
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertEqual(response['Content-Type'], 'ap... | [
"def test_open_requests_page(self):\n url = reverse_lazy('hello:requests')\n resp = self.client.get(url)\n self.assertEqual(resp.status_code, 200)\n self.assertTemplateUsed(resp, 'requests.html')\n self.assertEqual(HttpRequest.objects.filter(is_read=False).count(), 0)\n res... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return attributes that are dependent on the parent channels. This is required during a slim operation (reducing the number of channels). Returns a set (str). | def channel_dependent_attributes(self):
attributes = super().channel_dependent_attributes
attributes.add('amplitudes')
attributes.add('amplitude_weights')
return attributes | [
"def _get_physical_channels(self):\n return self.__physical_channels",
"def _read_parents(self):\n return set()",
"def get_attribute():\n return set(c+\".\"+cmds.attributeName(c+\".\"+b, l=True)\n for a in \"msho\"\n for b in cmds.channelBox(\"mainChannelBox\", q=True, **{\"s%sa\"%a:True}... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the profile for given channel(s). | def update_profile(self, channels=None):
self.calc_mean_amplitudes(channels=channels)
self.whiten_profile(channels=channels) | [
"def update_channel(self, channel):",
"def update_profile(self):\n pass",
"def profile_update(self, profile):\n # warn about missing install_json parameter\n if profile.get('install_json') is None:\n print(\n '{}{}Missing install_json parameter for profile {}.'.for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates the mean amplitudes in the window of a spectrum. The amplitude of the FFT spectrum is calculated as the mean value within a given window (usually 1). The weights of the mean operation will also be stored in the `amplitude_weights` attribute. These are used later to calculate the channel profiles. | def calc_mean_amplitudes(self, channels=None):
if channels is None or channels is self.channels:
channel_indices = np.arange(self.channels.size)
else:
channel_indices = self.channels.find_fixed_indices(
channels.fixed_index)
if self.channel_profiles is No... | [
"def average_amplitude_initial(data, peakind, pushup_window, feature, freq):\n\n ind = [int(x*freq) for x in peakind]\n ind = [pushup_window[0] + x for x in ind]\n amps = data.ix[ind][feature]\n avg_amp = amps.mean()\n return avg_amp",
"def calculate_average_weighted_mean(flux,err,norm_const):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create the channel filtering profiles for whitening. Will also set channel 1/f noise statistics. | def whiten_profile(self, channels=None):
log.debug("Whitening channel profile.")
if channels is None or channels is self.channels:
channel_indices = np.arange(self.channels.size)
else:
channel_indices = self.channels.find_fixed_indices(
channels.fixed_inde... | [
"def update_profile(self, channels=None):\n self.calc_mean_amplitudes(channels=channels)\n self.whiten_profile(channels=channels)",
"def preprocess_filters(x, Fs):\n # Low pass at 200Hz\n x_lo = neurodsp.filter(x, Fs, 'lowpass', f_lo=200, N_seconds=.1)\n\n # Highpass at 2Hz - figure out ord... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all global version managers of a type. | def get_global_version_managers():
return TypeVersionManager.get_global_version_managers() | [
"def get_global_version_managers(_cls=True):\n return TypeVersionManager.objects.filter(user=None).all()",
"def get_all_version_manager():\n return TypeVersionManager.get_all_type_version_manager()",
"def get_version_managers_by_user(user_id):\n return TypeVersionManager.objects.filter(user=str... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all global version managers of a user. | def get_version_managers_by_user(user_id):
return TypeVersionManager.get_version_managers_by_user(user_id) | [
"def get_global_version_managers(_cls=True):\n return TypeVersionManager.objects.filter(user=None).all()",
"def get_version_managers_by_user(user_id):\n return TypeVersionManager.objects.filter(user=str(user_id)).all()",
"def get_global_version_managers():\n return TypeVersionManager.get_global... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get list of available types not inside a bucket. | def get_no_buckets_types():
# build list of types
bucket_types = []
for bucket in bucket_api.get_all():
bucket_types += bucket.types
all_types = get_global_version_managers()
no_bucket_types = [type_version_manager for type_version_manager in all_types
if type_version... | [
"def getBookableObjectTypes(self, **kwargs):\n\n brains = self.getBookableObjectBrains(**kwargs)\n brain_types = [x.getType for x in brains]\n type_vocab = self.getTypeDisplayList()\n types = [x for x in type_vocab.keys() if x in brain_types]\n types.sort()\n return types",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Type Version Managers of all users except user with given user id. | def get_all_version_manager_except_user_id(user_id):
return TypeVersionManager.get_all_type_version_manager_except_user_id(user_id) | [
"def get_version_managers_by_user(user_id):\n return TypeVersionManager.objects.filter(user=str(user_id)).all()",
"def get_all_version_manager_except_user_id(user_id, _cls=True):\n queryset = super(TemplateVersionManager, TemplateVersionManager).get_all_version_manager_except_user_id(user_id)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all Type Version Managers of all users. | def get_all_version_manager():
return TypeVersionManager.get_all_type_version_manager() | [
"def get_version_managers_by_user(user_id):\n return TypeVersionManager.objects.filter(user=str(user_id)).all()",
"def get_version_managers_by_user(user_id):\n return TypeVersionManager.get_version_managers_by_user(user_id)",
"def get_global_version_managers(_cls=True):\n return TypeVersionMana... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A utility method for parsing token keyword arguments. | def token_kwargs(bits, parser, support_legacy=False):
if not bits:
return {}
match = kwarg_re.match(bits[0])
kwarg_format = match and match.group(1)
if not kwarg_format:
if not support_legacy:
return {}
if len(bits) < 3 or bits[1] != 'as':
return {}
k... | [
"def parse_args_kwargs(parser, token):\n bits = token.contents.split(' ')\n\n if len(bits) <= 1:\n raise template.TemplateSyntaxError(\"'%s' takes at least one argument\" % bits[0])\n\n if token.contents[13] == '\"':\n end_quote = token.contents.index('\"', 14) + 1\n args = [template.V... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Hooks this module up to the given api. | def hookup(self, api):
# assert not hasattr(api, self.module_name), """
# '{}' conflicts with existing attribute
# """.format(self.module_name)
self.api = api
if not hasattr(api, self.module_name):
setattr(api, self.module_name, self.execute) | [
"def init_api(self, api: Eve) -> None:\n if not isinstance(api, Eve):\n raise TypeError(api)\n if self._hooks is not None:\n # add event hooks to the api\n self.hooks.init_api(api)\n\n # register the domain with the api.\n api.config['DOMAIN'].setdefault(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Puts args and kwargs into a form Ansible can understand. Calls Ansible and interprets the result. | def execute(self, *args, **kwargs):
assert self.is_hooked_up, "the module should be hooked up to the api"
print("============")
print(args)
print("============")
print(kwargs)
self.module_args = module_args = self.get_module_args(args, kwargs)
print("=========")
... | [
"def construct_ansible_facts(response, ansible_params, paramgram, *args, **kwargs):\n\n facts = {\n \"response\": response,\n \"ansible_params\": scrub_dict(ansible_params),\n \"paramgram\": scrub_dict(paramgram),\n }\n\n if args:\n facts[\"custom_arg... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scrapes the search page for individual parsing links to feed into the threadbot system (not needed if pages are in the URL). | def searchpageparsing(page):  # Note: for the initial Coldwell run this was run separately, for more manageable errors
if not page: # Failed webdl handling
return None
scrapelist = []
soup = bs4.BeautifulSoup(page.text, 'lxml')
parent_element = soup.find('a', {'id': 'resultsNext'})
while parent... | [
"def parse(self, response):\n # Grab all the job posting urls\n for sel in response.xpath('//h2[@class=\"jobtitle\"]'):\n posting_url, job_location = self.get_selection_info(sel)\n try:\n self.jentries.append(scrape_job_posting(posting_url, loc=job_location))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Group two variants with the same genomic coordinates. | def group_alleles(data1, data2):
if (data1['chrom'], data1['pos'], data1['ref'], data1['alt']) != (data2['chrom'], data2['pos'], data2['ref'], data2['alt']):
raise ValueError("data1 variant id != data2 variant id: %s != %s" % (data1, data2))
combined_data = data1 # this sets defaults, now we fix it:
... | [
"def add_seq_variants(self, allele1_seq, allele2_seq):\n\n self.allele1_region_seq = allele1_seq\n self.allele2_region_seq = allele2_seq\n\n bases_different = [base1!=base2 for base1, base2 in zip(allele1_seq.seq, allele2_seq.seq)]\n variant_base_positions = [pos for pos, different in en... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get list of components by ip | def list_components(ip, path='/', login=login, password=password):
cmd = 'ssx2 -c "connect {ip} {login} {password};ls \'{path}\';disconnect"'.format(ip=ip, login=login, password = password, path=path)
return [x.strip() for x in os.popen(cmd).readlines() if x.strip() not in ['', None]] | [
"def get_ip_list(prefix):\n return list(map(lambda x: str(x),ipaddress.ip_network(prefix).hosts()))",
"def get_host_list(self):",
"def list_zeus_cluster_records_via_ip(cfg, ip, port=False):\n\n idlist = {}\n q = cfg.dbsess.query(ZeusCluster).\\\n filter(or_(ZeusCluster.ip==ip,\n ZeusClust... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dict with all tasks. | def _tasks(self):
return {k: v for k, v in self.__tasks.items()
if k.startswith(self.task_prefix)} | [
"def get_tasks(self):\n ret = self.tasks.values()\n return ret",
"def tasks(self):\n return self._tasks",
"def getTasksInformation(self):\n if self.taskLock.acquire():\n try:\n self.__checkTasks()\n return [item.getDict() for item in self.tasks]\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of task names without task_prefix. | def _short_task_names(self):
return [name[len(self.task_prefix):] for name in self._task_names] | [
"def extract_non_system_names(names_list,prefix='__'):\n result = list();\n ns = len(prefix);\n for name in names_list:\n pend = min(ns,len(name))\n if name[:pend] != prefix:\n result.append(name)\n return result",
"def removeTasks(self, taskNames):",
"def _get_gradle_tasks(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test cases for BaseExercises class. | def testBaseExercises():
class TestExercises(BaseExercises):
def task_with_underline(self):
print('task_with_underline')
def task32(self):
print('numerated tasks')
def task(self):
print('task')
def task_sql(self):
print(self.ex("sel... | [
"def test_exercise_name(self):\n self.assertEqual(str(self.exercise), \"soccer\")\n self.assertEqual(str(self.exercise2), \"yoga\")",
"def main():\n # Print the docstring at the top of the file so your instructor can see your name.\n print( __doc__ )\n\n # Call each individual exercise; com... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to retrieve the SDApplication. | def getApp() -> SDApplication:
context = sd.getContext()
return context.getSDApplication() | [
"def getApplication():",
"def active_directory_application_get(app_name: str) -> ActiveDirectoryApplication:\n command: List[str] = ['az', 'ad', 'app', 'list', f'--query=[?displayName==\"{app_name}\"] | [0]']\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to retrieve the QtPythonUIManager. | def getUiManager():
return getApp().getQtForPythonUIMgr() | [
"def get_ui_config():\n\n return db.get_db().getRoot().getS(ns_ui.uiConfig, rdf.Type(ns_ui.UiConfig))",
"def get_manager() -> typing.Union[QMainWindow, None]:\n Log.debug(\"Retrieving screen manager\")\n for widget in QApplication.instance().topLevelWidgets():\n if \"ScreenManager\" in repr(widget... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Go over input line-by-line, file-by-file, perform a regex search on it, and append results for later use. Uses the built-in 'fileinput' module, which reads input from the given file(s), or from stdin if the list is empty or ''. | def _search_input(self):
try:
for line in fileinput.input(files=self.parsed_elements.args.files_names, mode='r'):
res = self.regex.search_regex_in_data(line)
for item in res:
self.results.append({'file_name': fileinput.filename(), 'no_line': filein... | [
"def process_file(file_path):\n file_of_matches=open(file_path, \"r\")\n #loop over every line to get process individual matches\n for match in file_of_matches:\n process_match(match[:-1])#drop the \\n from end of line \n file_of_matches.close()",
"def parse_input(args):\n for line in filein... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Main function, just initialize task and run it | def main():
task = Task()
task.run_task() | [
"def main():\n\n from sys import argv\n opts = getopts(argv)\n\n if \"-t\" in opts:\n task_name = opts[\"-t\"]\n else:\n print(\"Error: must specify -t\")\n return\n\n task_map = {\n \"coin_list\": import_coin_list,\n \"historical\": import_historical_data,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Visualize multiple images in a grid. | def show_img_grid(self, imgs, row, col):
fig, axs = plt.subplots(nrows=row, ncols=col)
for idx, data in enumerate(imgs):
axs.ravel()[idx].imshow(data)
axs.ravel()[idx].set_title('Image # {}'.format(idx))
axs.ravel()[idx].set_axis_off()
plt.tight_layout()
... | [
"def display_multiple_images(xs):\n fig = plt.figure()\n fig.set_tight_layout(True)\n\n for i, x in enumerate(xs):\n ax = fig.add_subplot(1, len(xs), i + 1)\n ax.imshow(x.reshape(28, 28), cmap='gray')\n plt.show()",
"def show_batch(dataLoader, rows):\n for images, labels in dataLoader... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the compressed payload. This function generates multiple parts of the payload. Concatenating these parts and decompressing the result will yield a 4GB + len(overflow_data) chunk. The parts are generated such that sending one chunk will trigger a realloc() in the browser. The last part contains the final byte ... | def construct_payload():
compressor = zlib.compressobj(level=1, wbits=31) # include gzip header + trailer
parts = []
def add_part(size):
payload = bytearray()
payload += compressor.compress(bytearray(size))
payload += compressor.flush(zlib.Z_FULL_FLUSH)
parts.append(... | [
"def _generate_payloads(data, split_function):\n payload = gzip.compress(json.dumps(data).encode())\n\n if len(payload) < MAX_PAYLOAD_SIZE:\n return [payload]\n\n split_data = split_function(data)\n return _generate_payloads(split_data[0], split_function) + _generate_payloads(\n split_data... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the UofT Graduate GPA for a given grade. | def grade_to_gpa(grade):
letter_grade = "" # declare variable for str value
gpa = 0.0 # variable representing the gpa equivalent of the given grade
if type(grade) is str: # if grade given is string
if grade == " ": # ensure that grade is not equal to null,otherwise raise Value Error
... | [
"def get_gpa(self):\n # this function should caluclate the GPA and print it.\n # HINT: for each course in self._grades, you need to retrieve the corresponding points from Student.POINTS (and then calculate the GPA)\n sum = 0\n for x in self._grades:\n sum += self.POINTS[self._g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert integer grade to letter. | def mark_to_letter(grade):
letter_grade = "" # declare variable for letter_grade equivalent
if grade >= 0 and grade <= 100: # get valid grade integer
if grade > 89:
letter_grade = "A+"
elif grade > 84:
letter_grade = "A"
elif grade > 79:
... | [
"def print_letter_grade(self):\n for key in self.__subjects:\n grade = self.__get_average_for_subject(key)\n if grade >= 90:\n result = \"A\"\n elif grade >= 80:\n result = \"B\"\n elif grade >= 70:\n result = \"C\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert entities from PCRaster to NumPy. map: Map you want to convert. mv: Value to use in the result array cells as a missing value. Returns an array. | def pcr2numpy(
map,
mv):
return _pcraster.pcr2numpy(_pcraster.clone(), map, mv) | [
"def pcr_as_numpy(\n map):\n return _pcraster.pcr_as_numpy(_pcraster.clone(), map)",
"def map_class(fine_labels, mapping):\n return np.array([mapping[l] for l in fine_labels])",
"def map_values(data, column, mapping, **kwargs):\r\n data_column = data.loc[:, column]\r\n mapper = np.vectorize(l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reference PCRaster maps from NumPy arrays. map: Map to reference. Returns an array. | def pcr_as_numpy(
map):
return _pcraster.pcr_as_numpy(_pcraster.clone(), map) | [
"def pcr2numpy(\n map,\n mv):\n return _pcraster.pcr2numpy(_pcraster.clone(), map, mv)",
"def map(self, map_func) -> object:\n # creates result array\n map_new_array = DynamicArray()\n\n # runs map_func from parameter on each element, then adds to result array\n for in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the Decompress method. | def testDecompress(self):
decompressor = zlib_decompressor.ZlibDecompressor()
compressed_data = (
b'x\x9c\x0b\xc9\xc8,V\x00\xa2D\x85\x92\xd4\xe2\x12=\x00)\x97\x05$')
uncompressed_data, _ = decompressor.Decompress(compressed_data)
expected_uncompressed_data = b'This is a test.'
self.assertE... | [
"def testDecompress(self):\n decompressor = zlib_decompressor.DeflateDecompressor()\n\n compressed_data = (\n b'\\x0b\\xc9\\xc8,V\\x00\\xa2D\\x85\\x92\\xd4\\xe2\\x12=\\x00)\\x97\\x05$')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the Decompress method. | def testDecompress(self):
decompressor = zlib_decompressor.DeflateDecompressor()
compressed_data = (
b'\x0b\xc9\xc8,V\x00\xa2D\x85\x92\xd4\xe2\x12=\x00)\x97\x05$')
uncompressed_data, _ = decompressor.Decompress(compressed_data)
expected_uncompressed_data = b'This is a test.'
self.assertEqu... | [
"def testDecompress(self):\n decompressor = zlib_decompressor.ZlibDecompressor()\n\n compressed_data = (\n b'x\\x9c\\x0b\\xc9\\xc8,V\\x00\\xa2D\\x85\\x92\\xd4\\xe2\\x12=\\x00)\\x97\\x05$')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert (linear) amplitude to decibel (log10(x)). >>> x[x < amin] = amin >>> y = 10 * log(x) / log(10)  # log transform >>> y = ...  # rescale dyn range to [-80, 0] | def amplitude_to_decibel(x, amin: float = 1e-10, dynamic_range: float = 80.0):
log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
if K.ndim(x) > 1:
axis = tuple(range(K.ndim(x))[1:])
else:
axis = None
log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)... | [
"def dB_to_linear(dB: float) -> float:\n return pow(10, dB / 10)",
"def lin_to_log_rescale(val,lower=1,upper=3,base=10):\n if lower > upper:\n raise ValueError('lower must be less than upper')\n \n lower = base**lower\n upper = base**upper\n \n val = np.array(val,copy=False)\n #offs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
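A plain-NumPy restatement of the transform above, which may be easier to read than the Keras-backend version (this is a sketch: the original takes the maximum per sample, while this takes it over the whole array):

```python
import numpy as np

def amplitude_to_decibel_np(x, amin=1e-10, dynamic_range=80.0):
    x = np.maximum(x, amin)                # clip tiny values before the log
    y = 10.0 * np.log10(x)                 # linear amplitude -> dB
    y = y - y.max()                        # shift so the peak sits at 0 dB
    return np.maximum(y, -dynamic_range)   # floor at -80 dB -> range [-80, 0]
```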
Add a cbor event to the two databases. Calls both the byte array handler and the event handler to insert the event into both databases accordingly. Gets called both by the database connector and the function connector. Returns 1 if successful, otherwise -1 if any error occurred. | def insert_event(self, cbor):
self._handler.add_to_db(event_as_cbor=cbor, app=True) | [
"def addEventCallback(*args, **kwargs):\n \n pass",
"def _connect_db_signals(self):\n self.callman.register_handles({'ensemble': [self.obj.get_handle()]})\n self.callman.register_callbacks(\n {'ensemble-update': self.check_for_ensemble_change,\n 'ensemble-delete': self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the current sequence number of a given feed_id as an integer: the currently largest sequence number for the given feed. Returns -1 if there is no such feed_id in the database. | def get_current_seq_no(self, feed_id):
return self._handler.get_current_seq_no(feed_id) | [
"def get_latest_sequence_number(self):\n db_query = u\"SELECT sequence_number FROM block_chain ORDER BY ROWID DESC LIMIT 1;\"\n db_result = self.execute(db_query).fetchone()\n return db_result[0] if db_result is not None else 0",
"def find_first_id():\n try:\n rs = scraperwiki.sqlit... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a specific cbor event to the callee with the input feed_id and sequence number. Returns None if there is no such entry. | def get_event(self, feed_id, seq_no):
return self._handler.get_event(feed_id, seq_no) | [
"def _get_callback_id(self, callback):\n for event_id, entry in self.callbacks.iteritems():\n cb, once = entry\n if cb == callback:\n return event_id",
"def find_event(self, event_cls):\n return self._emittions[event_cls] \\\n if event_cls in self._emittions else None",
"de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the newest (the one with the highest sequence number) cbor event for a feed_id. Returns None if there is no such feed_id in the database. | def get_current_event(self, feed_id):
return self._handler.get_current_event_as_cbor(feed_id) | [
"def get_greatest_id(self):\n\t\tc = self.connection.cursor()\n\t\tc.execute(\"\"\"\n\t\t\tSELECT MAX(id)\n\t\t\tFROM Task\n\t\t\"\"\")\n\t\trow = c.fetchone()\n\t\tif row is None:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn row[0]",
"def get_max_event_id(query=\"\", max_events=MAX_EVENTS):\n logger.info(\"Get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all current feed ids in the database. | def get_all_feed_ids(self):
return self._handler.get_all_feed_ids() | [
"def get_feed_ids(self):\n feed_ids = self.db_connection.get_all_feed_ids()\n master_ids = self.db_connection.get_all_master_ids()\n own_ids = self.event_factory.get_own_feed_ids()\n master_ids.append(self.db_connection.get_host_master_id())\n\n # remove master feed ids\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Query for Spotify ID, artist, and title. | def query_spotify_id(search):
search = search.replace(" ", "+")
client_credentials_manager = SpotifyClientCredentials(client_id=os.environ.get("SPOTIFY_CID"),
client_secret=os.environ.get("SPOTIFY_SECRET"))
sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
query = sp... | [
"def query_spotify(querystring):\n # get results for a query\n track_results = spotify.search(f'{querystring}', type='track', limit=10, offset=0, market='US')\n # list of tracks to serve\n to_serve = []\n # convert each song into a dict\n for item in track_results['tracks']... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The main method used to generate a WebPerformance object from the current web page. | def get(self):
return WebPerformance(
time_origin=self.get_time_origin(),
navigation_timing=self.get_navigation_timing(),
paint_timing=self.get_paint_timing(),
resources=self.get_resources()
) | [
"def webapp_performance():\n return profile",
"def main():\r\n\r\n import cProfile as profile\r\n import pstats\r\n\r\n global _PROFILING\r\n\r\n _PROFILING = True\r\n\r\n filename = 'Processor.profile'\r\n\r\n profile.run('_main()', filename)\r\n\r\n stats = pstats.Stats(filename)\r\n\r\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the timeOrigin precision value. This is the high resolution timestamp of the start time of the performance measurement. | def get_time_origin(self) -> float:
js = 'return window.performance.timeOrigin;'
time_origin = self._wait().until(lambda driver: driver.execute_script(js), 'Time Origin not generated yet')
return time_origin | [
"def get_time(self):\r\n return float(self._cur_time)",
"def precision(self):\n return self._metadata[\"precision\"]",
"def get_precision(self):\n return self.__precision",
"async def get_exposure_time(self, **kwargs: Any) -> float:\n return self._exposure_time",
"def _get_recording_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the PerformanceNavigationTiming object as a Python object. | def get_navigation_timing(self):
js = 'return window.performance.getEntriesByType("navigation")[0];'
navigation = self._wait().until(lambda driver: driver.execute_script(js), 'NavigationTiming not generated yet')
return NavigationTiming(**navigation) | [
"def get(self):\n return WebPerformance(\n time_origin=self.get_time_origin(),\n navigation_timing=self.get_navigation_timing(),\n paint_timing=self.get_paint_timing(),\n resources=self.get_resources()\n )",
"def get_paint_timing(self):\n js = 'retu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the PerformancePaintTiming object as a Python object. | def get_paint_timing(self):
js = 'return window.performance.getEntriesByName("first-contentful-paint")[0];'
paint = self._wait().until(lambda driver: driver.execute_script(js), 'PaintTiming not generated yet')
return PaintTiming(**paint) | [
"def get(self):\n return WebPerformance(\n time_origin=self.get_time_origin(),\n navigation_timing=self.get_navigation_timing(),\n paint_timing=self.get_paint_timing(),\n resources=self.get_resources()\n )",
"def performance_collector(self):\n ret =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of PerformanceResourceTiming objects as Python objects. | def get_resources(self):
js = 'return window.performance.getEntriesByType("resource");'
try:
resources = self._wait().until(
lambda driver: driver.execute_script(js),
message='Resources not generated yet or there are none')
return [ResourceTiming(*... | [
"def get_timings(self):\r\n return self.times",
"def standard_timings(self):\n base = 38\n sts = []\n for x in xrange(0, 8):\n st = standard_timings.GetStandardTiming(self._edid, (base + (x * 2)),\n self.edid_version)\n if st:\n sts.app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
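The getters above share one pattern: poll a `window.performance` query via `execute_script` until the browser has produced the entry. Stripped of the surrounding class, the pattern looks roughly like this (the URL and timeout are placeholders):

```python
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait

driver = webdriver.Chrome()  # any WebDriver implementation works
driver.get("https://example.com")

# Poll until the navigation entry exists; execute_script converts the
# JS object into a Python dict with the W3C camelCase attribute names.
js = 'return window.performance.getEntriesByType("navigation")[0];'
navigation = WebDriverWait(driver, timeout=10).until(
    lambda d: d.execute_script(js),
    message="NavigationTiming not generated yet")
print(navigation["responseStart"], navigation["domComplete"])
driver.quit()
```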
The time it takes for the page to load as experienced by the user. | def page_load_time(self) -> float:
return self.navigation_timing.load_event_end - self.navigation_timing.start_time | [
"def loading_time_secs(self):\n return self.__loading_time",
"def time_to_interactive(self) -> float:\n return self.navigation_timing.dom_complete",
"def fetch_time(self) -> float:\n return self.navigation_timing.response_end - self.navigation_timing.fetch_start",
"def on_screen_seconds(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The time it takes before the first byte of the response is received from the server. | def time_to_first_byte(self) -> float:
return self.navigation_timing.response_start | [
"def timedTest(self, connection):\n start_time = time.time()\n connection.connect()\n connection.request(self.method, self.path)\n duration = time.time() - start_time\n response = connection.getresponse()\n self.body = response.read()\n if response.status != self.sta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The time it takes for the layout to stabilize and the page to become responsive. | def time_to_interactive(self) -> float:
return self.navigation_timing.dom_complete | [
"def IsAutoLayout(self) -> bool:",
"def time_per_demand_unit(self):\n return 1",
"def on_screen_seconds(self):\n return (datetime.now() - self.time).total_seconds()",
"def dpTime():\n subjects = loadSubjects(SUBJECT_FILENAME)\n startTime = time.time()\n dpAdvisor(subjects,maxWork)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of requests sent from the start of navigation until the end of page load. | def number_of_requests(self) -> int:
return len(self.resources) | [
"def get_num_requests(self):\n return len(self._requests)",
"def getNumRequests(self):\n # type: () -> int",
"def getPageCount(self):\n pass",
"def __len__(self):\n # note: this method is called twice when wrapping queries in list(), from py3.8+\n # https://bugs.python.org/i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The time taken to connect to the server. | def connection_time(self) -> float:
return self.navigation_timing.connect_end - self.navigation_timing.connect_start | [
"def network_time(self):\n if self.has_fetch:\n return self.shuffle_finish_time - self.start_time - self.local_read_time\n return 0",
"def get_connection_timeout(self):\n return self._conntimeout",
"def _get_connection_time(self):\n return self.__connection_time",
"def timedTest(self, con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The time to complete the document fetch (including accessing any caches, etc.). | def fetch_time(self) -> float:
return self.navigation_timing.response_end - self.navigation_timing.fetch_start | [
"def complete_time(self):\n\n return self.__end",
"def node_finished(self):\n if self._all_nodes_finished():\n elapsed_time = datetime.utcnow() - self._collect_start_time\n logging.info(\"Time elapsed during collection: %ss\", elapsed_time)",
"def network_time(self):\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
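The derived metrics above (`page_load_time` through `fetch_time`) are all plain differences over `NavigationTiming` fields. A minimal sketch of the containing classes, under the assumption that the field names simply snake_case the W3C `PerformanceNavigationTiming` attributes:

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class NavigationTiming:
    # Assumed to mirror W3C PerformanceNavigationTiming attributes;
    # only the fields used by the metrics above are included.
    start_time: float = 0.0
    connect_start: float = 0.0
    connect_end: float = 0.0
    fetch_start: float = 0.0
    response_start: float = 0.0
    response_end: float = 0.0
    dom_complete: float = 0.0
    load_event_end: float = 0.0

@dataclass
class WebPerformance:
    navigation_timing: NavigationTiming
    resources: List[dict] = field(default_factory=list)

    @property
    def page_load_time(self) -> float:
        return (self.navigation_timing.load_event_end
                - self.navigation_timing.start_time)

    @property
    def time_to_first_byte(self) -> float:
        return self.navigation_timing.response_start

    @property
    def number_of_requests(self) -> int:
        return len(self.resources)
```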
Cross off (set to False) those indexes that are divisible by `prime` | def cross_off(flags, prime):
i = prime * prime
while i < len(flags):
flags[i] = False
i += prime | [
"def prime():\n array = []\n for i in range(2, 1000):\n if i % 2 != 0 and i % 3 != 0 and i % 5 != 0 and i % 7 != 0 and i % 11 != 0 or i == 2 or i == 3 or i == 5 or i == 7 or i == 11:\n array.append(i)\n return array",
"def test_basis_cross_products(self):\n self.assertEqual(vecto... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the index of the first True in flags after `prime` | def get_next_prime(flags, prime):
np = prime + 1
while np < len(flags) and not flags[np]:
np += 1
return np | [
"def __find_prime_in_row(marked,row):\n\n marked_col = tf.squeeze(tf.gather(marked, col))\n idx_find = tf.where(tf.equal(marked_col, 2))\n\n try:\n col = tf.segment_min(idx_find)\n return col\n except Exception as e :\n return -1 # return col = -1 when we find now row containing a \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
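`cross_off` and `get_next_prime` are the two halves of a Sieve of Eratosthenes. Assembled into a self-contained whole; the driver function `sieve` is an assumption, not part of the source:

```python
def cross_off(flags, prime):
    # Mark multiples of prime, starting at prime**2 (smaller multiples
    # were already crossed off by smaller primes).
    i = prime * prime
    while i < len(flags):
        flags[i] = False
        i += prime

def get_next_prime(flags, prime):
    # First index after prime still flagged True.
    np = prime + 1
    while np < len(flags) and not flags[np]:
        np += 1
    return np

def sieve(n):
    """All primes below n (assumes n >= 2)."""
    flags = [True] * n
    flags[0] = flags[1] = False
    prime = 2
    while prime * prime < n:
        cross_off(flags, prime)
        prime = get_next_prime(flags, prime)
    return [i for i, is_prime in enumerate(flags) if is_prime]

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```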
Construct a `ContinuousStateTransition` instance. | def __init__(self,
transition_mean_networks,
distribution_dim,
num_categories=1,
cov_mat=None,
use_triangular_cov=False,
use_trainable_cov=True,
raw_sigma_bias=0.0,
sigma_min=1e-5,
sigm... | [
"def make_transition(self):\n # next transition is a departure\n if self.state == 'COLD' or self.state == 'WARM':\n self.state = 'IDLE'\n self.is_busy = False\n self.is_cold = False\n\n # next transition is a termination\n elif self.state == 'IDLE':\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct a `DiscreteStateTransition` instance. | def __init__(self,
transition_network,
num_categories):
super(DiscreteStateTransition, self).__init__()
self.dense_net = transition_network
self.num_categ = num_categories | [
"def __init__(self,\n transition_mean_networks,\n distribution_dim,\n num_categories=1,\n cov_mat=None,\n use_triangular_cov=False,\n use_trainable_cov=True,\n raw_sigma_bias=0.0,\n sigma_min=1e-5,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
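The `DiscreteStateTransition` above only stores its `transition_network` and `num_categ`; its call method is not shown. One common design, sketched here as an assumption rather than the source's actual implementation, maps hidden states to a `[batch, num_categ, num_categ]` matrix of transition logits:

```python
import tensorflow as tf

# Hypothetical dense network producing num_categ**2 logits per input,
# reshaped into a per-example discrete transition matrix.
num_categ = 3
transition_network = tf.keras.layers.Dense(num_categ * num_categ)

inputs = tf.random.normal([8, 16])                 # [batch, hidden_dim]
logits = transition_network(inputs)                # [batch, num_categ**2]
logits = tf.reshape(logits, [-1, num_categ, num_categ])
transition_probs = tf.nn.softmax(logits, axis=-1)  # each row sums to 1
```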
Construct a `RnnInferenceNetwork` instance. | def __init__(self,
posterior_rnn,
posterior_dist,
latent_dim,
embedding_network=None):
super(RnnInferenceNetwork, self).__init__()
self.latent_dim = latent_dim
self.posterior_rnn = posterior_rnn
self.posterior_dist = posterior_dist
if embe... | [
"def __init__(\n self,\n input_size=1,\n output_size=1,\n n_units=50,\n dale_ratio=None,\n autopses=True,\n connectivity=None,\n input_connectivity=None,\n output_connectivity=None,\n ):\n # Initialize base RNN class\n RNNBase.__init__(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
system_e_id is the access key to the object. You can get it from vobject.e_id. 1) remove vobjects from vm_object_dic (self.vm_object_dic in the vm_session object) 2) remove vobjects from vobject_liststore_dict (self.vobject_liststore_dict in the main object) 3) remove system from system_liststore (self.system_liststore in mai... | def delete_system (self, system_e_id = None ):
#print(system_e_id)
#parent_key = self.treeview.main.p_session.psystem[system_e_id].e_treeview_iter_parent_key
if system_e_id != None:
'''organizing the list of vobjects that should be removed from vm_object_... | [
"def delete_vm_object (self, vm_object_index = None):\n if vm_object_index != None:\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n vobject = self.vm_session.vm_objects_dic[vm_object_index]\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
vm_object_index is the access key to the object. You can get it from vobject.index. '''When an object is removed, it has to be removed from the treeview and vobject_liststore_dict, in addition to the vm_object_dic in the .vm_session.''' | def delete_vm_object (self, vm_object_index = None):
if vm_object_index != None:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
vobject = self.vm_session.vm_objects_dic[vm_object_index]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -... | [
"def unindex_object(obj, event):\n obj.unindexObject()",
"def delete_system (self, system_e_id = None ):\n #print(system_e_id)\n #parent_key = self.treeview.main.p_session.psystem[system_e_id].e_treeview_iter_parent_key \n \n if system_e_id != None:\n \n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function refreshes the number of frames on the main treeview. The self.tree_iters list contains all the "parents", or the treeview lines, in the TreeView (vismol_object.e_treeview_iter_parent_key). | def refresh_number_of_frames (self):
for index, vobject in self.main.vm_session.vm_objects_dic.items():
treeview_iter = vobject.e_treeview_iter
size = len(vobject.frames)
self.treestore[treeview_iter][8] = size
#print(index, self.treestore[treeview_it... | [
"def TreeDepth(self):\n\t\tfor index in range(len(self.tree)):\n\t\t\tself.tree[index]['D'] = self._nodeDepth(index)",
"def grow_trees(self, trees_count):\n for i in range(trees_count):\n tree = self.grow_tree()",
"def update_counter(self):\n self.node_counter += 1",
"def update_value... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
serializer handles missing padding | def test_serializer_handles_paddings(self):
for i in range(100):
wet = 'Lorem ipsum %s' % ('a' * i)
dry = serializer.dumps(wet)
self.assertFalse(dry.endswith('='))
self.assertEqual(wet, serializer.loads(dry)) | [
"def _pad_with_nulls(data, len_):\n return data + (b'\\x00' * (len_ - len(data)))",
"def test_padding(self):\n for pad in [\"pad_first\", \"pad_before_eq\", \"pad_after_eq\"]:\n node = Attribute(wraptext(\"id\"), wraptext(\"foo\"), **{pad: \"\\n\"})\n self.assertEqual(\"\\n\", geta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
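The test above pins down the contract: `dumps` must strip base64 `=` padding and `loads` must restore it before decoding. A minimal serializer satisfying that contract; JSON-over-URL-safe-base64 is an assumption, since the source's payload format is not shown:

```python
import base64
import json

def dumps(obj):
    """Serialize to URL-safe base64 with the '=' padding stripped."""
    raw = base64.urlsafe_b64encode(json.dumps(obj).encode("utf-8"))
    return raw.rstrip(b"=").decode("ascii")

def loads(dry):
    """Restore the stripped padding, then decode."""
    padded = dry + "=" * (-len(dry) % 4)
    return json.loads(base64.urlsafe_b64decode(padded.encode("ascii")))

# Mirrors the test above: no output ends in '=' and round-tripping is exact.
for i in range(100):
    wet = "Lorem ipsum %s" % ("a" * i)
    dry = dumps(wet)
    assert not dry.endswith("=")
    assert loads(dry) == wet
```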
Calculates the sigma points by the method used in [1] | def get_sigma_points(self):
U = scipy.linalg.cholesky((self._lambda + self._dim_x) * self.P)
sigma_points = np.zeros((self.n_sigma, self._dim_x))
sigma_points[0] = self.x
for i in range(self._dim_x):
sigma_points[i+1] = self.x + U[i]
sigma_points[self._dim_x+i+1] ... | [
"def sigma(self):\r\n return self._sigma",
"def get_sigma(X, nn):\n\n dist = euclid(X, X) # calculate distance between points\n dist.sort() # sort ascending\n dist = dist[:, 1:] # drop zeros of diagonal\n sigma = dist[:, :nn].mean(axis=1).mean() # mean of nn nearest neighbors\n # dist = d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
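The `get_sigma_points` document is truncated mid-loop, but the visible lines match the standard unscented-transform construction: the mean, plus and minus the rows of a Cholesky factor of `(lambda + n) * P`. A self-contained sketch with hypothetical inputs:

```python
import numpy as np
import scipy.linalg

def get_sigma_points(x, P, lam):
    """2n+1 sigma points of the unscented transform.

    U is upper-triangular with U.T @ U = (lam + n) * P, so the outer
    products of its rows sum back to the scaled covariance; lam is the
    scaling parameter (written self._lambda in the truncated source).
    """
    n = x.shape[0]
    U = scipy.linalg.cholesky((lam + n) * P)
    sigma_points = np.zeros((2 * n + 1, n))
    sigma_points[0] = x
    for i in range(n):
        sigma_points[i + 1] = x + U[i]
        sigma_points[n + i + 1] = x - U[i]
    return sigma_points

# Hypothetical 2-D state and unit covariance.
x = np.array([1.0, 2.0])
P = np.eye(2)
print(get_sigma_points(x, P, lam=1.0))
```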