query | document | negatives | metadata
|---|---|---|---|
Assert all quit jobs are sent to collaborators. | def all_quit_jobs_sent(self):
return set(self.quit_job_sent_to) == set(self.authorized_cols) | [
"def test_all(self):\n self.assertEqual(twisted.mail.pop3client.__all__, [])",
"def testQuitAll():\r\n allScripts = BrewPiProcesses()\r\n allScripts.update()\r\n print (\"Running instances of BrewPi before asking them to quit:\")\r\n pprint.pprint(allScripts)\r\n allScripts.quitAll()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
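The completeness check in the row above hinges on a set comparison. A minimal sketch with hypothetical collaborator names: converting both lists to sets makes the check insensitive to order and to duplicate send records.

```python
# Hypothetical collaborator names; set() ignores order and duplicates.
authorized_cols = ["col_a", "col_b", "col_c"]
quit_job_sent_to = ["col_c", "col_a", "col_b", "col_a"]

print(set(quit_job_sent_to) == set(authorized_cols))  # True
```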
RPC called by a collaborator to determine which tasks to perform. | def get_tasks(self, collaborator_name):
self.logger.debug(
f'Aggregator GetTasks function reached from collaborator {collaborator_name}...'
)
# first, if it is time to quit, inform the collaborator
if self._time_to_quit():
self.logger.info(f'Sending signal to col... | [
"def _service_task(self):\r\n pass",
"def get_task(self, task_name):",
"def task_list(self):\n self.developed_model_version_id = new_models(self.old_developed_model_version_id,\n db_connection=self.db_connection,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the collaborator has completed the task for the round. The aggregator doesn't actually know which tensors should be sent from the collaborator, so it must rely on the presence of previous results. | def _collaborator_task_completed(self, collaborator, task_name, round_num):
task_key = TaskResultKey(task_name, collaborator, round_num)
return task_key in self.collaborator_tasks_results | [
"def _is_round_done(self):\n tasks_for_round = self.assigner.get_all_tasks_for_round(\n self.round_number\n )\n\n return all([self._is_task_done(t) for t in tasks_for_round])",
"def mcmc_done(self):\n if self.mcmc_results is not None:\n return True\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
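A minimal sketch of the presence-based completion check above, assuming `TaskResultKey` is a hashable named tuple (an assumption; only its use as a dict key is visible in the row): completion is inferred purely from whether a result was stored.

```python
from collections import namedtuple

# Assumption: TaskResultKey behaves like a hashable named tuple.
TaskResultKey = namedtuple("TaskResultKey", ["task_name", "owner", "round_number"])

collaborator_tasks_results = {
    TaskResultKey("train", "col_a", 0): ["...named tensors..."],
}

def collaborator_task_completed(collaborator, task_name, round_num):
    # The aggregator only sees whether previous results exist.
    return TaskResultKey(task_name, collaborator, round_num) in collaborator_tasks_results

print(collaborator_task_completed("col_a", "train", 0))  # True
print(collaborator_task_completed("col_a", "train", 1))  # False
```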
RPC called by collaborator. Transmits collaborator's task results to the aggregator. | def send_local_task_results(self, collaborator_name, round_number, task_name,
data_size, named_tensors):
self.logger.info(
f'Collaborator {collaborator_name} is sending task results '
f'for {task_name}, round {round_number}'
)
task_key = T... | [
"def push_result(self, task_request, task_response):",
"def _task_submitter_impl(self) -> None:\n log.debug(\n \"%s: task submission thread started (%s)\", self, threading.get_ident()\n )\n to_send = self._tasks_to_send # cache lookup\n\n # Alias types -- this awkward typin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract the named tensor fields. Performs decompression, delta computation, and inserts results into TensorDB. | def _process_named_tensor(self, named_tensor, collaborator_name):
raw_bytes = named_tensor.data_bytes
metadata = [{'int_to_float': proto.int_to_float,
'int_list': proto.int_list,
'bool_list': proto.bool_list}
for proto in named_tensor.transfo... | [
"def _extract_tensor_metadata(result: torch.Tensor) -> TensorMetadata:\n shape = result.shape\n dtype = result.dtype\n requires_grad = result.requires_grad\n stride = result.stride()\n\n memory_formats = {\n torch.contiguous_format,\n torch.channels_last,\n torch.channels_last_3d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the round is complete. If so, perform the end-of-round operations, such as model aggregation, metric reporting, delta generation (+ associated tensorkey labeling), and saving the model. | def _end_of_round_check(self):
if not self._is_round_done():
return
# Compute all validation related metrics
all_tasks = self.assigner.get_all_tasks_for_round(self.round_number)
for task_name in all_tasks:
self._compute_validation_related_task_metrics(task_name)
... | [
"def save_model(self):\n torch.save(\n {\n 'epoch': self.epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': self.opt.state_dict(),\n 'acc': self.val_stats[\"acc\"],\n }, os.path.join(self.params.model_dir,\"snapsho... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that the round is done. | def _is_round_done(self):
tasks_for_round = self.assigner.get_all_tasks_for_round(
self.round_number
)
return all([self._is_task_done(t) for t in tasks_for_round]) | [
"def _end_of_round_check(self):\n if not self._is_round_done():\n return\n\n # Compute all validation related metrics\n all_tasks = self.assigner.get_all_tasks_for_round(self.round_number)\n for task_name in all_tasks:\n self._compute_validation_related_task_metrics... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
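Note the `all(...)` idiom in `_is_round_done` above: it is vacuously true for an empty task list, so a round with no assigned tasks counts as done. A tiny sketch with hypothetical task flags:

```python
# Hypothetical per-task completion flags for one round.
task_done = {"train": True, "validate": False}

print(all(task_done[t] for t in task_done))  # False -- one task still pending
print(all([]))                               # True -- an empty round is trivially done
```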
The vocabulary should have 121 items. | def test_vocabulary_size(self):
self.assertEqual(len(frompcset), 121) | [
"def vocab_size(self) -> int:",
"def vocabulary_length(self) -> int:\n pass",
"def generate_vocabulary(self):\n print(' >>> Generating vocabulary...', end='', flush=True)\n self.vocabulary = {}\n for letter in self.text:\n if letter in self.vocabulary.keys():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The pcsets in the vocabulary and PCSETS should be the same. | def test_pcsets_in_vocabulary(self):
pcsets = set(frompcset.keys())
self.assertEqual(pcsets, set(PCSETS)) | [
"def test_vocabulary_size(self):\n self.assertEqual(len(frompcset), 121)",
"def available_subsets(self):\n return ['train', 'valid']",
"def test_subsets(self):\n t = self.t \n self.assertEqual(t.subsets(), frozenset(\n [frozenset('HG'), frozenset('RM')]))",
"def test_load_al... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify that you get sensible pcset matches. | def test_closest_pcset(self):
for pcset, pcsGT in pcsetsGT.items():
pcs = closestPcSet(pcset)
with self.subTest(pcset=pcset, closest_match=pcsGT):
self.assertEqual(pcs, pcsGT) | [
"def test_pcsets_in_vocabulary(self):\n pcsets = set(frompcset.keys())\n self.assertEqual(pcsets, set(PCSETS))",
"def is_set(cards):\n \n res=False\n verifylist=[False]*4 # Creates a list of Flase bool to verify the attributs\n count=0\n\n if len(cards)==3: \n for i,j,k in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract traces from emcee.EnsembleSampler and apply the inverse transformation of parameters. | def get_traces(sampler, nthin):
# load every nthin'th sample from the walkers and reshape to
# final dimensions
traces = sampler.chain[:, ::nthin, :].reshape(-1, sampler.dim).copy()
    # convert from sample space to meaningful space
traces[:, [1, 4, 5]] = np.exp(traces[:, [1, 4, 5]])
return traces | [
"def extr():\n x = ExtractInterpretationToPoints('WV_C18_L12')\n yield x",
"def process_epidemic_parameters(self):",
"def update_traces(self):\n # Decay traces:\n # Input to hidden:\n self.xy_reg_traces *= 0. # Regular traces decay in one time step\n self.xy_mem_traces *= self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
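The `get_traces` row combines three steps: thinning, flattening walkers into a single sample axis, and undoing a log transform. A numpy-only sketch with a random stand-in for emcee's `sampler.chain` (shape `(nwalkers, nsteps, ndim)`); which columns get `np.exp` depends on which parameters were sampled in log space, columns 1, 4 and 5 in the row above.

```python
import numpy as np

# Stand-in for emcee's sampler.chain, shape (nwalkers, nsteps, ndim).
nwalkers, nsteps, ndim, nthin = 4, 100, 6, 10
chain = np.random.default_rng(0).normal(size=(nwalkers, nsteps, ndim))

# Keep every nthin'th step, then flatten (walkers, steps) into one sample axis.
traces = chain[:, ::nthin, :].reshape(-1, ndim).copy()

# Undo the log transform for the parameters sampled in log space.
traces[:, [1, 4, 5]] = np.exp(traces[:, [1, 4, 5]])
print(traces.shape)  # (40, 6)
```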
Check if we want to be in dev mode or staging mode, this will be used to pull in correct settings overrides. | def env_mode():
if os.environ.get('DEV_MODE') is not None:
return 'DEV'
if os.environ.get('STAGING_MODE') is not None:
return 'STAGING' | [
"def is_dev_env() -> bool:\n if os.getenv(\"APP_ENV\") == \"dev\":\n return True\n return False",
"def is_dev():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')",
"def is_production() -> bool:\n\n return conf(\"app.mode\") == \"prod\"",
"def is_dev():\n\treturn os.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
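`env_mode` keys off the *presence* of an environment variable, not its value. A self-contained sketch that makes the implicit `None` fall-through explicit:

```python
import os

def env_mode():
    # Presence of the variable -- even with an empty value -- selects the mode.
    if os.environ.get("DEV_MODE") is not None:
        return "DEV"
    if os.environ.get("STAGING_MODE") is not None:
        return "STAGING"
    return None  # neither flag set: fall through to production defaults

os.environ["DEV_MODE"] = ""  # set-but-empty still counts as set
print(env_mode())  # DEV
```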
Tests that amity creates rooms of either office or living space | def test_create_room(self):
self.assertIn("created successfully", self.amity.create_room(["earth"], "office")) | [
"def test_amity_does_not_create_duplicte_rooms(self):\n self.amity.create_room([\"void\"], \"office\")\n response = self.amity.create_room([\"void\"], \"livingspace\")\n self.assertEqual(1, len(self.amity.rooms))",
"def test_can_add_office(self):\r\n self.amity.add_room(\"Codango\", 'o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that amity does not create duplicate rooms | def test_amity_does_not_create_duplicte_rooms(self):
self.amity.create_room(["void"], "office")
response = self.amity.create_room(["void"], "livingspace")
self.assertEqual(1, len(self.amity.rooms)) | [
"def test_create_room(self):\n self.assertIn(\"created successfully\", self.amity.create_room([\"earth\"], \"office\"))",
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that amity can add a person to the amity system | def test_add_person(self):
role = "FELLOW"
name = "SAKAZUKI AKAINO"
accommodate = "Y"
response = self.amity.add_person(role, name, accommodate)
self.assertIn("has been added successfully to the system", response) | [
"def test_add_person_allocates_rooms(self):\n self.amity.create_room([\"mars\"], \"office\")\n self.amity.create_room([\"earth\"], \"livingspace\")\n response = self.amity.add_person(\"fellow\", \"monkey luffy\", \"y\")\n self.assertIn(\"successfully\", response)",
"def test_add_person... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that when a person is added to a room the person is allocated a room | def test_add_person_allocates_rooms(self):
self.amity.create_room(["mars"], "office")
self.amity.create_room(["earth"], "livingspace")
response = self.amity.add_person("fellow", "monkey luffy", "y")
self.assertIn("successfully", response) | [
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.amity.add_person(\"staff\", \"Bosalino Kizaru\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a staff member can not be allocated to a living space | def test_add_person_staff_cannot_be_allocated_livingspace(self):
self.amity.create_room(["pluto"], "livingspace")
response = self.amity.add_person("staff", "Sakazuki Akainu", "Y")
self.assertIn("staff can not be allocated accommodation", response) | [
"def test_unallocated_person(self):\n\n response = self.amity.add_person(\"staff\", \"Kuzan Aokiji\")\n self.assertIn(\"unallocated\", response)",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_pers... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that amity does not add people to full rooms | def test_add_person_cannot_allocate_person_to_a_full_room(self):
self.amity.create_room(["jupiter"], "office")
self.amity.add_person("staff", "Monkey Garp")
self.amity.add_person("staff", "Kuzan Aokiji")
self.amity.add_person("staff", "Bosalino Kizaru")
self.amity.add_person("sta... | [
"def test_amity_does_not_create_duplicte_rooms(self):\n self.amity.create_room([\"void\"], \"office\")\n response = self.amity.create_room([\"void\"], \"livingspace\")\n self.assertEqual(1, len(self.amity.rooms))",
"def test_for_room_avaialble(self):\n\t\tself.assertIs(self.office.is_filled()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that people that have not been allocated space are stored somewhere | def test_unallocated_person(self):
response = self.amity.add_person("staff", "Kuzan Aokiji")
self.assertIn("unallocated", response) | [
"def test_list_of_unallocated_people(self):\r\n self.assertIsNotNone(self.amity.get_a_list_of_unallocated_people())",
"def test_add_person_cannot_allocate_person_to_a_full_room(self):\n self.amity.create_room([\"jupiter\"], \"office\")\n self.amity.add_person(\"staff\", \"Monkey Garp\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that amity can reallocate people to other rooms | def test_reallocate_person(self):
self.amity.create_room(["venus"], "livingspace")
id_no = self.amity.get_person_id("Daniel Sumba")
response = self.amity.reallocate_person(id_no, "venus")
self.assertIn("has been successfully moved", response) | [
"def do_reallocate_person(self, args):\n # try:\n if amity.validate_email(args['<email>']) == \"Invalid\":\n error_msg = args['<email>'] + \" is not a valid email.\"\n click.secho(error_msg, fg='red')\n return\n if amity.validate_room_name(args['<new_room>']) ==... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that amity can add people from a .txt file | def test_load_people(self):
response = self.amity.load_people("people.txt")
self.assertIn("successfully", response) | [
"def test_add_person(self):\n role = \"FELLOW\"\n name = \"SAKAZUKI AKAINO\"\n accommodate = \"Y\"\n response = self.amity.add_person(role, name, accommodate)\n self.assertIn(\"has been added successfully to the system\", response)",
"def load_people(self, filename):\n #O... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instantiate a TopicModel object. | def __init__(self, model_path, dataset_path):
self.model = LdaModel.load(model_path)
self.dataset = ArXivDataset.load(dataset_path)
self.num_topics = self.model.num_topics
self.topic_names = list(range(self.num_topics))
self.topics = self.model.show_topics(num_topics=self.num_top... | [
"def load_gensim_model(self, gensim_model, corpus, dictionary, dimension_range=[5, 20], n_samples=5, n_initializations=10, custom_params=None):\n\n parameters = {\n \"num_topics\":\n {\"type\": int, \"mode\": \"range\", \"values\": dimension_range}\n }\n\n if custom_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Predict topics for a piece of text. | def predict(self, text):
bow_transformed = self.dataset.transform([text])[0]
topic_predictions = self.model.get_document_topics(bow_transformed)
sorted_predictions = sorted(topic_predictions, key=lambda x: x[1],
reverse=True)
sorted_predictions = [(sel... | [
"def predict(self, text):\n\n if not models:\n self.__init__(self.filename, force_load=True)\n vec = self.tokenize(text)\n print(\"BoW:\")\n print(vec)\n topics = np.array(self.model[vec], dtype=[('topic_id', int), ('confidence', float)])\n topics[::-1].sort(orde... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simple method to compute distances from points in v1 to points in v2. | def distances(self, v1, v2):
v1_2 = v1.unsqueeze(1).expand(v1.size(0), v2.size(0), v1.size(1))
v2_2 = v2.unsqueeze(0).expand(v1.size(0), v2.size(0), v1.size(1))
return torch.sqrt(torch.pow(v2_2 - v1_2, 2).sum(2) + 0.000000001) | [
"def vec_dist(v1, v2):\n dist = 0\n j = 0\n for i in range(len(v1)):\n while j<len(v2) and v1[i][0]>v2[j][0]:\n dist = dist + v2[j][1]**2\n j = j + 1\n p = v1[i][1]**2 if j>=len(v2) or v2[j][0]>v1[i][0] \\\n else (v2[j][1]-v1[i][1])**2\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
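The expand/subtract/square/sum pattern in `distances` computes all pairwise Euclidean distances in one shot; the small epsilon keeps `sqrt` differentiable when two points coincide. A quick sketch checking it against `torch.cdist`:

```python
import torch

v1 = torch.randn(4, 3)
v2 = torch.randn(5, 3)

# Broadcast to (4, 5, 3), square the differences, sum over the feature axis.
d = torch.sqrt(((v2.unsqueeze(0) - v1.unsqueeze(1)) ** 2).sum(2) + 1e-9)

# Agrees with the built-in pairwise distance up to the stabilising epsilon.
print(torch.allclose(d, torch.cdist(v1, v2), atol=1e-4))  # True
```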
result[0], label, and weight are lists of size gpus = batch_size. segmentation has as many elements as UResNet returns. label[0] has shape (N, 1) where N is the number of points across minibatch_size events. | def forward(self, result, label, cluster_label):
assert len(result['segmentation']) == len(label)
batch_ids = [d[0][:, -2] for d in label]
uresnet_loss, uresnet_acc = 0., 0.
cluster_intracluster_loss = 0.
cluster_intercluster_loss = 0.
cluster_reg_loss = 0.
clust... | [
"def training_pool(self):",
"def train_conv_net(datasets,\n U,\n word_idx_map,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n dropout_rate=[0.5],\n shuffle_batch=True,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves the current session's tweets to tweets.json. | def save_tweets():
with open("tweets.json", "w") as outfile:
json.dump(session["tweets"], outfile) | [
"def save_tweet(self,tweet):\n with open(self.tweets_file, \"ab\") as output:\n output.write(tweet['id']+','+tweet['created']+','+tweet['text']+','+tweet['retweet_count']+','+tweet['favorite_count']+','+tweet['lang']+','+tweet['country']+','+tweet['city']+','+tweet['province']+'\\n')\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes a tweet from the server. Requires the current user to be logged in and deleting a tweet they posted. | def delete_tweet():
tw_id = request.args.get("tweet")
global_feed = request.args.get("global")
tws = session["tweets"]
tws.pop(tw_id)
session["tweets"] = tws
save_tweets()
if global_feed == "True":
return redirect("/global_feed")
else:
return redirect("/personal_feed") | [
"def delete_tweet(self, id=None):\n try:\n # Destroy the status object with the id equal to the passed id\n self.api.destroy_status(id=str(id))\n\n except Exception as e:\n print(str(e))\n sys.exit(0)",
"def remove_tweet(cls, cursor, tweet_id):\n cu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renders the global Tweeter feed, in chronological order of most recent retweet or time of posting. The feed is global because it includes tweets posted by all users. | def global_feed():
if "user" in session:
return render_template("global_feed_template.html",
tweets=Tweets(session["tweets"]),
user=session["user"],
users=json.load(open("users.json")),
... | [
"def retweet():\n tw_id = request.args.get(\"tweet\")\n\n tws = session[\"tweets\"]\n tws[tw_id][\"retweet_time\"] = datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n tws[tw_id][\"retweeter\"] = session[\"user\"]\n\n session[\"tweets\"] = tws\n save_tweets()\n\n return redirect(\"/personal_feed\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Marks a tweet as retweeted by the current user. This moves a tweet to the top of the global feed for all and the top of a user's personal feed if the tweet was posted by or retweeted by someone they follow. Code in the html template with Jinja guarantees retweets can only occur when a user is logged in. | def retweet():
tw_id = request.args.get("tweet")
tws = session["tweets"]
tws[tw_id]["retweet_time"] = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
tws[tw_id]["retweeter"] = session["user"]
session["tweets"] = tws
save_tweets()
return redirect("/personal_feed") | [
"def reply():\n # Get all (available) status texts by Int_SORSE after last seen tweet id\n id = read_last_seen()\n new_tweets = []\n new_statuses = Cursor(api.user_timeline, id=RETWEET_USER, since_id=id).items()\n\n # Add all new statuses since the last seen to list\n for status in new_statuses:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Registers a user to users.json. Now that user can log in via /login. | def register():
if request.method == 'GET':
return render_template("register_template.html",
title="Register")
un = request.form["username"]
pw = request.form["password"]
users = json.load(open("users.json"))
if not un:
flash("Please provide a non-em... | [
"def register(ctx, username, password):\n url = ctx.obj['URLS'].register_user()\n headers = ctx.obj['HEADERS']\n data = {\n 'username': username,\n 'password': password,\n 'verify': False\n }\n try:\n r = requests.post(url, json=data, headers=headers)\n r.raise_for_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up links to module_directory/base_binary under the given link_names in $PATH | def make_links(module_dir, base_binary, link_names):
if os.path.isfile(module_dir + "/" + base_binary):
for alias in link_names:
try:
os.unlink(tmpdir + "/bin/" + alias)
except OSError:
pass
os.symlink(os.getcwd() + "/" + module_dir + "/" +... | [
"def link(paths):\n with LogSection(\"Setting up symlinks...\"):\n dotfile_dir = os.path.dirname(os.path.realpath(__file__))\n for src, dst in sorted(paths.items(), key=lambda item: item[0]):\n src = expand(os.path.join(dotfile_dir, src))\n dst = expand(dst)\n if os... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
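`make_links` follows the usual unlink-then-symlink pattern so stale aliases are replaced rather than raising `FileExistsError`. A POSIX-only sketch with a temp directory standing in for `tmpdir + "/bin"` and a hypothetical binary name:

```python
import os
import tempfile

bin_dir = tempfile.mkdtemp()              # stand-in for tmpdir + "/bin"
target = os.path.join(bin_dir, "gcc-12")  # hypothetical base binary
open(target, "w").close()

for alias in ["gcc", "cc"]:
    link = os.path.join(bin_dir, alias)
    try:
        os.unlink(link)   # drop a stale alias if one exists
    except OSError:
        pass              # no previous link -- fine
    os.symlink(target, link)

print(sorted(os.listdir(bin_dir)))  # ['cc', 'gcc', 'gcc-12']
```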
Computes the attention and alignments for a given attention_mechanism. | def _compute_decoder_attention(cell_output, hidden_states, previous_alignments, attention_layer):
print('cell_output', cell_output, 'hidden_states', hidden_states)
hidden_states_stack = tf.stack(hidden_states, axis=1)
print('hidden_states_stack', hidden_states_stack)
# with variable_scope... | [
"def _compute_attention(attention_mechanism, cell_output, attention_state,\n attention_layer):\n alignments, next_attention_state = attention_mechanism(\n cell_output, state=attention_state)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n expanded... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct the `JointAttentionWrapper`. NOTE If you are using the `BeamSearchDecoder` with a cell wrapped in | def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
... | [
"def create_joint_at(obj = None):\n\n return create_at(obj, create = 'joint')",
"def create_weld_joint(self, *args, **kwargs):\r\n joint = joints.WeldJoint(*args, **kwargs)\r\n self.add_joint(joint)\r\n return joint",
"def new_joint(name, **kwargs):\n return new_element(tag=\"joint\",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns `seq` as tuple or the singular element. Which is returned is determined by how the AttentionMechanism(s) were passed to the constructor. | def _item_or_tuple(self, seq):
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0] | [
"def MakeSeq( val ):\n return val if IsSeq( val ) else ( val, )",
"def _get_simple_sequence_2tuple(self):\n simple_sequence = [\n 1234, 1.234, '1234', datetime.date.today(), datetime.datetime.today(), self._get_test_safe_3tuple[1]\n ]\n result_simple_sequence = [str(v) if not ha... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The `state_size` property of `JointAttentionWrapper`. | def state_size(self):
return JointAttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
encoder_attention=self._attention_layer_size,
decoder_attention=self._cell.state_size,
decoder_states=[],
encoder_... | [
"def state_size(self):\n return self._state_size",
"def state_size(self):\n #############################################\n # TODO: YOUR CODE HERE #\n #############################################\n params = self.params\n return params[0]+params[1]\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an initial (zero) state tuple for this `JointAttentionWrapper`. NOTE Please see the initializer documentation for details of how to call `zero_state` if using an `JointAttentionWrapper` with a `BeamSearchDecoder`. | def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)... | [
"def _get_zero_state(source_cell_fw, source_cell_bw):\n zero_fw = source_cell_fw.zero_state(self.batch_length, self.float_type)\n zero_bw = source_cell_bw.zero_state(self.batch_length, self.float_type)\n return zero_fw, zero_bw",
"def get_initial_state(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assert the types of inputs are the same | def assert_same_type(*inputs) -> bool:
first, *others = inputs
# single input
if not others:
return True
_class = type(first)
for ix, obj in enumerate(others):
if not isinstance(obj, _class):
raise TypeError(f"Input types don't agree. This method accepts multipl... | [
"def test_equal_on_type_mismatch(self):\n a = payloads.DeriveKeyRequestPayload()\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)",
"def test_inputs(self):\n r = ResultError('bob', 'a', '1', 'xxxx', 'val', [\n ('b', '1', 'xxxx', 'hash'),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change or add a user portrait. | def change_user_portrait(user_id):
user = User.query.filter(User.user_id == user_id).first()
if not user:
return render_template("404.html"), 404
if request.method == "POST":
if "file" not in request.files:
flash("No file part")
return redirect(request.url)
fi... | [
"def to_portrait(self) -> None:\n if self.is_landscape:\n self.width, self.height = self.height, self.width",
"def yourportrait():\n\n return render_template(\n 'your_portrait.html',\n your_portraitActive='active'\n )",
"def change_orientation_portrait(self):\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Computes how much slack time the bus can use assuming its next checkpoint is `nxt_chk_id`. This is based on the formula in the MAST paper. | def usable_slack_time(self, t, nxt_chk_id, chkpts):
init_slack = self.init_slack_times[nxt_chk_id]
avail_slack = self.avail_slack_times[nxt_chk_id]
next_chk = chkpts[nxt_chk_id]
prev_chk = chkpts[nxt_chk_id - 1]
t_now = t - self.start_t
if t_now < prev_chk.dep_t:
... | [
"def AvgOneWaitTime(self, chk, rng=5):\n tot = 0\n count = 0\n\n for num in range(rng):\n try:\n tot += int(self.CheckpointWaitTimes[chk-1][\"WaitTimes\"][num][\"WaitTime\"])\n count += 1\n except:\n # print \"Not enough wait ti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given distributed arrays with the lengths and offsets of groups in an array of particle IDs, compute the group index corresponding to each particle ID. | def group_index_from_length_and_offset(length, offset, nr_local_ids,
return_rank=False, comm=None):
if comm is None:
from mpi4py import MPI
comm = MPI.COMM_WORLD
comm_rank = comm.Get_rank()
comm_size = comm.Get_size()
# Ensure lengths and offsets ... | [
"def get_group_indices(groups_list,group_number):\r\n file = open(groups_list, 'r')\r\n lines=file.readlines()\r\n group_indices = np.fromstring(lines[group_number],sep=\"\\t\", dtype = int)\r\n\r\n return group_indices",
"def get_group_index_lists(group_ids):\n groups = list(set(gr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
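A single-rank sketch (no mpi4py) of the core length/offset-to-index step: `np.repeat` expands one index per group into one index per particle. The distributed version in the row additionally has to exchange groups that straddle rank boundaries.

```python
import numpy as np

# Per-group particle counts and their offsets into the particle ID array.
length = np.array([3, 2, 4])
offset = np.concatenate(([0], np.cumsum(length)[:-1]))  # [0, 3, 5]

# One group index per particle, in storage order.
group_index = np.repeat(np.arange(len(length)), length)
print(group_index)  # [0 0 0 1 1 2 2 2 2]
```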
Takes a key tuple and returns a key tuple without a primary key, but with a key digest | def get_key_with_digest_only(key):
return (key[0], key[1], None, key[3]) | [
"def remove_by_hash(hashval: str) -> None:\n key_details = get_keys()\n with authorized_keys(\"w\") as ak:\n for keyhash, key in key_details:\n if keyhash != hashval:\n ak.write(f\"{key}\\n\")\n break\n else:\n raise KeyError(hashval)",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
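The 4-tuple layout in `get_key_with_digest_only` looks like the Aerospike client's `(namespace, set, primary_key, digest)` key convention; that reading is an assumption, since only the indexing is visible in the row. A sketch:

```python
# Assumption: Aerospike-style key tuple (namespace, set, primary_key, digest).
key = ("test", "demo", "user-42", b"\x01" * 20)

def get_key_with_digest_only(key):
    # Keep namespace, set and digest; blank out the primary key.
    return (key[0], key[1], None, key[3])

print(get_key_with_digest_only(key))  # ('test', 'demo', None, b'\x01\x01...')
```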
For every node in the tree, check if it is unique compared to previously visited nodes. If NOT unique, merge the node with the node it is checked against. If unique, add it to the list of unique nodes. | def greedy(startNode: Node, unique = []):
if (len(unique) == 0):
unique.append(startNode)
for root in unique:
if root.children: # Check if given root has children
for child in root.children: # Check if any children can merge with the current uniques
isUnique = True # Becomes false if a node is able t... | [
"def remove_duplicates(self):\n seen = set()\n self.nodes = [x for x in self.nodes if x not in seen and not seen.add(x)]",
"def merge_duplicates(self, tree: list):\n tree_set = []\n\n for node in tree:\n if node in tree_set:\n # node with the same key, merge n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Similar to greedy. Attempt to match labels; if a match is found, copy the tree and merge the nodes. Continue until there are no nodes in the tree that are not unique. See if the tree is complete: check that every node has a label, and if not, assign a label at random. Test if the tree is correct; if not, backtrack and pop the tree from the list. | def backtracking(apta: Apta, unique = []):
# print(unique)
if(len(unique) == 0):
unique.append(apta.root)
# Check if finished
if apta.complete() == True:
return
# Check promising
for root in unique:
if root.children:
for child in root.children:
if child not in unique:
for node in unique:
... | [
"def select_case_2(data,labels,T,budget,batch_size):\n\n n_nodes = len(T[1]) #total nodes in T\n n_samples = len(data) #total samples in data\n L = np.zeros(n_nodes) #majority label\n p1 = np.zeros(n_nodes) #empirical label frequency\n n = np.zeros(n_nodes) #number of points sampled from each node\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Menu for importing data. TODO ... | def menuentriesimport(self):
menutrig = True
while menutrig:
choose = raw_input("Choose your Task:\n Read Database: 1 \n Read vCard: 2 \n back: b \n ::>")
if choose == "1":
pathandfile = raw_input("Enter Path and Filename:")
base = importDataobj.lo... | [
"def cmd_import(self):\n self.save()\n path = tkinter_filedialog.askopenfilename(\n initialdir=self.prefs[\"save_directory\"],\n filetypes=[(\"aeneas output ZIP file\", \".zip\"), (\"SMIL file\", \".smil\")],\n parent=self,\n title=\"Select aeneas output (SM... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests the API endpoint to query margin open orders | def test_margin_open_orders():
client = Client(key, secret)
response = client.margin_open_orders(**params)
response.should.equal(mock_item) | [
"def open_orders():\n return _make_request('orders/own', private=True)['orders']",
"def test_get_all_orders(self, client, auth_token):\n\n response = client.get(\n \"/api/v1/parcels\",\n headers=dict(Authorization=\"Bearer \" + auth_token))\n\n res_data = json.loads(response... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generator that deserializes and provides casing objects. Doing it this way, instead of using fixtures, means we don't have to maintain the JSON; it will always work, as it has access to the historic model. | def casing_codes():
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, '0006_load_casing_code.json'), 'r') as json_data:
data = json.load(json_data)
for item in data:
yield item | [
"def test_case_insensitive(self):\n\n @KeyLookup(graph_ci, \"a\", [\"b\"], idstruct_class=CIIDStruct)\n def load_document(doc_lst):\n for d in doc_lst:\n yield d\n\n # Test Case - upper case A in id\n doc_lst = [{\"_id\": \"A:1234\"}]\n res_lst = load_doc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract setup.py content as a string from the downloaded tar | def _extract_setup_content(package_file, name):
tar_file = tarfile.open(fileobj=package_file)
setup_candidates = [elem for elem in tar_file.getmembers() if 'setup.py' in elem.name]
if len(setup_candidates) >= 1:
a = [elem.name for elem in setup_candidates]
setup_member = min(a, key=lambda x... | [
"def load_setup_py_file(self, pr_ref: str):\n repo = self.get_repo()\n response = repo.get_contents(\"setup.py\", ref=pr_ref)\n self._setup_py = str(response.decoded_content, \"utf-8\")",
"def _setup_body(setup_conf: SETUP_CONFIG) -> str:\n return os.linesep.join([\n 'import sys',\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instantiates and returns the metrics defined in the configuration dictionary. All arguments are expected to be handed in through the configuration via a dictionary named 'params'. | def create_metrics(config):
return thelper.train.utils.create_consumers(config) | [
"def __init__(self, metrics_params=[]):\n Config._process_metrics(metrics_params)\n self._metrics = metrics_params",
"def generate_metrics(self):\n metrics = []\n if \"metrics\" not in self._settings or not isinstance(self._settings[\"metrics\"], dict):\n return metrics\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterates through a list of strings to find 4 unique strings. | def find_4_unique_strings(w, h, list_of_strings):
for i in range(0, len(list_of_strings)):
# across2 = list_of_strings[i]
down2 = list_of_strings[i]
for i in range(0, len(list_of_strings)):
# down2 = list_of_strings[i]
across2 = list_of_strings[i]
if acr... | [
"def uniq(strings):\n return list(set(strings))",
"def unique_set(data: List[str]) -> List[str]:\n # TODO: Add the source code for method f7",
"def number_of_unique_strings(*arg):\n\n master_list = []\n for _list in arg:\n master_list = master_list + _list\n\n return len(set(master_list))"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Filters out datasets that we can't use since they are either lacking a release date or an original price. For rendering the output we also require the name and abbreviation of the platform. | def is_valid_dataset(platform):
if 'release_date' not in platform or not platform['release_date']:
logging.warn(u"{0} has no release date".format(platform['name']))
return False
if 'original_price' not in platform or not platform['original_price']:
logging.warn(u"{0} has no original pric... | [
"def _datasets_line(args):\n filter_ = args['filter'] if args['filter'] else '*'\n return _render_list([str(dataset) for dataset in datalab.bigquery.Datasets(args['project'])\n if fnmatch.fnmatch(str(dataset), filter_)])",
"def test_no_deprecated_datasets(self):\n result = self.stud... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates a bar chart from the given platforms and saves it as a PNG. | def generate_plot(platforms, output_file):
labels = []
values = []
for platform in platforms:
name = platform['name']
adapted_price = platform['adjusted_price']
price = platform['original_price']
if price > 2000:
continue #i.e. skip
if len(name)>15:
... | [
"def barchart(kmer_vectors: dict) -> None:\n for genome_name in kmer_vectors:\n cur_v = kmer_vectors[genome_name]\n dataset = list()\n for item in cur_v:\n dataset.append(cur_v.get(item))\n a = np.array(dataset)\n base_labels = [item for item in cur_v]\n y_pos... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
scr (Screen): the screen object. x, y, w1, h1, w2, h2 (num): the locations and sizes of the two buttons. color1, color2 (tup): the colors of the two buttons. resp (func): the response to the click. | def __init__(self, scr, x, y, w, h, color1, color2, size_factor=1, resp=lambda: None, delay_time=0.3):
if size_factor < 1:
raise InvalidBorderButtonError(w, h, w * size_factor, h * size_factor)
self.rect = Rect(scr, color1, x, y, w, h)
self.button = Button(scr, x + ((w - w * size_fac... | [
"def button1(msg,x,y,w,h,ic,ac,action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w>mouse[0]>x and y+h>mouse[1]>y:\n pygame.draw.rect(screen,ac,(x,y,w,h))\n if click[0]==1 and action!=None:\n action()\n else:\n pygame.draw.rect(scre... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Capitalize all named entities found in the given list of lines. | def capitalize_entities(lines):
ner_list = ["PERSON", "NORP", "FACILITY", "ORG", "GPE", "LOC", "PRODUCT",
"EVENT", "WORK_OF_ART", "LAW", "LANGUAGE"]
pos_list = ["ADJ", "ADV", "NOUN", "PROPN", "VERB"]
nlp = spacy.load("en")
doc = nlp(" ".join(lines))
update_dict = {}
for ent in do... | [
"def replace_with_uppercase(string, names, precompiled):\n for name in names:\n for result in precompiled[name].findall(string):\n string = string.replace(result, name)\n return string",
"def uncapitalize(s, preserveAcronymns='False'):\n\n pass",
"def capitalize(s):\n\n pass",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert all bill types into their acronym form (e.g. "assembly bill" > "ab") | def bill_types_to_acronyms(lines):
update_dict = {}
update_dict['assembly bill'] = 'ab'
update_dict['assembly bill number'] = 'ab'
update_dict['senate bill'] = 'sb'
update_dict['senate bill number'] = 'sb'
update_dict['house resolution'] = 'hr'
update_dict['house resolution number'] = 'hr'
... | [
"def acronym(input):\n words = input.split()\n res = ''\n for word in words:\n res = res + word[0].upper()\n return res",
"def applyAcronymToMsType (msType_phrase):\n mstype_dict = {\n 'Research Article':'Res',\n 'Short Report':'SR',\n 'Review':'Rw',\n 'Commentary':'Com',\n 'V... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
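When replacing the phrases from `bill_types_to_acronyms` in free text, the longer variants must be applied before their prefixes, otherwise "assembly bill number" gets half-rewritten by the shorter "assembly bill" entry. The longest-first ordering below is an illustrative heuristic, not taken from the row:

```python
update_dict = {
    "assembly bill number": "ab", "assembly bill": "ab",
    "senate bill number": "sb", "senate bill": "sb",
}

line = "the assembly bill number 42 amends senate bill 7"
for phrase in sorted(update_dict, key=len, reverse=True):
    line = line.replace(phrase, update_dict[phrase])
print(line)  # the ab 42 amends sb 7
```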
Gets all futures for this delegate. These can be used to handle any pending futures when a peripheral is disconnected. | def futures(self) -> Iterable[asyncio.Future]:
services_discovered_future = (
(self._services_discovered_future,)
if hasattr(self, "_services_discovered_future")
else ()
)
return itertools.chain(
services_discovered_future,
self._servi... | [
"def async_all_discovered_devices(self) -> Iterable[BLEDevice]:\n return itertools.chain.from_iterable(\n scanner.discovered_devices for scanner in self._scanners\n )",
"async def get_all(self) -> List[T]:\n all_items = [await self.get()]\n try:\n while True:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transaction service sync message: receive txs data. | def msg_tx_service_sync_txs(self, msg: TxServiceSyncTxsMessage) -> None:
network_num = msg.network_num()
self.node.last_sync_message_received_by_network[network_num] = time.time()
tx_service = self.node.get_tx_service(network_num)
result_items = tx_service.process_tx_sync_message(msg)
... | [
"def conduct_transaction(self,trans,o):\n pass",
"def transaction_command():\n pass",
"def sendtx(cmd):\n txData = cmd.split(\"sendtx \")[-1]\n if \"{\" in txData:\n txData = json.loads(txData)\n print(\"Sending transaction...\")\n coin.addTx(txData)",
"def svn_txdelta_send_txstream... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the Google Docs parser from the ``WAGTAILCONTENTIMPORT_GOOGLE_PARSER`` setting, defaulting to wagtail_content_import.parsers.google.GoogleDocumentParser. | def get_google_parser():
parser_string = get_google_parser_string()
return import_string(parser_string) | [
"def read_google_parser_config():\n parsers_config_list = global_config.get('parsers')\n logging.info('Will get google')\n google_config = dict()\n for parser_config in parsers_config_list:\n if parser_config.get('name') == 'google':\n google_config = copy.deepcopy(parser_config)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the Office Open XML parser from the ``WAGTAILCONTENTIMPORT_DOCX_PARSER`` setting, defaulting to wagtail_content_import.parsers.microsoft.DocxParser. | def get_docx_parser():
parser_string = get_docx_parser_string()
return import_string(parser_string) | [
"def get_parser(self, force=False):\n if not self.__parser or force:\n self.__parser = self._create_parser()\n return self.__parser",
"def parser(self):\r\n if self._parser is None:\r\n self._parser = cache.load_module(self.path, self.name) \\\r\n or self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns confirmed infection cases for country 'Poland' given a date. Ex. >>> poland_cases_by_date(7, 3, 2020) 5 >>> poland_cases_by_date(11, 3) 31 | def poland_cases_by_date(day: int, month: int, year: int = 2020) -> int:
# Your code goes here (remove pass)
y = year % 100
return confirmed_cases.loc[confirmed_cases["Country/Region"]=="Poland"][f'{month}/{day}/{y}'].values[0] | [
"def query_cases_by_date(self, start, end):\n start_str = start.strftime(\"%m-%d-%Y\")\n end_str = end.strftime(\"%m-%d-%Y\")\n date_query_url = \"https://{}GetRecentCivilCases/{}/{}\".format(\n self.api_base,\n start_str,\n end_str,\n )\n r = self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
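`poland_cases_by_date` builds a column name like "3/11/20" and looks it up on the country row. A self-contained sketch with a tiny stand-in frame; the expected values 5 and 31 come from the doctest in the row above (the other numbers are made up for the stand-in).

```python
import pandas as pd

# Miniature stand-in for the confirmed-cases sheet (columns as m/d/yy strings).
confirmed_cases = pd.DataFrame(
    {"Country/Region": ["Poland", "Germany"], "3/7/20": [5, 684], "3/11/20": [31, 1908]}
)

def poland_cases_by_date(day, month, year=2020):
    col = f"{month}/{day}/{year % 100}"
    mask = confirmed_cases["Country/Region"] == "Poland"
    return confirmed_cases.loc[mask, col].values[0]

print(poland_cases_by_date(7, 3, 2020))  # 5
print(poland_cases_by_date(11, 3))       # 31
```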
Returns the top 5 infected countries given a date (confirmed cases). Ex. >>> top5_countries_by_date(27, 2, 2020) ['China', 'Korea, South', 'Cruise Ship', 'Italy', 'Iran'] >>> top5_countries_by_date(12, 3) ['China', 'Italy', 'Iran', 'Korea, South', 'France'] | def top5_countries_by_date(day: int, month: int, year: int = 2020) -> List[str]:
# Your code goes here (remove pass)
y = year % 100
data=f'{month}/{day}/{y}'
top = confirmed_cases.groupby(["Country/Region"]).max().sort_values(by=data).tail(5).iloc[:,0].keys().tolist()[::-1]
return top | [
"def get_top_five_countries():\r\n\r\n lines = country_pop.split('\\n')\r\n co = []\r\n for line in lines:\r\n country= line.split('\\t')\r\n co.append(country)\r\n\r\n\r\n topfive= []\r\n for i in co[1:6]:\r\n topfive.append(i[1])\r\n return topfive",
"def compute_names_by_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the number of countries/regions where the infection count in a given day was the same as the previous day. Ex. >>> no_new_cases_count(11, 2, 2020) 35 >>> no_new_cases_count(3, 3) 57 | def no_new_cases_count(day: int, month: int, year: int = 2020) -> int:
# Your code goes here (remove pass)
date_now = datetime.date(year, month, day)
date_prev = date_now - datetime.timedelta(days=1)
pattern = '%#m/%#d/%y'
num_of_countries = confirmed_cases.count()['Country/Region']
nu... | [
"def nyt_cases_counties(df):\n # Cast date as datetime\n df['date'] = pd.to_datetime(df['date'])\n # Drop records with county = 'Unknown' or no FIPs code\n df = df.loc[(df['county'] != 'Unknown') & (df['fips'].notnull())].copy()\n # Store FIPS codes as standard 5 digit strings\n df['fips'] = _fips... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
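The `'%#m/%#d/%y'` pattern in `no_new_cases_count` (non-zero-padded month/day) is the Windows-specific strftime spelling; the POSIX equivalent is `'%-m/%-d/%y'`. Building the column name with an f-string sidesteps the portability issue:

```python
import datetime

d = datetime.date(2020, 3, 3)

# Portable alternative to the platform-specific '%#m/%#d/%y' / '%-m/%-d/%y'.
col = f"{d.month}/{d.day}/{d.year % 100}"
print(col)  # 3/3/20
```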
Method that performs optimization using the simulated annealing method. Notes | def run_optimization(self, f, parameters, constraints=None):
assert constraints is None, "Simulated Annealing optimizer cannot handle restraints."
print("!=================================================================================!")
print("! STARTING SIMULATED ANNEAL... | [
"def optimize(self):\n raise NotImplementedError",
"def solve_sa(f, n=numpy.inf, m=numpy.inf, verbose=True):\n\n print 'Running simulated annealing...',\n if n < numpy.inf: print 'for %.f steps' % n\n if m < numpy.inf: print 'for %.2f minutes' % m\n\n t = time.time()\n a, k, s, v = 0, 0, 0, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add shared Hermes/MQTT commandline arguments. These are useful arguments for every Hermes client, concerning the connection, authentication, site IDs, debugging and logging. | def add_hermes_args(parser: argparse.ArgumentParser):
parser.add_argument(
"--host", default="localhost", help="MQTT host (default: localhost)"
)
parser.add_argument(
"--port", type=int, default=1883, help="MQTT port (default: 1883)"
)
parser.add_argument("--username", help="MQTT use... | [
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.run, 1, 'Execute run on remote server (to be used with --client argument)', action='store_true')\n self.createArgument('--stop', self.stop, 1, '... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect to an MQTT broker with supplied arguments. | def connect(client: mqtt.Client, args: argparse.Namespace):
if args.username:
client.username_pw_set(args.username, args.password)
# TLS
if args.tls:
# TLS is enabled
if args.tls_version is None:
# Use highest TLS version
args.tls_version = ssl.PROTOCOL_TLS
... | [
"def connect_mqtt(self):\n\n\t\tdef on_connect(client, userdata, flags, rc):\n\t\t\t\"\"\"\n\t\t\tThis method is the callback for a connection try.\n\t\t\t:param client: the client\n\t\t\t:param userdata: the submitted userdata\n\t\t\t:param flags: the submitted connection flags\n\t\t\t:param rc: the response code\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Will resolve context processors from AppConfigs and add them to templates (list of backend configurations). | def add_required_context_processors(templates, installed_apps, option=None):
option = option or DEFAULT_CONTEXT_PROCESSORS_OPTION
processors = defaultdict(list)
for appc in ensure_app_configs(installed_apps):
required_cps = getattr(appc, option, None)
if not required_cps:
contin... | [
"def update_context_processors_from_apps(settings, processors_option=None):\n settings = SettingsDict.ensure(settings)\n installed_apps = settings.get('INSTALLED_APPS')\n templates = settings.get('TEMPLATES')\n if installed_apps and templates:\n add_required_context_processors(templates, installe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update settings module with upper case values from another module. | def update_settings_from_module(settings, module_name, search_base=None, quiet=False):
settings = SettingsDict.ensure(settings)
if search_base is None:
search_base = settings.name.rpartition('.')[0]
module, tried = find_and_import_module(module_name, search=search_base)
if module:
data ... | [
"def load(self, module):\n log = logging.getLogger()\n log.debug(\"Loading settings from '{0}'\".format(module.__file__))\n \n for key, value in module.__dict__.iteritems():\n if not key.startswith('_') and key.isupper():\n setattr(self, key, value)",
"def upd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Will update only a single value from a python module. By default this value is SECRET_KEY, but that can be changed with `setting` argument. If the module doesn't exists, then a new file is created unless `create_if_missing` is False. Module is searched starting at the peer of settings module. Alternative search path ca... | def update_secret_from_file(settings, secret_key_file=None, search_base=None, create_if_missing=True, setting=None):
settings = SettingsDict.ensure(settings)
secret_key_file = secret_key_file or DEFAULT_SECRET_KEY_FILE
setting = setting or 'SECRET_KEY'
if settings.get(setting):
# We already hav... | [
"def find_or_create_secret_key():\n SECRET_KEY_DIR = os.path.dirname(__file__)\n SECRET_KEY_FILEPATH = os.path.join(SECRET_KEY_DIR, 'secret_key.py')\n sys.path.insert(1, SECRET_KEY_DIR)\n\n if os.path.isfile(SECRET_KEY_FILEPATH):\n from .secret_key import SECRET_KEY\n return SECRET_KEY\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update INSTALLED_APPS setting by expanding requirements from AppConfigs | def update_installed_apps(settings, apps_option=None):
settings = SettingsDict.ensure(settings)
installed_apps = settings.get('INSTALLED_APPS')
if installed_apps:
installed_apps = expand_required_apps(installed_apps, option=apps_option)
settings['INSTALLED_APPS'] = installed_apps | [
"def set_installed_apps(self, apps):\n \n # Make sure it's a list.\n apps = list(apps)\n \n # This function will be monkeypatched into place.\n def new_get_apps():\n return apps\n \n # Monkeypatch in!\n models.get_apps_old, models.get_apps = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update TEMPLATES setting by adding context_processors from AppConfigs | def update_context_processors_from_apps(settings, processors_option=None):
settings = SettingsDict.ensure(settings)
installed_apps = settings.get('INSTALLED_APPS')
templates = settings.get('TEMPLATES')
if installed_apps and templates:
add_required_context_processors(templates, installed_apps, op... | [
"def register_context_processors(app: Flask) -> None:\n app.context_processor(inject_get_alerts)\n app.context_processor(inject_get_hidden_alerts)\n app.context_processor(inject_a11y_url)",
"def _create_template_config(self, config):\n pass",
"def add_context_data(app, pagename, templatename, co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrap template loaders with a cached loader in production (DEBUG = False) | def use_cache_template_loader_in_production(settings, cached_backends=None):
# FIXME: this is done by Django from version 1.11 onwards, thus drop this at some point
settings = SettingsDict.ensure(settings)
debug = settings.get('DEBUG', False)
templates = settings.get('TEMPLATES')
cached_backends = c... | [
"def _load_compilers(self, caller):\n\n debug = dj_settings.DEBUG\n template = ''\n\n if hasattr(dj_settings, 'STATICLINK_CLIENT_COMPILERS'):\n for ext in dj_settings.STATICLINK_CLIENT_COMPILERS:\n if self._is_debug(ext):\n debug = True\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If this pixel is part of an edge, make it blue | def highlight_edges(edges, image):
image[edges>0.01] = [255, 0, 0] | [
"def color_edge(G, edge_id, color):\n G.edge[edge_id[0]][edge_id[1]]['color'] = color",
"def edge_color(e, g, pmap_component, pmap_color, default_color = \"black\"):\n u = g.source(e)\n v = g.target(e)\n color_u = pmap_color[pmap_component[u]]\n color_v = pmap_color[pmap_component[v]]\n return c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If corner intensity is above a certain threshold, make it green | def highlight_significant_corners(corners, image):
# This line is equivalent to the nested loop below, but much faster.
image[corners > 0.01 * corners.max()] = [0, 255, 0]
# for rowIndex in range(len(corners)):
# for pixelIndex in range(len(corners[0])):
# if corners[rowIndex][pixelInd... | [
"def colorThreshold(img, rbg_threshold = (60,60,60)):\n temp = np.zeros(img.shape)\n rflags_h = img[:,:]>rbg_threshold[0]\n\n temp[:,:][rflags_h] = 1\n \n return temp",
"def vertex_binary_color(binary: np.ndarray, x: int, y: int, r: float, r_factor: float, threshold: float) -> int:\n fill_ratio ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Forces an update of the batches no matter the current batch size. Prints errors if there are any. | def update_batches(self):
with self._commit_lock:
self._update_batches_force() | [
"def _update_batch_size(self):\n candidate = None\n for op in self.operations:\n op_batch_size = getattr(op, \"batch_size\", None)\n if op_batch_size is None:\n continue\n if candidate:\n if op_batch_size != candidate:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tries to resubmit failed submissions. | def _retry_failed_submissions(self):
still_failing = []
for create_func, batch_data in self._submission_fails:
try:
self._submit_batches.submit_update(create_func, batch_data)
except SubmitBatchesException:
still_failing.append((create_func, batch... | [
"def resubmit(self):\n self.id = None\n self.submit()",
"def _handle_submission_failure(self, calculation):\n self.abort_nowait('submission failed for the {} in iteration {}, but error handling is not implemented yet'\n .format(SiestaCalculation.__name__, self.ctx.iteration))",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add one object or reference to this batcher based on the arguments passed. | def add(self, **kwargs: dict):
# all keys are mandatory for references
reference_keys = set(['from_object_uuid', 'from_object_class_name', 'from_property_name',\
'to_object_uuid'])
if kwargs.keys() == reference_keys:
with self._commit_lock:
self._la... | [
"def add(self, *args, **kwargs):\n kw = copy.deepcopy(self.standard)\n kw.update(kwargs)\n self.append(self._base(*args, **kw))",
"def add(self, *args, **kwargs):\n nkwargs = kwargs.copy()\n nkwargs['addTo'] = self\n return self._maker.bind(*args, **nkwargs)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Closes this Batcher. Makes sure that all unfinished batches are loaded into Weaviate. Batcher is not usable after closing. | def close(self):
# stop watchdog thread
if self._auto_commit_watchdog is not None:
with self._commit_lock:
self._auto_commit_watchdog.is_closed = True
retry_counter = 0
while len(self._objects_batch) > 0 or len(self._reference_batch) > 0 or\
... | [
"def close(self) -> None:\n self._close()\n if hasattr(self, '_iterator'):\n delattr(self, '_iterator')\n self._fire_listeners(EventType.CLOSE)",
"def end_batch(self) -> None:\n self.handle(events.EndBatch())",
"def close(self):\n iter_close(self._response_iter)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the (scaled) coincidence. | def _coincidence(x, y):
coincidence = (x * y).sum()
if scaled:
# Handle division by zero error
denom = x.sum() * y.sum()
if denom == 0:
coincidence = np.nan
else:
coincidence /= denom
... | [
"def coincidence(self):\n self.S2sCoin = []\n for i in range(self.NbS2Peaks):\n coin = 0\n for idx in range(self.nchs):\n if (self.S2s[idx][self.S2s_Key[i]] > 0.5): # TODO: different size for S1 and S2?\n coin += 1\n self.S2sCoin.appe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
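Worked numbers for the scaled coincidence above, on two hypothetical binary event masks: the raw coincidence counts co-occurrences, and scaling divides by the product of the marginal counts (with the zero-denominator guard from the row).

```python
import numpy as np

def coincidence(x, y, scaled=True):
    c = (x * y).sum()
    if scaled:
        denom = x.sum() * y.sum()
        c = np.nan if denom == 0 else c / denom
    return c

a = np.array([1, 0, 1, 1, 0])
b = np.array([1, 1, 0, 1, 0])
print(coincidence(a, b, scaled=False))  # 2 co-occurrences
print(coincidence(a, b))                # 2 / (3 * 3) = 0.2222...
```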
Compare detected events across channels. See full documentation in the methods of SpindlesResults and SWResults. | def compare_channels(self, score="f1", max_distance_sec=0):
from itertools import product
assert score in ["f1", "precision", "recall"], f"Invalid scoring metric: {score}"
# Extract events and channel
detected = self.summary()
chan = detected["Channel"].unique()
# Get ... | [
"def compare_detection(self, other, max_distance_sec=0, other_is_groundtruth=True):\n detected = self.summary()\n if isinstance(other, (SpindlesResults, SWResults, REMResults)):\n groundtruth = other.summary()\n elif isinstance(other, pd.DataFrame):\n assert \"Start\" in o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compare detected events between two detection methods, or against a groundtruth scoring. See full documentation in the methods of SpindlesResults and SWResults. | def compare_detection(self, other, max_distance_sec=0, other_is_groundtruth=True):
detected = self.summary()
if isinstance(other, (SpindlesResults, SWResults, REMResults)):
groundtruth = other.summary()
elif isinstance(other, pd.DataFrame):
assert "Start" in other.columns... | [
"def compare_events(obtained, expected):\n\n result = {'true_positive': 0, 'false_positive': 0, 'false_negative': 0}\n # Iterate over a list of keys (frames) to be able to delete them from the dictionary\n for frame in list(expected):\n exp_event = expected[frame]\n if find_match(frame, exp_e... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot an overlay of the detected events on the signal. | def plot_detection(self):
import matplotlib.pyplot as plt
import ipywidgets as ipy
# Define mask
sf = self._sf
win_size = 10
mask = self.get_mask()
highlight = self._data * mask
highlight = np.where(highlight == 0, np.nan, highlight)
highlight_fil... | [
"def plot_events(obj):\n obj.ax.scatter(obj.event_times[obj.current_position], obj.event_values[obj.current_position],marker='.')\n obj.last_position = len(obj.event_values) - 1",
"def plot_prediction_overlay(tile: np.ndarray, prediction: np.ndarray):\n plt.figure()\n plt.imshow(tile)\n plt.show()"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
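The key trick in plot_detection is turning masked-out samples into NaN so matplotlib leaves gaps in the overlay. A toy demonstration; the sampling rate and mask here are made up.

```python
import numpy as np
import matplotlib.pyplot as plt

sf = 100
t = np.arange(0, 10, 1 / sf)
signal = np.sin(2 * np.pi * t)
mask = (t > 3) & (t < 5)  # pretend these samples belong to detected events

highlight = np.where(mask, signal, np.nan)  # NaNs break the highlight line
plt.plot(t, signal, color="k", lw=0.8)
plt.plot(t, highlight, color="crimson", lw=1.5)
plt.xlabel("Time (s)")
plt.show()
```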
Return a summary of the spindles detection, optionally grouped across channels and/or stage. | def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc="mean", sort=True):
return super().summary(
event_type="spindles",
grp_chan=grp_chan,
grp_stage=grp_stage,
aggfunc=aggfunc,
sort=sort,
mask=mask,
) | [
"def add_pruning_summaries(self):\n with tf.name_scope(self._spec.name + '_summaries'):\n tf.summary.scalar('sparsity', self._sparsity)\n tf.summary.scalar('last_mask_update_step', self._last_update_step)\n masks = get_masks()\n thresholds = get_thresholds()\n for mask, threshold in zip(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
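The grp_chan/grp_stage switches amount to a pandas groupby over the events table. A toy illustration with assumed column names; the real summary table has many more columns.

```python
import pandas as pd

events = pd.DataFrame({
    "Channel": ["Cz", "Cz", "Fz", "Fz"],
    "Stage": [2, 3, 2, 2],
    "Duration": [0.8, 1.1, 0.9, 1.0],
})
# grp_chan=True and grp_stage=True: aggregate within each (Channel, Stage) cell
print(events.groupby(["Channel", "Stage"]).agg("mean"))
```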
Return the (scaled) coincidence matrix. | def get_coincidence_matrix(self, scaled=True):
return super().get_coincidence_matrix(scaled=scaled) | [
"def influence_matrix(self) -> np.ndarray:",
"def get_membership_matrix(self):\n import numpy as np\n matrix = []\n for i in self.clusters:\n matrix.append(self.clusters[i]['indicator'])\n matrix = np.array(matrix)\n return matrix",
"def generate_cnk_matrix(self):\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
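Extending the scalar coincidence shown earlier to every channel pair yields the matrix. In this sketch, masks is assumed to be an (n_channels, n_samples) boolean array of detections.

```python
import numpy as np

def coincidence_matrix(masks, scaled=True):
    n = masks.shape[0]
    mat = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            c = float((masks[i] & masks[j]).sum())
            if scaled:
                denom = masks[i].sum() * masks[j].sum()
                c = np.nan if denom == 0 else c / denom
            mat[i, j] = c
    return mat

masks = np.array([[1, 0, 1, 1], [1, 1, 0, 1], [0, 0, 0, 0]], dtype=bool)
print(coincidence_matrix(masks))  # last row/column are NaN (empty mask)
```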
Return a summary of the SW detection, optionally grouped across channels and/or stage. | def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc="mean", sort=True):
return super().summary(
event_type="sw",
grp_chan=grp_chan,
grp_stage=grp_stage,
aggfunc=aggfunc,
sort=sort,
mask=mask,
) | [
"def board_summary(self):\n return self._call_summary(GxFpga.GxFpgaGetBoardSummary)",
"def summariseSuiteResult(self, suite):",
"def summary(self, grp_chan=False, grp_stage=False, mask=None, aggfunc=\"mean\", sort=True):\n return super().summary(\n event_type=\"spindles\",\n grp_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the (scaled) coincidence matrix. | def get_coincidence_matrix(self, scaled=True):
return super().get_coincidence_matrix(scaled=scaled) | [
"def influence_matrix(self) -> np.ndarray:",
"def get_membership_matrix(self):\n import numpy as np\n matrix = []\n for i in self.clusters:\n matrix.append(self.clusters[i]['indicator'])\n matrix = np.array(matrix)\n return matrix",
"def generate_cnk_matrix(self):\r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a summary of the REM detection, optionally grouped across stage. | def summary(self, grp_stage=False, mask=None, aggfunc="mean", sort=True):
# ``grp_chan`` is always False for REM detection because the
# REMs are always detected on a combination of LOC and ROC.
return super().summary(
event_type="rem",
grp_chan=False,
grp_sta... | [
"def summarise(self) -> None:\n with open(self.parsed_replay_file, \"r\") as f:\n self.parsed_replay = [line for line in f]\n\n # Some parsing stuff here\n\n self.match_summary = {\n \"match_id\": 123345,\n \"match_date\": \"2019-07-07\", #If we can get it, othe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the raw or filtered data of each detected event after centering to a specific timepoint. | def get_sync_events(
self, center="Peak", time_before=0.4, time_after=0.4, filt=(None, None), mask=None
):
from yasa.others import get_centered_indices
assert time_before >= 0
assert time_after >= 0
bef = int(self._sf * time_before)
aft = int(self._sf * time_after)
... | [
"def get_cursor_data(self, event):\n xmin, xmax, ymin, ymax = self.get_extent()\n if self.origin == 'upper':\n ymin, ymax = ymax, ymin\n arr = self.get_array()\n data_extent = mtransforms.Bbox([[ymin, xmin], [ymax, xmax]])\n array_extent = mtransforms.Bbox([[0, 0], arr.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
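get_sync_events ultimately stacks a window of bef + aft + 1 samples around each event. A simplified stand-in for the centering-and-stacking step, which drops events whose window would fall outside the recording.

```python
import numpy as np

def centered_windows(data, peaks, bef, aft):
    # Keep only peaks whose full window fits inside the recording.
    wins = [data[p - bef:p + aft + 1] for p in peaks
            if p - bef >= 0 and p + aft < len(data)]
    return np.vstack(wins)

sig = np.arange(100, dtype=float)
print(centered_windows(sig, peaks=[10, 50, 99], bef=2, aft=2))  # peak 99 is dropped
```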
Get the values for each argument in a command. | def get_arg_vals(self):
return self.arg_vals | [
"def get_args(command):\n\n arglist = subprocess.Popen('for i in %s; do echo $i; done' % command, \n shell=True, \n stdout=subprocess.PIPE).communicate()[0]\n arglist = [i for i in arglist.split('\\n') if i]\n return arglist",
"def get_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process inputs in a batch, storing each output or exception in the buffer. Blocks until the batch is ready to be processed; when it is, a handler is called on the input data. If the handler raises an exception, that exception is stored in every DataRequest inside the buffer; otherwise the value returned by the handler is stored... | def _procces_in_batch(self) -> None:
if not self._handler:
raise HandlerNotSet()
start_at = time.time()
buffer = self._wait_buffer_ready()
elapsed_time = time.time() - start_at
        # When _wait_buffer_ready is stopped, the buffer could be empty
# avoid calling p... | [
"def process_batch(self, batch: BatchType) -> None:\n raise NotImplementedError",
"def process_batch(self, batch: List[Dict[str, Any]]) -> List[Response]:\n pass",
"def process_data(self, data):\n for line in self.buffer.process_data(data):\n try:\n self.process_li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
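The docstring describes fanning one handler call, or its exception, out to every request in the batch. A minimal sketch with a hypothetical request type standing in for DataRequest.

```python
class Request:
    def __init__(self, data):
        self.data = data
        self.result = None
        self.exception = None

def process_batch(handler, buffer):
    if not buffer:  # the wait can be interrupted and hand back an empty buffer
        return
    try:
        results = handler([req.data for req in buffer])
        for req, result in zip(buffer, results):
            req.result = result
    except Exception as exc:
        for req in buffer:  # the same exception is stored in every request
            req.exception = exc

batch = [Request(1), Request(2)]
process_batch(lambda xs: [x * 10 for x in xs], batch)
print([r.result for r in batch])  # [10, 20]
```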
Wrap `get_next_page()` to return a list so it's compatible with the Browser class expectation for `get_browsable` | def wrapper_next_page(url):
next_url = get_next_page(url)
if not next_url:
return []
return [next_url] | [
"def get_next_pages(self, driver):\n return driver.find_elements_by_xpath('//*[@class=\"PagerStyle\"]/td/table/tbody/tr/td/a')",
"async def fetch_paginated(\n client, bearer_token: str, url: str, data_key: str\n) -> List[Dict[str, Any]]:\n results: List[Dict[str, Any]] = []\n\n page_url = url # w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if the current page is the last one showing listings. | def is_last_page(soup):
for li in soup.find_all("li"):
if li.has_attr("class") and li.attrs["class"] == ["next", "ng-hide"]:
return True
return False | [
"def is_last_page(self):\n return 'last' not in self.links",
"def is_last_page(xml):\n #Get information from the page\n #matched=matched_items(xml)\n first_displayed,last_displayed=current_items(xml)\n #Check lastness\n return first_displayed>last_displayed",
"def is_on_last_item(self):\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
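The equality check above relies on BeautifulSoup parsing a multi-valued class attribute into a list. A usage sketch with made-up Angular-style pager markup, where a hidden next button marks the last page.

```python
from bs4 import BeautifulSoup

def is_last_page(soup):
    for li in soup.find_all("li"):
        if li.has_attr("class") and li.attrs["class"] == ["next", "ng-hide"]:
            return True
    return False

html = '<ul><li class="prev">Prev</li><li class="next ng-hide">Next</li></ul>'
print(is_last_page(BeautifulSoup(html, "html.parser")))  # True: the next button is hidden
```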
Get the real estate listing ID from the URL. If parsing the ID fails, we return a random string. | def get_listing_id(url):
match = re.search(r"\/(\w+)$", url)
if match:
return match.group(1)
else:
return "".join(random.choice(ascii_letters) for _ in range(10)) | [
"def get_listing_id(url):\n match = re.search(r\"\\/([\\dA-Z\\-]*)$\", url)\n if match:\n return match.group(1)\n else:\n return \"\".join(random.choice(ascii_letters) for _ in range(10))",
"def parseID(self,url):\n\tif validateUrl(url):\n\t splitURL = (url).split(\"/\")\n\t itemID = \"BH... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
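A worked run of the ID parser on hypothetical URLs, showing both the regex path and the random fallback.

```python
import random
import re
from string import ascii_letters

def get_listing_id(url):
    match = re.search(r"/(\w+)$", url)
    if match:
        return match.group(1)
    return "".join(random.choice(ascii_letters) for _ in range(10))

print(get_listing_id("https://example.com/listings/AB123"))  # AB123
print(len(get_listing_id("https://example.com/listings/")))  # 10: random fallback
```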
Create a department for use in tests | def create_department():
return Department.objects.create(name='Development') | [
"def test_create_department_succeeds(self, client, dept_data):\n\n data = dept_data['test_dept']\n response = client.post('/api/v1/department/', data)\n assert response.status_code == 201\n assert response.data['message'] == SUCCESS['create_entry'].format(\n data['name'])",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method registers signal handlers that trigger a graceful shutdown before the core terminates | def register_signal_handler(self):
signal.signal(signal.SIGINT, self.quit_gracefully)
signal.signal(signal.SIGTERM, self.quit_gracefully)
return | [
"def _setup_signal_handling(self):\n signal.signal(signal.SIGINT, self._signal_handler)\n signal.signal(signal.SIGQUIT, self._signal_handler)",
"def configure_signals():\n\n def stopping_handler(signum, frame):\n \"\"\"Handle signal and exit\"\"\"\n frame_data = format_frame(frame)\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
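A runnable sketch of the pattern; quit_gracefully is hypothetical here, since the core's actual teardown is not shown.

```python
import signal
import sys

def quit_gracefully(signum, frame):
    # Cleanup (closing sockets, flushing state) would go here before exiting.
    print(f"received signal {signum}, shutting down")
    sys.exit(0)

signal.signal(signal.SIGINT, quit_gracefully)
signal.signal(signal.SIGTERM, quit_gracefully)
signal.pause()  # block until a signal arrives (POSIX only)
```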
Find and return positions of pattern in genome. | def positions_of_pattern_in_genome(pattern, genome):
return [i for i in range(len(genome) - len(pattern) + 1) if genome[i:i+len(pattern)] == pattern] | [
"def pattern_indices(pattern,gene_sequence):\n indices = []\n pattern_seen = False\n pattern_start_index = 0\n for i in range(0,len(gene_sequence)-len(pattern)+1):\n tmp = gene_sequence[i:i+len(pattern)]\n if(tmp == pattern):\n indices.append(i) \n return indices",
"def get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
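Because the window slides one position at a time, overlapping occurrences are found; a quick check.

```python
def positions_of_pattern_in_genome(pattern, genome):
    return [i for i in range(len(genome) - len(pattern) + 1)
            if genome[i:i + len(pattern)] == pattern]

print(positions_of_pattern_in_genome("ATA", "ATATA"))  # [0, 2]: overlaps included
```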
Parse command line and return a socket address. | def parse_command_line(description):
parser = argparse.ArgumentParser(description=description)
parser.add_argument('host', help='IP or hostname')
parser.add_argument('-p', metavar='port', type=int, default=1060,
help='TCP port (default 1060)')
args = parser.parse_args()
addre... | [
"def parse_address(addr):\n if ':' in addr:\n try:\n host, port = addr.split(':')\n except ValueError:\n raise ValueError('Invalid address: %s' % addr)\n else:\n host, port = 'localhost', addr\n if host == '*':\n host = '' # any\n try:\n return (h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
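The truncated tail presumably packs the parsed host and port into the socket address the docstring promises; a reconstruction under that assumption.

```python
import argparse

def parse_command_line(description):
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('host', help='IP or hostname')
    parser.add_argument('-p', metavar='port', type=int, default=1060,
                        help='TCP port (default 1060)')
    args = parser.parse_args()
    return (args.host, args.p)  # assumed: the (host, port) address tuple
```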
Converse with a client over `sock` until they are done talking. | def handle_conversation(sock, address):
try:
while True:
handle_request(sock)
except EOFError:
print('Client socket to {} has closed'.format(address))
except Exception as e:
print('Client {} error: {}'.format(address, e))
finally:
sock.close() | [
"def forward(self, client_sock, server_sock):\r\n \r\n # Once we're here, we are not supposed to \"speak\" with the client\r\n # anymore. So any error means for us to close the connection.\r\n print thread.get_ident(), 'Forwarding.'\r\n # These are not used to anything significant... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
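A plausible accept loop that would feed handle_conversation, one thread per client; the listener setup is an assumption, and the handler here is a trivial echo stand-in so the sketch is self-contained.

```python
import socket
from threading import Thread

def handle_conversation(sock, address):  # echo stand-in for the real handler
    try:
        data = sock.recv(4096)
        if data:
            sock.sendall(data)
    finally:
        sock.close()

def serve(address):
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(address)
    listener.listen(5)
    while True:
        sock, client_address = listener.accept()
        Thread(target=handle_conversation, args=(sock, client_address),
               daemon=True).start()
```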
Receive a single client request on `sock` and send the answer. | def handle_request(sock):
aphorism = recv_until(sock, b'?')
answer = get_answer(aphorism)
sock.sendall(answer) | [
"def read_one_line(sock):\r\n newline_received = False\r\n message = \"\"\r\n while not newline_received:\r\n character = sock.recv(1).decode()\r\n if character == '\\n':\r\n newline_received = True\r\n elif character == '\\r':\r\n pass\r\n else:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Receive bytes over socket `sock` until we receive the `suffix`. | def recv_until(sock, suffix):
message = sock.recv(4096)
if not message:
raise EOFError('socket closed')
while not message.endswith(suffix):
data = sock.recv(4096)
if not data:
raise IOError('received {!r} then socket closed'.format(message))
message += data
re... | [
"def recv_until(sock, suffix):\n message = sock.recv(4096) # arbitrary value of 4KB\n if not message:\n raise EOFError('socket closed')\n while not message.endswith(suffix):\n data = sock.recv(4096)\n if not data:\n raise IOError('received {!r} then socket closed'.format(me... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
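A client-side counterpart built on recv_until (repeated here so the sketch is self-contained); the '.' answer sentinel is an assumption about the toy protocol.

```python
import socket

def recv_until(sock, suffix):  # as defined in the record above
    message = sock.recv(4096)
    if not message:
        raise EOFError('socket closed')
    while not message.endswith(suffix):
        data = sock.recv(4096)
        if not data:
            raise IOError('received {!r} then socket closed'.format(message))
        message += data
    return message

def ask(address, question):
    sock = socket.create_connection(address)
    try:
        sock.sendall(question + b'?')   # requests end with the '?' sentinel
        return recv_until(sock, b'.')   # assumed: answers end with '.'
    finally:
        sock.close()
```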
Adds a complex sequence, such as a cos(), to the signals list | def addcomplexplot(self, coefficient, frequency, constantPhi):
self.signals.append(coefficient * np.cos(frequency * self.samples + constantPhi))
self.frequencies.append(Fraction(frequency / np.pi))
self.coefficients.append(coefficient) | [
"def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexF___iadd__(self, *args)",
"def __iadd__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___iadd__(self, *args)",
"def set_complex_coefficients(self, c0, c1, c2):\n self.c0 = c0\n self.c1 = c1... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
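A standalone version of the builder above; samples is assumed to be the discrete time axis n.

```python
import numpy as np

samples = np.arange(32)
signals = []

def add_cosine(coefficient, frequency, phi):
    signals.append(coefficient * np.cos(frequency * samples + phi))

add_cosine(2.0, np.pi / 4, 0.0)
add_cosine(0.5, np.pi / 2, np.pi / 3)
print(np.round(sum(signals)[:4], 3))  # superposition of the two cosines
```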
Converts the frequency w0 to fraction form, strips pi from the input, and returns the period N of the discrete-time signal in a list. Could also return the constant k if added | def getperiod(self):
periodN = []
for freq in self.frequencies:
w0fraction = Fraction(freq / np.pi)
periodN.append(w0fraction.denominator * 2)
return periodN | [
"def freq(n, dt):\n import numpy as np\n return 1.0*np.arange(n)/n/dt",
"def ITU_R_468_weighting_analog():\n\n z = [0]\n p = [-25903.70104781628,\n +36379.90893732929j-23615.53521363528,\n -36379.90893732929j-23615.53521363528,\n +62460.15645250649j-18743.74669072136,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
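A worked check of the period formula: if w0 = (p/q) * pi in lowest terms, then N = 2q satisfies w0 * N = 2 * pi * p, so x[n] repeats every N samples. Note that limit_denominator is used here because Fraction of a raw float keeps the exact binary value; treating that as the intended behavior is an assumption.

```python
from fractions import Fraction
import numpy as np

w0 = 3 * np.pi / 4
frac = Fraction(w0 / np.pi).limit_denominator(1000)  # recovers 3/4
N = frac.denominator * 2                             # N = 8
print(frac, N)

n = np.arange(100)
x = np.cos(w0 * n)
print(np.allclose(x[:-N], x[N:]))  # True: x[n] == x[n + N]
```

Strictly speaking, 2q is always a period but not necessarily the fundamental one when p is even; the code above returns 2q regardless.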