query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Update ``cookies`` dictionary with cookie ``name`` and its ``morsel``. Optional keyword arguments typically contain ``domain``, ``expires_days``, ``expires``, ``path``, which are set on the Cookie.Morsel directly. ``cookies``, Dictionary-like object mapping cookie names to their morsels. It is updated in place and return... | def set_cookie( cookies, name, morsel, **kwargs ) : | [
"def morsel_to_cookie(morsel):\n c = create_cookie(\n name=morsel.key,\n value=morsel.value,\n version=morsel['version'] or 0,\n port=None,\n port_specified=False,\n domain=morsel['domain'],\n domain_specified=bool(morsel['domain']),\n domain_initial_dot=mo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
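The document field stores only the signature for this record; for orientation, here is a minimal sketch of what such a helper might look like, assuming Python's standard http.cookies.Morsel and a plain dict for ``cookies`` (the ``expires_days`` handling is an assumption, not the dataset's actual implementation):

```python
from datetime import datetime, timedelta

def set_cookie(cookies, name, morsel, **kwargs):
    """Update the `cookies` mapping in place with `name` -> configured `morsel`.

    Hypothetical sketch, not the record's real implementation.
    """
    expires_days = kwargs.pop("expires_days", None)
    if expires_days is not None and "expires" not in kwargs:
        expires = datetime.utcnow() + timedelta(days=expires_days)
        kwargs["expires"] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
    for key, value in kwargs.items():
        morsel[key] = value  # Morsel supports item assignment for reserved keys
    cookies[name] = morsel
    return cookies
```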
Returns True if this request supports HTTP/1.1 semantics | def supports_http_1_1(): | [
"def supports_http_1_1(self):\r\n return self.version == \"HTTP/1.1\"",
"def test_server_should_be_http_1_1(httpbin):\n resp = get_raw_http_response(httpbin.host, httpbin.port, \"/get\")\n assert resp.startswith(b\"HTTP/1.1\")",
"def checkProtocol(protocol_string):\n\n if protocol_string != \"HT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a signed cookie if it validates, or None. Call to this | def get_secure_cookie( name, value=None ): | [
"def get_secure_cookie(self, name):\n if not name in self.cookies:\n return None\n\n try:\n value, expires, ts, signature = self.cookies[name].value.rsplit('|', 3)\n expires = int(expires)\n ts = int(ts)\n except (AttributeError, ValueError):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
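The first negative hints at the usual layout (``value|expires|ts|signature``); below is a sketch of the standard signed-cookie check, assuming ``cookies`` maps names to raw strings and using a hypothetical ``SECRET`` key (HMAC over the payload, constant-time compare):

```python
import hashlib
import hmac
import time

SECRET = b"change-me"  # hypothetical application secret

def get_secure_cookie(cookies, name):
    """Return the cookie's value if signature and expiry validate, else None."""
    raw = cookies.get(name)
    if raw is None:
        return None
    try:
        value, expires, signature = raw.rsplit("|", 2)
        expires = int(expires)
    except (AttributeError, ValueError):
        return None
    expected = hmac.new(SECRET, f"{value}|{expires}".encode(),
                        hashlib.sha256).hexdigest()
    if not hmac.compare_digest(signature, expected):
        return None  # forged or corrupted
    if expires < time.time():
        return None  # expired
    return value
```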
Return True if this request is considered finished, that is, when | def has_finished(): | [
"def is_finished(self):\n return False",
"def finished(self):\n return self._state == FINISHED_STATE",
"def is_finished(self):\n return ((self.flag_status() >> 2) & 0x01)",
"def done(self):\n return self._status != Future.STATUS_STARTED",
"def IsCompleted(self) -> bool:",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if this request is received using `chunked` Transfer-Encoding. | def ischunked() : | [
"def _is_decompressive_transcoding(response, get_headers):\n headers = get_headers(response)\n return (\n headers.get(_STORED_CONTENT_ENCODING_HEADER) == \"gzip\"\n and headers.get(CONTENT_ENCODING_HEADER) != \"gzip\"\n )",
"def decode_chunked(self):\r\n cl = 0\r\n data = Stri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
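Again only the signature survives in the record; the conventional check is a header inspection, sketched here under the assumption that the request headers are available as a dict-like object:

```python
def ischunked(headers):
    """True if the request uses chunked Transfer-Encoding (hypothetical helper)."""
    transfer_encoding = headers.get("transfer-encoding", "")
    return "chunked" in transfer_encoding.lower()
```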
Generate a URL for a different web application identified by ``instkey``. Typically uses webapp.appurl(). ``instkey``, A tuple of ``(appsec, netpath, configini)`` indexing into the platform's `webapps` attribute | def appurl( instkey, name, **matchdict ) : | [
"def build_site_url(app: str, path: str = '') -> str:\n validate_application(app)\n\n if ENV == 'localhost':\n return f'http://{ENV}.transloc.com:{PORT}{path}'\n elif ENV == 'stage':\n return f'https://{app}.{ENV}.transloc.com{path}'\n else:\n return f'https://{app}.{TEAM}.{ENV}.tra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set a response status code. By default it will be 200. | def set_status( code ): | [
"def setResponseCode(code, message=None):",
"def set_status(self, status):\n self.response_dict(status=status)\n self.response.set_status(code=status)",
"def set_status_code(self, status_code):\n if (status_code is not None) and (type(status_code) is int):\n self.status_code = status... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set cookie `name`/`value` with optional ``kwargs``. Keyword arguments typically contain ``domain``, ``expires_days``, ``expires``, ``path``. Additional keyword arguments are set on the Cookie.Morsel directly. By calling this method the cookies attribute will be updated in place. See | def set_cookie( name, value, **kwargs ) : | [
"def set_cookie( cookies, name, morsel, **kwargs ) :",
"def setCookie(name, value, **kw):",
"def set_cookie(key, value='', **kwargs):",
"def set_secure_cookie( name, value, **kwargs ):",
"def set_zope_cookie(response, cookie_name, value, expires, path):\n \n response.setCookie(cookie_name, value, expi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Similar to the set_cookie() method, but additionally signs and timestamps the cookie value so it cannot be forged. Uses | def set_secure_cookie( name, value, **kwargs ): | [
"def set_cookie(key, value='', **kwargs):",
"def set_cookie( name, value, **kwargs ) :",
"def set_secure_cookie(self, name, val, remember):\n\n cookie_val = make_secure_val(val)\n cookie_str = '%s=%s; Path=/;' % (name, cookie_val)\n if remember:\n expires = time.time() + 5000 * 2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
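A companion sketch to the ``get_secure_cookie`` one above, using the same assumed ``value|expires|signature`` layout (signature, secret name, and expiry scheme are all assumptions); the HMAC makes forging infeasible without the secret, and the embedded timestamp bounds the cookie's lifetime:

```python
import hashlib
import hmac
import time

def set_secure_cookie(cookies, name, value, expires_days=30, secret=b"change-me"):
    """Store `value` with an expiry timestamp and an HMAC signature (sketch)."""
    expires = int(time.time() + expires_days * 86400)
    payload = f"{value}|{expires}"
    signature = hmac.new(secret, payload.encode(), hashlib.sha256).hexdigest()
    cookies[name] = f"{payload}|{signature}"
```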
Deletes all the cookies the user sent with this request. | def clear_all_cookies(): | [
"def clear_all_cookies(self):\r\n for name in self.request.cookies:\r\n self.clear_cookie(name)",
"def clear_cookies(self):\n self.base_driver.delete_all_cookies()",
"def clear_cookies(self):\n self.driver.delete_all_cookies()",
"def delete_all_cookies(self):\n self.log.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Subscribe a ``callback`` function to be called when this response is finished. | def set_finish_callback( callback ): | [
"def handle_response(self, callback):\n\n self.log.info(\"Received callback for subscription %s\", self.service_id)\n self.log.info(callback)\n\n # handle callbacks\n self.handle_callbacks()",
"def add_done_callback(self, callback):\n self._loop.call_soon_threadsafe(self.future.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if this response is transferred using `chunked` Transfer-Encoding. | def ischunked() : | [
"def _is_decompressive_transcoding(response, get_headers):\n headers = get_headers(response)\n return (\n headers.get(_STORED_CONTENT_ENCODING_HEADER) == \"gzip\"\n and headers.get(CONTENT_ENCODING_HEADER) != \"gzip\"\n )",
"def IsHandshakeComplete(self):\n if (self.GetHeadersOut() and s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Flushes the response header (if not already written) to the socket connection, then flushes the write buffer to the socket connection. ``finishing``, If True, signifies that data written since the last flush() on this response instance is the last chunk; it will also flush the trailers at the end of the chunked response.... | def flush( finishing=False, callback=None ): | [
"def write(self, chunk, callback=None):\r\n if not self.stream.closed():\r\n self._write_callback = stack_context.wrap(callback)\r\n self.stream.write(chunk, self._on_write_complete)",
"def end_request(self):\n if not self._chunked:\n return\n trailers = [(n, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a generator which, on every iteration, will call the ``callback`` function with the ``request`` and ``c`` arguments, which are preserved till the iteration is over. The callback should return a tuple representing a chunk, ``(chunk_size, chunk_ext, chunk_data)``; this will be formatted into a response chunk and sent ... | def chunk_generator( callback, request, c ): | [
"def _request_generator(request, data_handler):\n # First, the request header.\n yield data_handler.request_to_bytes(request)\n\n # Then, for the body. The body can be bytes or an iterator, but that's it.\n # The iterator is the more general case, so let's transform the bytes into\n # an iterator via... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
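The HTTP/1.1 chunk framing itself is fixed by RFC 7230 (hex size, optional extension, CRLF, payload, CRLF); here is a sketch of the generator described in the query, with the callback contract taken verbatim from it and everything else assumed:

```python
def chunk_generator(callback, request, c):
    """Yield wire-format HTTP/1.1 chunks until the callback signals the end."""
    while True:
        chunk_size, chunk_ext, chunk_data = callback(request, c)
        ext = b";" + chunk_ext if chunk_ext else b""
        yield b"%x" % chunk_size + ext + b"\r\n" + chunk_data + b"\r\n"
        if chunk_size == 0:  # a zero-size chunk terminates the stream
            break
```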
Instantiate plugin with `viewname` and `view` attributes. | def __init__( viewname, view ): | [
"def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)",
"def factory(parent, ui):\n\n \"\"\"\n # They don't instantiate the object directly. They use plugin manager!\n \n vm = pluginManager.getPluginObject(\"viewmanager\", viewManagerStr)\n if vm is None:\n # load t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform outgoing message entity. ``request.response`` will be updated in place. ``request``, | def transform( request, data, finishing=False ): | [
"def apply_response(self, request):\n assert request.response is not None\n response = request.response\n\n other_addr = self.get_other_address()\n\n self.processor.process_command(\n other_addr=other_addr,\n command=request.command,\n cid=request.cid,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Endpoint for API requests given a book ISBN | def api(isbn):
# Ensure valid isbn-10 format provided
if len(isbn) != 10:
response = make_response(
jsonify("Please provide a valid ISBN-10"), 404)
response.headers['X-Error'] = "Please provide a valid ISBN-10"
return response
# Ensure requested book is in our database
... | [
"def fetch_details_from_isbn(isbn):\n books_api_base_url = 'https://www.googleapis.com/books/v1/volumes'\n query_params = {\n 'q': isbn,\n 'country': 'US',\n }\n response = requests.request('GET', books_api_base_url, params=query_params)\n return response",
"def api(isbn):\n\n\tresponse = {}\n\n\tdata ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renders books containing search query | def search():
try:
query = request.args.get("q").lower()
except AttributeError:
query = request.args.get("q")
# Adding browse functionality
browse = request.args.get("browse")
if browse is None:
# Select all rows with a column value that includes query
results = db.... | [
"def search_book():\n\n title = request.form.get(\"search\")\n books = book_search_results(GR_KEY, title)\n acct = get_current_account(session['acct'])\n search = True\n\n return render_template(\"index.html\", books=books, acct=acct, search=search)",
"def filter_books():\n if request.method != ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to read the training data with oversampling. I tried the performance of this method, and because of the negative result I do not use it in the final model. | def loadtrainData_oversampling():
pre_x = []
pre_y = []
fileIn = open(PATH + 'traindata_Subtask4.txt')
for line in fileIn.readlines():
lineArr = line.strip().split()
pre_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])
pre_y.append(int(lineArr[-1]))
ros = Random... | [
"def loadtrainData_undersampling():\n train = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train.append([float(lineArr[i]) for i in range(len(lineArr))])\n\n pos = []\n neg = []\n for i in train:\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to read the training data with undersampling. My training set includes 153 positive samples and 3201 negative samples. By using this function, we can get all the positive samples and the same number of negative samples. | def loadtrainData_undersampling():
train = []
fileIn = open(PATH + 'traindata_Subtask4.txt')
for line in fileIn.readlines():
lineArr = line.strip().split()
train.append([float(lineArr[i]) for i in range(len(lineArr))])
pos = []
neg = []
for i in train:
if i[-1] == 1.0:
... | [
"def loadtrainData_oversampling():\n pre_x = []\n pre_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n pre_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n pre_y.append(int(lineArr[-1]))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is used to predict the data with the logistic model. The inputs are the weights of the logistic model, the features dev_x, and the labels 'dev_y'. The output is the prediction result. | def testLogRegres(weights, dev_x, dev_y):
predict_y = []
numSamples, numFeatures = np.shape(dev_x)
for i in range(numSamples):
if sigmoid(dev_x[i, :] * weights) > 0.5:
label = 1
else:
label = 0
predict_y.append(label)
print('Congratulations, testing comple... | [
"def logistic_predict(self, x: np.array) -> np.array:\r\n if self.LogisticModel is None:\r\n print('Logistic Model not trained, please run logistic_fit first!')\r\n return None\r\n else:\r\n return self.LogisticModel.predict(x)",
"def predict_logit(self, x):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get targets from either the sample or the net's output. | def get_targets(self, sample, net_output):
return sample["target"] | [
"def get_targets(self, sample, net_output):\n return sample[self.kind][\"target\"]",
"def getTargets(self):\r\n targets = [data.getTarget() for data in self.datas]\r\n return np.array(targets)",
"def getTargets(self,func,noise=1):\n targets = np.zeros((self.nSamples))\n for i ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get normalized probabilities (or log probs) from a net's output. | def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
return self.get_normalized_probs_scriptable(net_output, log_probs, sample) | [
"def get_normalized_probs(\n self,\n net_output: Tuple[Tensor, Dict[str, List[Optional[Tensor]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n return self.get_normalized_probs_scriptable(net_output, log_probs, sample)",
"def __get_net_probs(self):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., teacher forcing) to | def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out | [
"def forward(self, inputs_encoder, inputs_decoder):\n states_encoder = self.encoder(inputs_encoder)\n outputs_decoder, states_decoder = self.decoder(inputs_decoder, states_encoder)\n return outputs_decoder, states_decoder",
"def forward(self, src_tokens, src_lengths, prev_output_tokens, conte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to build shared embeddings for a set of languages after checking that all dicts corresponding to those languages are equivalent. | def build_shared_embeddings(
dicts: Dict[str, Dictionary],
langs: List[str],
embed_dim: int,
build_embedding: callable,
pretrained_embed_path: Optional[str] = None,
):
shared_dict = dicts[langs[0]]
if any(dicts[lang] != shared_dict for lang in langs):
... | [
"def common_languages(programmers: dict):\n lang_sets = [set(languages) for languages in programmers.values()]\n return set.intersection(*lang_sets)",
"def get_common(self, other, mapping):\n\n self_oov = defaultdict(lambda: 0)\n other_oov = defaultdict(lambda: 0)\n self_word_id = deepc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run the forward pass for a decoder-only model. Feeds a batch of tokens through the decoder to predict the next tokens. | def forward(self, src_tokens, **kwargs):
return self.decoder(src_tokens, **kwargs) | [
"def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)\n decoder_out = self.decoder(\n prev_output_tokens, encoder_out=encoder_out, **kwargs\n )\n return decoder_out",
"def forwar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
append questions to meetups | def questions_meetups(cls):
for meetup in MEETUPS_LIST:
for question in QUESTIONS_LIST:
if meetup["meetup_id"] == question["meetup"]:
meetups = MEETUPS_LIST.append(question)
return meetups | [
"def append_question(self, question_bs):\n self.external_id = question_bs['Id']\n self.title = question_bs['Title']\n self.tags = question_bs['Tags']\n self.texts.append(question_bs['Body'])\n self.creation_date = question_bs['CreationDate'][:10]\n try:\n self.po... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
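Note two bugs in this record: it assigns the return value of ``list.append`` (always ``None``) to ``meetups``, and it appends to ``MEETUPS_LIST`` while iterating over it. A corrected sketch of the apparent intent, assuming the record's module-level ``MEETUPS_LIST`` and ``QUESTIONS_LIST``:

```python
def questions_meetups(cls):
    """Attach each question to its meetup (corrected sketch, not the record)."""
    for meetup in MEETUPS_LIST:
        for question in QUESTIONS_LIST:
            if meetup["meetup_id"] == question["meetup"]:
                # Store questions on the meetup itself instead of mutating
                # the list being iterated.
                meetup.setdefault("questions", []).append(question)
    return MEETUPS_LIST
```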
Sort given items by splitting list into two approximately equal halves, sorting each with an iterative sorting algorithm, and merging results into a list in sorted order. | def split_sort_merge(items):
# TODO: Split items list into approximately equal halves
pivot = len(items) // 2
# TODO: Sort each half using any other sorting algorithm
# sort first half in-place (insertion sort)
left = insertion_sort(items[:pivot])
right = insertion_sort(items[pivot:])
# TOD... | [
"def split_sort_merge(items):\n # TODO: Split items list into approximately equal halves\n # TODO: Sort each half using any other sorting algorithm\n # TODO: Merge sorted halves into one list in sorted order",
"def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
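The record truncates before the merge step; a standard two-pointer merge that would complete it (hypothetical, matching the record's TODO comments):

```python
def merge(left, right):
    """Merge two already-sorted lists into one sorted list."""
    result, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    result.extend(left[i:])   # at most one of these two is non-empty
    result.extend(right[j:])
    return result
```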
Test case for retrieve_iso20022_account_statement | def test_retrieve_iso20022_account_statement(self):
pass | [
"def test_retrieve_iso20022_account_statement_ids(self):\n pass",
"def test_client_bank_account_retrieve(self):\n pass",
"def test_retrieve_account(self):\n pass",
"def test_get_account(self):\n pass",
"def test_account_get(self):\n pass",
"def test_lookup_account(self):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for retrieve_iso20022_account_statement_ids | def test_retrieve_iso20022_account_statement_ids(self):
pass | [
"def test_retrieve_iso20022_account_statement(self):\n pass",
"def get_account_ids(response):\n return [account['Id'] for account in response[0]]",
"def test_get_account_statements(self):\n query_string = [('month', 'month_example')]\n response = self.client.open(\n '/paySmart... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for retrieve_iso20022_payment_instruction | def test_retrieve_iso20022_payment_instruction(self):
pass | [
"def test_retrieve_iso20022_payment_instruction_status_report(self):\n pass",
"def test_submit_iso20022_payment_instruction(self):\n pass",
"def test_get_pay_in_details(self):\n pass",
"def test_get_payment_request(self):\n pass",
"def test_retrieve_payment_source(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for retrieve_iso20022_payment_instruction_status_report | def test_retrieve_iso20022_payment_instruction_status_report(self):
pass | [
"def test_retrieve_iso20022_payment_instruction(self):\n pass",
"def test_submit_iso20022_payment_instruction(self):\n pass",
"def test_get_pay_in_details(self):\n pass",
"def test_get_verification_status(self):\n pass",
"def test_status_reporting(self):\n pass",
"def te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for submit_iso20022_payment_instruction | def test_submit_iso20022_payment_instruction(self):
pass | [
"def test_retrieve_iso20022_payment_instruction(self):\n pass",
"def test_make_payment_request(self):\n pass",
"def test_retrieve_iso20022_payment_instruction_status_report(self):\n pass",
"def test_make_payment_request_confirmation(self):\n pass",
"def test_modify_payment_reques... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load the federalist papers as a tokenized list of strings, one for each essay | def load_federalist_corpus(filename):
with open(filename, "rt") as f:
data = f.read()
papers = data.split("FEDERALIST")
# all start with "To the people of the State of New York:" (sometimes . instead of :)
# all end with PUBLIUS (or no end at all)
locations = [(i, [-1] + [m.end() + 1 for m ... | [
"def load_primers(tsv_filename):\n answer = []\n with open(tsv_filename) as handle:\n for line in handle:\n if line.startswith(\"#\"):\n continue\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n if len(parts) == 2:\n left, right = parts\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a TF-IDF matrix from the docs input. | def tfidf(docs):
vocab = {}
df = {}
regex = re.compile("\s+")
count = 0
for doc in docs:
terms = re.split(regex, doc)
for term in set(terms):
if len(term) > 0:
if term not in vocab:
vocab[term] = count # (index, df)
... | [
"def build_tfidf_matrix(tf, matrix_file_path, matrix_name):\n assert isinstance(tf, sparse.csr.csr_matrix)\n assert isinstance(matrix_file_path, str) and isinstance(matrix_name, str)\n matrix_path = os.path.join(matrix_file_path, matrix_name)\n if os.path.exists(matrix_path):\n print_warning('Use... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a matrix of cosine similarities. | def cosine_similarity(X):
matrix = X.dot(X.transpose()).todense()
mat_len = len(matrix)
norms = [0] * mat_len
for i in range(0, mat_len):
norms[i] = 1.0 / np.sqrt(matrix.item((i, i)))
norm_mat = np.matrix(norms)
return np.multiply(norm_mat.transpose().dot(norm_mat), matrix) | [
"def cosineDistanceMatrix():\n\n\tmatrix = movieMatrix()\n\tsimilarity = np.dot(matrix, matrix.T)\n\tsquareMag = np.diag(similarity)\n\tinvSquareMag = 1/squareMag\n\tinvSquareMag[np.isinf(invSquareMag)]=0\n\tinvMag = np.sqrt(invSquareMag)\n\tcosine = similarity * invMag\n\tcosine = cosine.T * invMag\n\treturn cosin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a random sample of k words. | def sample(self, k):
result = ""
current = self.gen_beginning()
for i in range(0, k):
result += current[0] + " "
t = tuple(current)
if t in self.dict:
c_sum = self.dict[t][self.sum_index]
rand = random.randint(0, c_sum)
... | [
"def generate_k(data_set, k):\n return random.sample(data_set, k)",
"def sample(words, n=10) -> str:\n return [random.choice(words) for _ in range(n)]",
"def generate_k(data_set, k):\n return random.sample(data_set, k)\n # raise NotImplementedError()",
"def random_sample(population, k):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shift the colours to associate a value anywhere in the new cmap (relative to the two extremes start & stop, or min & max) with whichever value / colour of the input cmap (by default the midpoint). If the input cmap is divergent, this will be white by default. The locpoint value cannot be the min or max (start... | def shift_cmap(cmap, start=0., locpoint=0.5, stop=1.0, name='centered'):
# declare a colour + transparency dictionary
cdict={'red':[], 'green':[], 'blue':[], 'alpha':[]}
# regular index to compute the colors
RegInd = np.linspace(start, stop, cmap.N)
# shifted index to match what the data ... | [
"def define_plot_cmap(\n fig,\n ax,\n mid_point,\n cmap,\n ticks,\n labels,\n cmap_label,\n):\n\n new_cmap = shiftedColorMap(\n cmap,\n midpoint=mid_point,\n name='shifted'\n )\n X = np.linspace(0, 1, 256)\n cax = ax.scatter(-X-100, -X-100, c=X, cmap=new_cmap)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use the approximate eigenvector returned by the power iteration method to compute the largest eigenvalue of the matrix A | def compute_largest_eigenvalue(A, num_simulations):
b_k = power_iteration(A, num_simulations)
return b_k.dot(A).dot(b_k) / (b_k.dot(b_k)) | [
"def check(mat, otp):\n prd = mat*otp\n eigval = prd[0]/otp[0]\n print 'computed eigenvalue :' , eigval\n [eigs, vecs] = np.linalg.eig(mat)\n abseigs = list(abs(eigs))\n ind = abseigs.index(max(abseigs))\n print ' largest eigenvalue :', eigs[ind]",
"def analytical_eig(A):\n n = len(A)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
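The record calls a ``power_iteration`` helper that is not shown; the textbook version it presumably wraps looks like the sketch below (numpy assumed). The record's final expression is then the Rayleigh quotient b·Ab / b·b, which recovers the dominant eigenvalue from the returned eigenvector:

```python
import numpy as np

def power_iteration(A, num_simulations):
    """Approximate the dominant eigenvector of A by repeated multiplication."""
    b_k = np.random.rand(A.shape[1])
    for _ in range(num_simulations):
        b_k1 = A.dot(b_k)
        b_k = b_k1 / np.linalg.norm(b_k1)  # re-normalize every step
    return b_k
```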
Load from SAM formatted tag. | def from_sam(column):
tag = Tag.__new__(Tag)
header = TagHeader()
tag._header = header
header.tag, value_type, tag._buffer = column.split(b':', 2)
header.value_type = int(value_type)
return tag | [
"def _load(input, spec):\n return Tagger(\n input=input,\n specs=spec.get('specs', []),\n default_tag=spec.get('default_tag', None),\n match_all=spec.get('match_all', False)\n )",
"def parse_and_load(self):",
"def load(fp):\n tag_type = ord(fp.read(1)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Duplicate the tag instance and underlying buffer. | def copy(self):
new = Tag.__new__(Tag)
new._header = TagHeader.from_buffer_copy(self._header)
new._buffer = bytearray(self._buffer)
return new | [
"def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp",
"def clone(self, *args, **kwargs):\r\n overrides = kwargs.setdefault(\"overrides\", {})\r\n overrides.setdefault(\"name\", \"C... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if VM has already been registered. | def is_registered(self, thevm):
return self.is_registered_vm_ref(thevm.get_id()) | [
"def is_registered(self):\n return self._is_registered",
"def is_registered(self) -> bool:\n from arkouda.util import is_registered\n\n if self.registered_name is None:\n return False\n return is_registered(self.registered_name)",
"def is_new_vm_needed(self):\n if l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is for processing a vmrecord and determining the course of action that should be taken. | def process_vmrecord(self, vmref, vmrecord):
is_monitored = self.is_registered_vm_ref(vmref)
should_monitor = self._should_monitor(vmrecord)
if not is_monitored and should_monitor:
self.start_monitoring(vmref)
elif is_monitored and not should_monitor:
self.stop_mo... | [
"def processRecord(self,marc_record):\r\n marc_record = self.validate001(marc_record)\r\n marc_record = self.validate006(marc_record)\r\n marc_record = self.remove020(marc_record)\r\n marc_record = self.validate245(marc_record)\r\n marc_record = self.validate300(marc_record)\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tidy TLS secrets after vmdestroy | def process_vm_del(self, vm_ref):
if vm_ref in self.tls_secret_cache:
for key in tls_secret.XSCONTAINER_TLS_KEYS:
if key in self.tls_secret_cache[vm_ref]:
secret_uuid = self.tls_secret_cache[vm_ref][key]
session = self.host.get_session()
... | [
"def terraform_destroy():\n return subprocess.call([\n \"terraform\",\n \"destroy\",\n \"-var-file=terraform/aws/security.tfvars\",\n \"terraform/aws\"\n ])",
"def prepare_secrets(c, rebuild_venv=False, no_secret_cache=False):\n cli_tasks.prepare_secrets.run(c, rebuild_venv, n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a string representation of the given Bouquet. | def bouquet_to_string(bouquet: Bouquet) -> str:
flowers = sorted(bouquet.flowers.items())
flower_quantities = (f"{count}{flower.species}" for flower, count in flowers)
return "".join(chain(bouquet.name, bouquet.size, flower_quantities)) | [
"def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = \"<xbout.BoutDataset>\\n\" + \\\n \"Contains:\\n{}\\n\".format(str(self.data)) + \\\n \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n if self.options:\n text += \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an approximation of how complex the design is to create. | def design_complexity(design: Design) -> int:
diversity = 3 * len(design.required)
abundance = 2 * sum(design.required.values())
return diversity + abundance + design.additional | [
"def complexity(self):\n raise NotImplementedError()",
"def complexity(self):\r\n\t\toutput = 0\r\n\t\tfor node in self.choices[:] + [self.decisionNode, self.utilityNode]:\r\n\t\t\tparentValues = 0\r\n\t\t\tfor key, edges in self.edges.items():\r\n\t\t\t\tif node.name in edges:\r\n\t\t\t\t\tparentNode = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a dict of flowers and the amounts required across all designs. | def flower_demand(designs: Iterable[Design]) -> FlowerCounter:
elements = (design.required.elements() for design in designs)
return Counter(chain.from_iterable(elements)) | [
"def list_flowers():\n return {\"flowers\": list(all_flowers_genotype_map)}",
"def produceBouqet(self):\n for design in self._designs:\n\n designFlowers = design.getFlowers()\n\n flowers = self._flowersL if design.getSize() == 'L' else self._flowersS\n total = self._tota... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse position packet to extract x,y,z coordinates. | def handle_position(data: bytes) -> Tuple[bytes, str]:
x, y, z = struct.unpack('fff', data[0:3 * 4])
return data[20:], f'Current Position (x,y,z): {x} {y} {z}' | [
"def _parse_compressed_position(cls, data: str) -> Tuple[\n float, float, Optional[float], Optional[float], Optional[float], Optional[float],\n Optional[CompressionFix], Optional[CompressionSource], Optional[CompressionOrigin]\n ]:\n\n if len(data) < 13:\n raise ValueError(\"Compr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse jump packet to determine whether character is jumping. | def handle_jump(data: bytes) -> Tuple[bytes, str]:
jumping = struct.unpack('?', data[:1])[0]
return data[1:], 'Jumping' if jumping else 'Falling' | [
"def parse_jump(self, jump_tok_name, jump_name: str, constructor, instrs: List[Instr]):\n self.eat(jump_tok_name)\n target = self.next()\n if target.name != Tok.LABEL:\n raise ParseError(f\"expected label target for {jump_name}, got {display_token_name(target.name)}\")\n instrs.append(constructor(t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse sneak packet to determine whether character is sneaking. | def handle_sneak(data: bytes) -> Tuple[bytes, str]:
sneaking = not(struct.unpack('?', data[:1])[0])
return data[1:], 'Sneaking' if sneaking else 'Done sneaking' | [
"def is_valid_ssdp_packet(data: bytes) -> bool:\n return (\n bool(data)\n and b\"\\n\" in data\n and (\n data.startswith(b\"NOTIFY * HTTP/1.1\")\n or data.startswith(b\"M-SEARCH * HTTP/1.1\")\n or data.startswith(b\"HTTP/1.1 200 OK\")\n )\n )",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse slot packet to get new selected slot. | def handle_slot_select(data: bytes) -> Tuple[bytes, str]:
new_slot = struct.unpack('B', data[:1])[0]
return data[1:], f'New slot: {new_slot}' | [
"def _parse_slot(self):\n for device in self.current_slot.content:\n self.new_element(device)\n self.new_element()",
"def ParseSlot(self, G, node):\n slot = BLNlpClipsSlotMap()\n slot.Name = node\n return slot",
"def slot(self):\n if self.__slot in ApexAP1000... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse shoot packet to get name and direction of weapon shot. | def handle_shoot(data: bytes) -> Tuple[bytes, str]:
length = struct.unpack('H', data[:2])[0]
name = data[2:length+2]
direction = struct.unpack('fff', data[length+2:length+2+12])
return data[2+length:], f'Shot {name.decode()} in direction: {direction}' | [
"def shoot(self, direction):\n self.type = self.boss.get_bullet_type()\n if self.type == 'shotgun':\n try:\n dx = abs(Laser.List[-1].x - self.x)\n dy = abs(Laser.List[-1].y - self.y)\n if dx < 50 and dy < 50 and self.type == 'shotgun':\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse actor drop packet to get actor information. The message displays the actor name and drop position. If the actor is a "Drop" object, a loot packet is sent to the server to automatically pick it up. | def handle_actor_drop(data: bytes) -> Tuple[bytes, str]:
# TODO: reverse first 9 bytes
item_id = struct.unpack('I', data[:4])[0]
unknown = struct.unpack('I', data[4:8])[0] # noqa: F841
unknown2 = data[9] # noqa: F841
item_name_length = struct.unpack('H', data[9:11])[0]
item_name = data[11:11+i... | [
"def do_drop(self, arg):\r\n\r\n # put this value in a more suitably named variable\r\n itemToDrop = arg.lower()\r\n\r\n # get a list of all \"description words\" for each item in the inventory\r\n invDescWords = getAllDescWords(inventory)\r\n\r\n # find out if the player doesn't ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse regionchange packet to get region name. The drop positions of initial actors, like GoldenEggs, are also revealed. | def handle_region_change(data: bytes) -> Tuple[bytes, str]:
region_name_length = struct.unpack('H', data[:2])[0]
region_name = data[2:2+region_name_length]
return (data[2+region_name_length:],
f'Changing to region: {region_name.decode().upper()}') | [
"def get_region_name(self, i):\n for region in self.regions:\n if region['id'] == i:\n return region['name']\n return 'Unknown Matchmaking Region'",
"def region_name(self):\n return self.random_element(self._regions)[1]",
"def find_region(x):\n name = x['Name']\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse itemacquire packet to get information about the item. For example, it indicates the type and amount of ammo received from a Drop. | def handle_item_acquire(data: bytes) -> Tuple[bytes, str]:
item_name_length = struct.unpack('H', data[:2])[0]
item_name = data[2:2+item_name_length].decode(helpers.ENCODING)
amount = struct.unpack('I',
data[2+item_name_length:2+item_name_length+4])[0]
return data[2+item_name_l... | [
"def parse_item(raw_item: str) -> Item:\n name, cost, damage, armor = raw_item.split()\n\n return Item(name, int(cost), int(damage), int(armor))",
"def test_acquire(self):\n cmd = Acquire(10, Discriminator(name='test_disc', params={'test_params': 1.0}),\n Kernel(name='test_kern',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse itempickup packet to get the ID of the item picked up. This packet can be used to implement autolooting. | def handle_item_pickup(data: bytes) -> Tuple[bytes, str]:
item_id = struct.unpack('I', data[:4])[0]
return data[4:], f'Picked up item with ID {item_id}' | [
"def item_id(self):\n return self._item_id",
"def parse_item(self, item):\n if self.has_iattr(item.conf, ITEM_TAG[0]):\n return self.update_item",
"def item2id(self):\n if self._item2id is None:\n self._item2id = dict(zip(self.item_unique_vals, range(self.n_items)))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse reload packet to get the reloaded weapon and the type and amount of ammo. This packet can also be used to implement autoreloading. | def handle_reload(data: bytes) -> Tuple[bytes, str]:
try:
weapon_name_length = struct.unpack('H', data[:2])[0]
weapon_name = data[2:2+weapon_name_length].decode(helpers.ENCODING)
ammo_name_length = struct.unpack('H',
data[2+weapon_name_length:2+weapon... | [
"def handle_loaded_ammo(data: bytes) -> Tuple[bytes, str]:\n weapon_name_length = struct.unpack('H', data[:2])[0]\n weapon_name = data[2:2+weapon_name_length].decode(helpers.ENCODING)\n loaded_ammo = struct.unpack('I',\n data[2+weapon_name_length:2+weapon_name_length+4])[0] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse health packet to get amount of HP for actors. | def handle_health(data: bytes) -> Tuple[bytes, str]:
actor_id, hp = struct.unpack('Ih', data[:6])
return data[6:], f'Actor {actor_id} has {hp} HP' | [
"def get_hp():\n\n return character['HP']",
"def get_character_health(character: dict):\r\n print(\"Your health is: %d\" % character['HP'])",
"def get_hp(self):\n return self.hp",
"def get_health(self):\n return (self.body + self.soul) * 5",
"def getHealth(self):\r\n return self.h... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse mana packet to get amount of mana player has. | def handle_mana(data: bytes) -> Tuple[bytes, str]:
mana = struct.unpack('H', data[:2])[0]
return data[2:], f'Player has {mana} mana' | [
"def get_card_generic_mana(card_face: CardFace) -> int:\n if not card_face.mana_cost:\n return 0\n\n generic_mana = RE_GENERIC_MANA.search(card_face.mana_cost)\n if generic_mana:\n return int(generic_mana.group(1))\n return 0",
"async def mana():\n acc = Account(\"travelfeed\")\n m... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse actor state packet to get state of actors. | def handle_state(data: bytes) -> Tuple[bytes, str]:
actor_id, state_length = struct.unpack('IH', data[:6])
state = data[6:6+state_length].decode(helpers.ENCODING)
return data[6+state_length:], f'Actor {actor_id} in {state} state' | [
"def parse_state(self, state: str):\r\n state = state.strip()\r\n state = state.split(';')\r\n\r\n if len(state) < 2:\r\n print(state)\r\n return\r\n\r\n for field in state:\r\n split = field.split(':')\r\n if len(split) < 2:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse attack packet to get the actor that attacked a victim and the attack used. | def handle_attack(data: bytes) -> Tuple[bytes, str]:
attacker_id, attack_length = struct.unpack('IH', data[:6])
attack = data[6:6+attack_length].decode(helpers.ENCODING)
victim_id = struct.unpack('I', data[6+attack_length:6+attack_length+4])[0]
return (data[6+attack_length+4:],
f'Actor {atta... | [
"def get_victim (self):\n return self.get_suspect(self.victim)",
"def process_action(self, attacking_agent, action_dict, **kwargs):\n if self._get_action_from_dict(action_dict):\n for attacked_agent in self.agents.values():\n if attacked_agent.id == attacking_agent.id:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse loaded-ammo packet after firing a weapon to get the amount left. Implements autoreloading by sending an empty reload packet once the loaded ammo is 0. | def handle_loaded_ammo(data: bytes) -> Tuple[bytes, str]:
weapon_name_length = struct.unpack('H', data[:2])[0]
weapon_name = data[2:2+weapon_name_length].decode(helpers.ENCODING)
loaded_ammo = struct.unpack('I',
data[2+weapon_name_length:2+weapon_name_length+4])[0] # noqa: E... | [
"def reload_all_ammo(protocol, unit_dbref):\n\n # Looks something like: A:0/5,A(R):3/5,A(R):4/5\n damages = yield btgetxcodevalue(protocol, unit_dbref, 'mechdamage')\n section_split = damages.split(',')\n # This will store the new modified values for section damages.\n modified_sections = []\n for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that get_connection calls psftp.Connection with the correct values | def test_get_connection_settings(self, connection_mock): # pylint: disable=no-self-use
connection = get_connection()
connection_mock.assert_called_once_with(
host=EXAMS_SFTP_HOST,
port=int(EXAMS_SFTP_PORT),
username=EXAMS_SFTP_USERNAME,
password=EXAMS_SFT... | [
"def test_get_connection_established(self):\n module = MagicMock()\n connection = slxos.get_connection(module)\n self.assertEqual(connection, module.slxos_connection)",
"def test_get_connection_new(self, connection):\n socket_path = \"little red riding hood\"\n module = MagicMoc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that get_connection raises ImproperlyConfigured if settings.{0} is not set | def test_get_connection_missing_settings(self, settings_key, connection_mock):
kwargs = {settings_key: None}
with self.settings(**kwargs):
with self.assertRaises(ImproperlyConfigured) as cm:
get_connection()
connection_mock.assert_not_called()
assert... | [
"def test_get_connection_settings(self):\n assert get_connection_settings() == TEST_SETTINGS[\"connections\"][\"default\"]\n assert (\n get_connection_settings(\"backup\") == TEST_SETTINGS[\"connections\"][\"backup\"]\n )",
"def test_validate_settings_on_missing(self):\n set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Smoothing should be a number, bbox_x = [minx, maxx] | def get_res(smoothing, bbox_x, bbox_y):
def res(bbox, sm):
return int((bbox_x[1] - bbox_x[0])/sm)
return [res(bbox_x, smoothing), res(bbox_y, smoothing)] | [
"def smoothing_kernel(self,x):\n \n xmax = self.xmax\n xmin = self.xmin\n \n #xmax = max(x)\n #xmin = min(x)\n \n #L = self.master.L\n w = xmax-xmin\n #xi = (x-xmin)/w\n #kernel = xi*(1-xi)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DG is the datagrid, and sound is a callable function that returns the sound speed (see toomre.py). res_elem gives the size of the resolution element in simulation units (used to convert the mass in a pixel to surface density). It automatically masks regions where there are no particles, which can be used through cmap.se... | def get_toomre_Q(DG, sound, res_elem):
area = res_elem**2
gas_sd = DG.gas_data['masses']/area
star_sd = DG.star_data['masses']/area
gas_v = DG.gas_data['velocities'] # note this is actually v/r
gas_d = DG.gas_data['densities']
# Surface density reasoning see 1503.07873v1
gas_q = toom.Q_g... | [
"def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the number of particles within the bin radii, useful for seeing how the disk stabilises (does it transport mass into the centre?) | def n_particles_bins(DG, bins=[0, 0.5, 3, 10, 100]):
radii = fid.rss(DG.gas['Coordinates'][()])
hist, bin_edges = np.histogram(radii, bins)
return hist, bin_edges | [
"def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that fetch_inventory_and_error adds entries to database_inv_sig and that the execution time is smaller when fetching an already existing entry | def test_fecth_inventory_and_error():
# build
for key in divHretention.database_inv_sig:
# ensuring an empty database
del divHretention.database_inv_sig[key]
# test
test_time = 1e3
start_time = time.time()
inv, sig = divHretention.fetch_inventory_and_error(test_time)
long_ti... | [
"def fetch_inventory_and_error(time):\n if time in database_inv_sig.keys(): # fetch in database\n inv_T_c_local = database_inv_sig[time][\"inv\"]\n sig_inv_local = database_inv_sig[time][\"sig\"]\n else: # if time is not in the database\n GP = estimate_inventory_with_gp_regression(time=... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that compute_inventory runs correctly | def test_compute_inventory():
T = [1000]
c_max = [1e20]
time = 1e3
inv, sig = divHretention.compute_inventory(T, c_max, time)
assert len(inv) == len(sig)
assert len(inv) == len(T) | [
"def check_inventory(self) -> None:\n self.store.check_inventory()",
"def test_inventory_1(self):\n sys, ob1, feat1, per1, r1 = self.set_up_1()\n self.assertFalse(sys.inventory())",
"def test_inventory_2(self):\n sys, ob1, feat1, per1, r1 = self.set_up_1()\n sys.take(ob1.get_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that compute_inventory raises a TypeError when a float is given | def test_compute_inventory_float():
T = 1000
c_max = 1e20
time = 1e3
with pytest.raises(TypeError):
inv, sig = divHretention.compute_inventory(T, c_max, time) | [
"def check_for_float(check):",
"def test_two_floats(self):\n self.assertRaises(TypeError, product, 2, 1.3)",
"def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)",
"def _check_param(self, param)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs compute_c_max with isotope H and checks that the correct value is produced | def test_compute_c_max_h():
# build
T = np.array([600, 500])
E_ion = np.array([20, 10])
E_atom = np.array([30, 40])
angles_ion = np.array([60, 60])
angles_atom = np.array([60, 60])
ion_flux = np.array([1e21, 1e20])
atom_flux = np.array([2e21, 2e20])
# run
c_max = divHretention.c... | [
"def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n ou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs compute_c_max with isotope D and checks that the correct value is produced | def test_compute_c_max_D():
# build
T = np.array([600, 500])
E_ion = np.array([20, 10])
E_atom = np.array([30, 40])
angles_ion = np.array([60, 60])
angles_atom = np.array([60, 60])
ion_flux = np.array([1e21, 1e20])
atom_flux = np.array([2e21, 2e20])
# run
c_max = divHretention.c... | [
"def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n ou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs compute_c_max and checks that the correct output is produced | def test_compute_c_max_output():
# build
T = np.array([600, 500])
E_ion = np.array([20, 10])
E_atom = np.array([30, 40])
angles_ion = np.array([60, 60])
angles_atom = np.array([60, 60])
ion_flux = np.array([1e21, 1e20])
atom_flux = np.array([2e21, 2e20])
# run
output = divHreten... | [
"def test_cmax(self):\n cbca_obj = aggregation.AbstractAggregation(**{'aggregation_method': 'cbca',\n 'cbca_intensity': 5., 'cbca_distance': 3})\n\n cv_aggreg = cbca_obj.cost_volume_aggregation(self.ref, self.sec, self.cv)\n\n # Check if the cal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a dict for our Ansible facts. | def _facts(facts):
return {'swift_facts': facts} | [
"def provides_facts():\n return {\n \"srx_cluster\": \"A boolean indicating if the device is part of an \"\n \"SRX cluster.\",\n \"srx_cluster_id\": \"A string containing the configured cluster id\",\n \"srx_cluster_redundancy_group\": \"A multi-level dictionary of \"\n \"infor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load environment or sourced credentials. If the credentials are specified either in environment variables or in a credential file, the sourced variables will be loaded IF they are not set within the ``module.params``. | def _env_vars(self, cred_file=None, section='default'):
if cred_file:
parser = ConfigParser.SafeConfigParser()
parser.optionxform = str
parser.read(os.path.expanduser(cred_file))
for name, value in parser.items(section):
if name == 'OS_AUTH_URL':
... | [
"def load_credentials():\n\n # Get the config file path\n path = os.path.expanduser('~/.stirplate/config')\n\n # Default the credentials to NoneType\n user_id = None\n key = None\n secret = None\n location = None\n\n # Read the config data\n if os.path.exists(path):\n with open(pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Upload an object to a swift object store. | def _upload(self, variables):
required_vars = ['container', 'src', 'object']
variables_dict = self._get_vars(variables, required=required_vars)
container_name = variables_dict.pop('container')
object_name = variables_dict.pop('object')
src_path = variables_dict.pop('src')
... | [
"def upload_object(container, object_name, f):\n with SwiftService() as swift:\n objs = [\n SwiftUploadObject(\n f, object_name=object_name\n )\n ]\n responses = swift.upload(container, objs)\n _ensure_success(responses)",
"def upload(self, conta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure a container exists. If it does not, it will be created. | def _create_container(self, container_name):
try:
container = self.swift.head_container(container_name)
except client.ClientException:
self.swift.put_container(container_name)
else:
return container | [
"def create_container_if_missing(container, swift_conn, options):\n try:\n swift_conn.head_container(container)\n except swift_client.ClientException, e:\n if e.http_status == httplib.NOT_FOUND:\n add_container = config.get_option(options,\n 'swift_store... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a dictionary mapping the numbers zero through fifteen to their hexadecimal equivalents | def DictFunction2():
print "Create Second Dictionary"
NumberDict = dict(zip((i for i in range(16)), (hex(i) for i in range(16))))
print NumberDict | [
"def problem2():\n\n numbers = range(0, 16)\n result = dict(zip(numbers, [hex(hexadecimal) for hexadecimal in numbers]))\n print(result)",
"def hex_probabilities(self):\n return {hex(key): value for key, value in self.items()}",
"def make_gematria() -> Dict[int, str]:\n return {char: i for i,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
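The record is Python 2 (bare ``print`` statements); for reference, the same construction in Python 3 — the ``zip`` of two generators collapses to a dict comprehension:

```python
def dict_function2():
    # Python 3 equivalent of the record above, not the record itself.
    print("Create Second Dictionary")
    number_dict = {i: hex(i) for i in range(16)}
    print(number_dict)  # {0: '0x0', 1: '0x1', ..., 15: '0xf'}
```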
This strategy always tries to steer the hunter directly towards where the target last said it was and then moves forwards at full speed. This strategy also keeps track of all the target measurements, hunter positions, and hunter headings over time, but it doesn't do anything with that information. | def next_move(hunter_position, hunter_heading, target_measurement, max_distance, OTHER = None):
# This function will be called after each time the target moves.
# The OTHER variable is a place for you to store any historical information about
# the progress of the hunt (or maybe some localization informati... | [
"def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER=None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
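The record's body is truncated; below is a minimal, self-contained sketch of the naive strategy the query describes (the ``OTHER`` bookkeeping layout is an assumption):

```python
from math import atan2

def next_move(hunter_position, hunter_heading, target_measurement,
              max_distance, OTHER=None):
    """Steer straight at the last target measurement and move at full speed."""
    if OTHER is None:
        OTHER = {"measurements": [], "positions": [], "headings": []}
    # Keep the full history, even though this naive strategy never uses it.
    OTHER["measurements"].append(target_measurement)
    OTHER["positions"].append(hunter_position)
    OTHER["headings"].append(hunter_heading)
    target_x, target_y = target_measurement
    hunter_x, hunter_y = hunter_position
    heading_to_target = atan2(target_y - hunter_y, target_x - hunter_x)
    turning = heading_to_target - hunter_heading  # steering command
    distance = max_distance                       # full speed ahead
    return turning, distance, OTHER
```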
Returns True if your next_move_fcn successfully guides the hunter_bot to the target_bot. This function is here to help you understand how we will grade your submission. | def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER = None):
max_distance = 0.97 * target_bot.distance # 0.98 is an example. It will change.
separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target
caught = False
ctr = 0
# We will use your n... | [
"def demo_grading(hunter_bot, target_bot, next_move_fcn, OTHER=None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n\n # We will... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the angle, in radians, between the target and hunter positions | def get_heading(hunter_position, target_position):
hunter_x, hunter_y = hunter_position
target_x, target_y = target_position
heading = atan2(target_y - hunter_y, target_x - hunter_x)
heading = angle_trunc(heading)
return heading | [
"def get_angle((origin_x, origin_y), (target_x, target_y)):\n x_distance = target_x - origin_x\n y_distance = target_y - origin_y\n angle = math.atan2(y_distance, x_distance)\n return angle",
"def best_target_angle(self):\n\n best_target = self.best_target()\n\n if best_target is None:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the stress_test_number param from user params. If absent, returns the default of 100. | def get_stress_test_number(self):
return int(self.user_params.get("stress_test_number", 100)) | [
"def get_test_param(self, name, default=None):\n path = \"/\".join([\"/run/daos_tests\", name, \"*\"])\n return self.params.get(self.get_test_name(), path, default)",
"def numeric_param(self, param):\n return self.text.get(param, 0)",
"def get_trial_param(self, trial_id: int, param_name: st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a sequence (let's say from a context window), extract its components under the assumption that each "word" in the sequence is a triplet, and triplets may overlap on the last base | def get_triplet_composition(seq):
out = []
for i in range(len(seq)):
if i+3 > len(seq):
break
out.append(seq[i:i+3])
return out | [
"def bipa(sequence):\n return [_token2clts(segment)[0] for segment in sequence]",
"def get_complementary_sequence(sequence):\n\n complementary_sequence = ''\n for char in sequence:\n complementary_sequence = complementary_sequence + get_complement(char)\n return complementary_sequence",
"def ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens marker file and adds all markers to dictionary with | def open_markers(filename):
markers = {}
try:
with open(filename, "r") as f:
lines = f.readlines()
cur_marker = ""
cur_marker_name = ""
for i in range(len(lines)):
if i >= 7:
cur_line = lines[i]
if cu... | [
"def readMarkers(self,markerfile):\n with open(markerfile,'r') as fin:\n count = 0\n for line in fin:\n if line.startswith('#'): continue\n l = line.strip().split()\n if len(l) == 0: continue\n if len(l) == 6: chrom,name,distan... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates chi-squared values based on the amounts of a and b in the marker data. Markers with a chi-squared value > 3.84 are discarded. | def chi_squared(markers):
new_markers = {}
for marker in markers:
line = markers[marker][0]
a = line.count("a")
b = line.count("b")
length = a + b
expect_a = length / 2
expect_b = length / 2
chisq = pow((a - expect_a), 2) / expect_a + pow((b - expect_b),
... | [
"def chisquared(data, model):\n chisq = 0.\n ndata = 0\n for mspec, dspec in zip(model.data, data.data):\n ok = dspec.ferr > 0.\n chisq += (((dspec.flux[ok]-mspec.flux[ok])/dspec.ferr[ok])**2).sum()\n ndata += len(dspec.flux[ok])\n return (chisq, ndata)",
"def _chisquare_value(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
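The 3.84 cutoff in chi_squared is the critical value of the chi-squared distribution with one degree of freedom at p = 0.05, i.e. a test for distorted 1:1 segregation. A worked example of the per-marker statistic:

```python
def segregation_chisq(a, b):
    # expected counts under 1:1 segregation of the two alleles
    expected = (a + b) / 2
    return (a - expected) ** 2 / expected + (b - expected) ** 2 / expected

print(segregation_chisq(50, 50))  # 0.0 -> marker kept
print(segregation_chisq(60, 40))  # 4.0 -> exceeds 3.84, marker discarded
```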
Calculates the recombination frequency for every pair of markers. | def rec_freq(markers):
keys = list(markers.keys())
rf_pairs = {}
for i in range(len(markers)):
for j in range(i + 1, len(markers)):
m1 = markers[keys[i]][0]
m2 = markers[keys[j]][0]
tot_len = 0
score = 0
if len(m1) != len(m2):
... | [
"def joint_frequencies_combo(self, alleles, normalize):\n\n hap = self.build_intrenal_hap_dict(alleles)\n\n result = {c: popcount(A) for c,A in hap.items() }\n\n for C in combinations(hap, 2):\n result[C[0]|C[1]] = popcount(hap[C[0]]&hap[C[1]])\n\n for C in combinations(hap, 3... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
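The rec_freq body is cut off right after the length check. A plausible completion for one pair (an assumption, not the original code) counts positions where the two markers carry different genotype calls and divides by the number of positions compared:

```python
def rec_freq_pair(m1, m2):
    # assumed completion: recombination frequency = fraction of comparable
    # positions where the genotype calls ('a' vs 'b') differ
    tot_len = 0
    score = 0
    for g1, g2 in zip(m1, m2):
        if g1 not in "ab" or g2 not in "ab":
            continue  # skip missing or ambiguous calls
        tot_len += 1
        if g1 != g2:
            score += 1
    return score / tot_len

print(rec_freq_pair("aabb", "abbb"))  # 0.25
```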
Calculates the distances of a list of markers from the first marker. | def calc_distances(marker_list, rf_pairs):
final_distance = [[marker_list[0], 0]]
for i in range(1, len(marker_list)):
cur_markers = [marker_list[i-1], marker_list[i]]
for rf_pair in rf_pairs:
if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:
final_distance... | [
"def GetDistances(self,otherLandmarks,spacing=[1,1,1]):\n if self.GetNumberOfLandmarks() != otherLandmarks.GetNumberOfLandmarks(): raise Exception(\"Other landmarks must have same number of landmarks as this landmarks object.\")\n\n otherLandmarkList = otherLandmarks.GetLandmarks()\n distanceLi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
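calc_distances is likewise truncated, but its interface is clear: walk the ordered marker list and accumulate the pairwise recombination frequency between neighbours. A hedged sketch, assuming rf_pairs maps marker-pair tuples to rf values and that distances are summed directly:

```python
def calc_distances(marker_list, rf_pairs):
    # assumed completion: cumulative distance from the first marker
    final_distance = [[marker_list[0], 0]]
    for i in range(1, len(marker_list)):
        prev, cur = marker_list[i - 1], marker_list[i]
        for (m1, m2), rf in rf_pairs.items():
            if {m1, m2} == {prev, cur}:
                final_distance.append([cur, final_distance[-1][1] + rf])
                break
    return final_distance

rf = {("M1", "M2"): 0.10, ("M2", "M3"): 0.05}
print(calc_distances(["M1", "M2", "M3"], rf))
# [['M1', 0], ['M2', 0.1], ['M3', ~0.15]]
```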
Should index a batch in the form of a list of (id,url,other_data) | def index_batch(self,batch):
pass | [
"def bulk_index(data):\n\n def bulk_api_string(item):\n return f\"{{\\\"index\\\":{{}}\\n{json.dumps(item)}\"\n\n body = '\\n'.join([bulk_api_string(item) for item in data]) + '\\n'\n\n return make_request(\n requests.post,\n url=f\"{connection.hostname}:{connection.port}/{connection.i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
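index_batch is an abstract hook (the body is just pass). A minimal concrete override, assuming the (id, url, other_data) tuple layout from the docstring and a purely in-memory index (both assumptions, not part of the original class):

```python
class DictIndexer:
    """Toy indexer that stores each batch item in an in-memory dict."""

    def __init__(self):
        self.index = {}

    def index_batch(self, batch):
        # batch: list of (id, url, other_data) tuples
        for item_id, url, other_data in batch:
            self.index[item_id] = {"url": url, "data": other_data}

indexer = DictIndexer()
indexer.index_batch([(1, "http://example.com", {"title": "home"})])
print(indexer.index[1]["url"])  # http://example.com
```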
Test case for add_provisioning_request: Add a provisioning request | def test_add_provisioning_request(self):
body = PortProvisionRequest()
response = self.client.open('/api/provisioning/port',
method='POST',
data=json.dumps(body),
content_type='application/json')
... | [
"def add_provisioning_request():\n if connexion.request.is_json:\n discovery = PortProvisionRequest.from_dict(connexion.request.get_json())\n return discovery.save()",
"async def test_create(self):\n expected = {\n 'id': 'id'\n }\n profile = {\n 'name': ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for delete_provisioning_request: Deletes a port provisioning request | def test_delete_provisioning_request(self):
response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),
method='DELETE')
self.assert200(response, "Response body is : " + response.data.decode('utf-8')) | [
"def delete_provisioning_request(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n\n if doc:\n print(doc)\n doc.delete()\n return {\"status\": \"deleted\"}\n else:\n return 'Not Found', 404",
"async def test_delete(self):\n rsps = respx.delete(f'{PROVISIONING... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_provisioning_request_by_id: Get provisioning request by ID | def test_get_provisioning_request_by_id(self):
response = self.client.open('/api/provisioning/port/{requestId}'.format(requestId='requestId_example'),
method='GET')
self.assert200(response, "Response body is : " + response.data.decode('utf-8')) | [
"def get_provisioning_request_by_id(requestId):\n doc = PortProvisionRequest.get(id=requestId)\n if doc:\n return doc\n else:\n return 'Not Found', 404",
"async def test_retrieve_one(self):\n expected = {\n '_id': 'id',\n 'name': 'name',\n 'version': ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test case for get_requests: List server connectivity requests | def test_get_requests(self):
response = self.client.open('/api/provisioning/port',
method='GET')
self.assert200(response, "Response body is : " + response.data.decode('utf-8')) | [
"def get_requests(self):",
"def test_list_server(self):\n pass",
"def test_get_servers(self):\n response = self.client.open(\n '/v1/servers',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get mean/std and optional min/max of scalar x across MPI processes. | def statistics_scalar(x, with_min_and_max=False):
x = np.array(x, dtype=np.float32)
global_sum, global_n = np.sum(x), len(x)
mean = global_sum / global_n
global_sum_sq = np.sum((x - mean) ** 2)
std = np.sqrt(global_sum_sq / global_n) # compute global std
if with_min_and_max:
global_mi... | [
"def mpi_statistics_scalar(x, with_min_and_max=False):\n x = np.array(x, dtype=np.float32)\n global_sum, global_n = mpi_sum([np.sum(x), len(x)])\n mean = global_sum / global_n\n\n global_sum_sq = mpi_sum(np.sum((x - mean) ** 2))\n std = np.sqrt(global_sum_sq / global_n) # compute global std\n\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
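Despite the "across MPI processes" wording, this statistics_scalar variant computes everything locally (compare the mpi_statistics_scalar negative, which wraps the sums in mpi_sum). The snippet is also cut off inside the min/max branch; a self-contained completion under the local-only assumption:

```python
import numpy as np

def statistics_scalar(x, with_min_and_max=False):
    x = np.array(x, dtype=np.float32)
    mean = np.sum(x) / len(x)
    std = np.sqrt(np.sum((x - mean) ** 2) / len(x))  # population std
    if with_min_and_max:
        # assumed completion of the truncated branch
        return mean, std, np.min(x), np.max(x)
    return mean, std

print(statistics_scalar([1.0, 2.0, 3.0, 4.0], with_min_and_max=True))
# approximately (2.5, 1.118, 1.0, 4.0)
```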
Parse a kallisto abundance.tsv file and return a dict mapping transcriptId to est_tpm. Does not return a value for transcripts where est_tpm is 0. | def parseKallisto(fname):
logging.debug("parsing %s" % fname)
ifh = open(fname)
ifh.readline()
d = {}
for line in ifh:
fs = line.rstrip("\n").split("\t")
if fs[tpmColumnIndex]=="0" and not addZeros:
continue
d[fs[0]] = float(fs[tpmColumnIndex])
return d | [
"def parse_trflp(lines):\r\n\r\n sample_ids = []\r\n otu_ids = []\r\n data = []\r\n non_alphanum_mask = re.compile('[^\\w|^\\t]')\r\n # not sure why the above regex doesn't cover the following regex...\r\n dash_space_mask = re.compile('[_ -]')\r\n\r\n for i, line in enumerate(lines):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
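parseKallisto reads two globals, tpmColumnIndex and addZeros, that are defined elsewhere in its module. A runnable sketch with assumed values: kallisto's abundance.tsv columns are target_id, length, eff_length, est_counts, tpm, so the tpm column index would be 4.

```python
import logging

tpmColumnIndex = 4   # assumed: tpm column of target_id/length/eff_length/est_counts/tpm
addZeros = False     # assumed: skip transcripts whose est_tpm is 0

def parseKallisto(fname):
    logging.debug("parsing %s" % fname)
    d = {}
    with open(fname) as ifh:
        ifh.readline()  # skip the header line
        for line in ifh:
            fs = line.rstrip("\n").split("\t")
            if fs[tpmColumnIndex] == "0" and not addZeros:
                continue
            d[fs[0]] = float(fs[tpmColumnIndex])
    return d
```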
Given a list of cellNames and a list of transcript > count dictionaries, write out a matrix with transcript > counts in columns. | def outputBigMatrix(cellNames, results, outFname, isGene=False):
logging.info("Writing data to file %s" % outFname)
ofh = open(outFname, "w")
# write header
if isGene:
ofh.write("#gene\t%s\n" % "\t".join(cellNames))
else:
ofh.write("#transcript\t%s\n" % "\t".join(cellNames))
... | [
"def create_count_matrix(filename, output_dir):\n\n import os\n import json\n\n word_tag_output = \"tag_word_count.json\"\n bigram_matrix_name = \"bigram_count.json\"\n unigram_matrix_name = \"unigram_count.json\"\n trigram_matrix_name = \"trigram_count.json\"\n\n sub_dir = os.path.join(output_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a list of dicts mapping transcript > tpm, and a map of transcript > gene, map all transcripts to genes and return a list of gene > sum-of-tpms dicts. If we have no gene ID, drop the transcript entirely. | def sumTransToGene(transDictList, transFile):
transToGene = parseDict(transFile, stripDot=True)
logging.info("Mapping %d transcript IDs to gene IDs" % len(transToGene))
newRes = []
noMapTransIds = set()
for transCounts in transDictList:
geneCounts = defaultdict(float)
for transId, c... | [
"def Transcript2GeneTree(tree,\n map_transcript2gene,\n map_gene2transcripts):\n raise NotImplementedError()\n MapTaxa(tree, map_transcript2gene)\n\n # get all leaves and sort by taxon\n ids = tree.get_terminals()\n\n # sort identities by taxa\n ids.so... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
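The parseDict helper is not shown; from its call site it returns a transcript > gene mapping, with stripDot=True presumably removing Ensembl-style version suffixes. A hedged, self-contained illustration of the mapping-and-summing step under those assumptions (all inputs below are hypothetical):

```python
from collections import defaultdict

def strip_dot(trans_id):
    # assumed behaviour of stripDot=True: 'ENST00000456328.2' -> 'ENST00000456328'
    return trans_id.split(".")[0]

# hypothetical inputs
trans_to_gene = {"ENST01": "GENE_A", "ENST02": "GENE_A", "ENST03": "GENE_B"}
trans_counts = {"ENST01.1": 5.0, "ENST02.2": 3.0, "ENST03.1": 2.0, "ENST99.1": 1.0}

gene_counts = defaultdict(float)
for trans_id, tpm in trans_counts.items():
    gene = trans_to_gene.get(strip_dot(trans_id))
    if gene is None:
        continue  # no gene ID: drop the transcript entirely
    gene_counts[gene] += tpm

print(dict(gene_counts))  # {'GENE_A': 8.0, 'GENE_B': 2.0}
```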
Records a param measurement and returns it. | def measure(self, timestamp, param):
if param in self.faulty:
value = random.randint(*self.FAULTY[param])
else:
value = self.patient.measure(param)
self.__buffer[param].append(Measurement(timestamp, value))
return value | [
"def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError",
"def get_last_measurement(self, param):\n return self.__buffer[param][-1]",
"def log_param(self, run_id, param):\n self.log_batch(run_id, metrics=[], params=[param], tags=[])",
"def log_para... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
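measure depends on state set up elsewhere: the FAULTY class ranges, a patient object, and the private __buffer of Measurement records that get_last_measurement (next row) reads back. A minimal harness with assumed shapes for all three:

```python
import random
from collections import defaultdict, namedtuple

Measurement = namedtuple("Measurement", ["timestamp", "value"])

class Sensor:
    FAULTY = {"pulse": (0, 250)}  # assumed: garbage-value range per param

    def __init__(self, patient):
        self.patient = patient
        self.faulty = set()          # params currently simulating a fault
        self.__buffer = defaultdict(list)

    def measure(self, timestamp, param):
        if param in self.faulty:
            value = random.randint(*self.FAULTY[param])
        else:
            value = self.patient.measure(param)
        self.__buffer[param].append(Measurement(timestamp, value))
        return value

    def get_last_measurement(self, param):
        return self.__buffer[param][-1]

class FakePatient:
    def measure(self, param):
        return 72

sensor = Sensor(FakePatient())
sensor.measure(0, "pulse")
print(sensor.get_last_measurement("pulse"))  # Measurement(timestamp=0, value=72)
```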
Gets the param's last measurement. | def get_last_measurement(self, param):
return self.__buffer[param][-1] | [
"def last_percept(self):\n return self.percept",
"def last_fmeasure(self):\n return self.get_fvalue(self.last_position())",
"def get_output(self, last = 1):\n\t\tif last == -1:\n\t\t\ttmp = self.out_param[::]\n\t\t\tself.out_param = []\n\t\t\treturn tmp\n\t\treturn self.out_param[-last:]",
"def last... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initial profanity check using profanity_check | def profanityCheck(text):
return predict_prob([text])[0] | [
"def test_create_tokens_with_profanity():\n list_responses = ['test this code', ' for bad words', 'such as shit']\n check = edurate_gensim.create_tokens(list_responses)\n assert check == [['test', 'code'], ['bad', 'words']]\n assert (\"shit\" in check) is False",
"def main():\n from argparse import... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
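predict_prob comes from the profanity-check package (pip install alt-profanity-check for the maintained fork with the same API). It takes a list of strings and returns an array of probabilities, which explains the [text] wrapping and the [0] indexing:

```python
from profanity_check import predict_prob

def profanityCheck(text):
    return predict_prob([text])[0]

print(profanityCheck("have a nice day"))  # low probability, close to 0
print(profanityCheck("what the hell"))    # noticeably higher
```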
Returns a DrsClient. This will delete any documents, aliases, or users made by this client after the test has completed. Currently the default user is the admin user. Runs once per test. | def drs_client(indexd_server):
try:
user = create_user("user", "user")
except Exception:
user = ("user", "user")
client = DrsClient(baseurl=indexd_server.baseurl, auth=user)
yield client
clear_database() | [
"def admin_drf_client(admin_user):\n client = APIClient()\n client.force_authenticate(user=admin_user)\n return client",
"def get_test_client(self):\n return self._connection.get_client('azure.devops.released.test.test_client.TestClient')",
"def _get_crm_client(self, user_email):\n user_s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |