Dataset columns:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Function to gracefully depart from the DHT. If forward=False, the DHT is shutting down and we don't need to move the data.
def _depart(self, data, sock, forward=True): if forward: self.send_replicas_forward() time.sleep(1) self.send_data_forward() # Let the previous node know who its new next node is, after I depart self.neighbors.send_back('next:{}:{}:{}'.format(self.neighbors.fr...
[ "def _StopForwarder(self):\n if self._flag_changer:\n print('Restoring flags while stopping forwarder, but why?...')\n self._flag_changer.Restore()\n self._flag_changer = None\n print('Stopping device forwarder...')\n forwarder.Forwarder.UnmapAllDevicePorts(self._device)", "def remove_forw...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In case of departing, sends all stored data to the next server
def send_data_forward(self): front_ip = self.neighbors.front_ip front_port = self.neighbors.front_port self.data_lock.acquire() for key, value in self.data.iteritems(): #Process(target= lambda : self.neighbors.send_front('insert_after_depart:{}:{}'.format(value[0],value[1])))...
[ "def unpack_data(self, usnap=.2): # 2/10th second sleep between empty requests\n for new_data in self.socket:\n if new_data:\n self.data_stream.unpack(new_data)\n else:\n sleep(usnap) # Sleep in seconds after an empty look up.", "def _data_handler(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In case of departing, sends all stored replicas to the next server
def send_replicas_forward(self): self.data_lock.acquire() for key, value in self.replicas.itervalues(): Process(target= lambda : self.neighbors.send_front('add:{}:{}:1:{}'.format(key, value, self.hash))).start() self.data_lock.release()
[ "async def _send_to_replicas(self, request, replicas):\n async with trio.open_nursery() as nursery:\n for replica in replicas:\n nursery.start_soon(self._sendto_common, request, replica)", "def master_send_continue():\n for ii in get_slaves():\n mpi_comm.send(None, dest=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A node behind us wants to add more replicas of what it sent me. If I already have it, then push it forward.
def _add_replica(self, data, sock): _, key, value, copies, host = data.split(':') key_hash = sha1(key).hexdigest() self.data_lock.acquire() #If I don't have it add it as a replica if self.replicas.get(key_hash, None) != (key, value): self.replicas[key_hash] = (key, va...
[ "def take_over(self, req):\n self.assert_is_internal(req)\n status = Response(content_type='text/plain')\n data = req.json\n nodes = data['other']\n self_name = data['name']\n bad_node = data['bad']\n assert self_name != bad_node\n backups = data['backups']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A new (key, value) pair is inserted. If it doesn't belong to us, send it forward; otherwise add it with replication 1.
def _insert(self, data, sock): _, key, value = data.split(':') key_hash = sha1(key).hexdigest() self.data_lock.acquire() if self.data.get(key_hash, (None, None))[1] == value: # If I already have with the same value return self.data_lock.release() ...
[ "def _add_replica(self, data, sock):\n _, key, value, copies, host = data.split(':')\n key_hash = sha1(key).hexdigest()\n self.data_lock.acquire()\n #If I don't have it add it as a replica\n if self.replicas.get(key_hash, None) != (key, value):\n self.replicas[key_hash]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints my data and forwards the message
def _print_my_data(self, data, sock): _, sender_ip , sender_port, sender_hash = data.split(':') if self.hash != sender_hash: self.data_lock.acquire() if self.replication > 1: x = '\n' +self.host +"/"+str(self.port) + " with id "+ self.id +'\n Data->' + str([value...
[ "def server_print(self, some_data):\n\n some_data = some_data + '\\n' if some_data[-1] != '\\n' else some_data\n self.send_data(some_data)\n return", "def display(self):\n\n print(self.payload)", "def prints(self, data, base=None):\r\n return self.write(self._process(data, base))"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts data printing on all servers. data = print_all_data
def _print_all_data(self, data, sock): self.data_lock.acquire() if self.replication > 1: x = self.host + "/" + str(self.port) + " with id " + self.id + '\n Data->' + str([value for value in self.data.itervalues()]) + '\n' +' Replicas->' + str([value for value in self.replicas.itervalues()]) ...
[ "def print_all_records(self):", "def servers():\n\tg.db = mysqladm.core.db_connect()\n\n\t## Load servers\n\trows = mysqladm.servers.get_servers()\n\t\n\tfor row in rows:\n\t\tprint(row['hostname'])", "def _print_my_data(self, data, sock):\n _, sender_ip , sender_port, sender_hash = data.split(':')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take a tensor, downscale by taking the average among the [scale] points.
def get_downscaled_tensor(raw_tensor, scale): # do nothing if the user does not want to downscales it if scale == 1: return raw_tensor # reshape the data so that each group in stored in one axis avg_shape = np.asarray(raw_tensor.shape) // scale shape_list = [] for s in avg_shape: ...
[ "def _unscale(tensor, minimum, maximum):\n b, c, h, w = tensor.shape\n out = tensor.view(b, c, h * w)\n out = (out + 1) / 2 # out has range (0, 1)\n out = out * maximum + minimum # out has original range\n return out.view(b, c, h, w)", "def _scale(tensor):\n b, c, h, w = tensor.shape\n out = tensor.view(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the `alignment` in FASTA format to either a file object or file
def write_fasta(alignment, dest): file_obj = None if isinstance(dest, str): file_obj = open(dest, "w") else: file_obj = dest for name, seq in alignment.items(): file_obj.write('>%s\n%s\n' % (name, seq) ) if isinstance(dest, str): file_obj.close()
[ "def write_alignment(self, alignment):\n if not isinstance(alignment, MultipleSeqAlignment):\n raise TypeError(\"Expected an alignment object\")\n\n if len({len(x) for x in alignment}) > 1:\n raise ValueError(\"Sequences must all be the same length\")\n\n # We allow multip...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Populate 'self' from a CorticalSurfaceData instance with additional CortexData specific attributes.
def populate_cortex(self, cortex_surface, cortex_parameters=None): for name in cortex_surface.trait: try: setattr(self, name, getattr(cortex_surface, name)) except Exception, exc: self.logger.exception(exc) self.logger.error("Could not set ...
[ "def __init__(self):\n\n super(LandSpillData, self).__init__()\n\n # Reference dynamic viscosity used in temperature-dependent viscosity\n self.add_attribute('ref_mu', 332.) # unit: mPa-s (= cP = 1e-3kg/s/m)\n\n # Reference temperature at which the nu_ref is\n self.add_attribute('...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Define shortcut for retrieving RegionMapping map array.
def region_mapping(self): if self.region_mapping_data is None: return None return self.region_mapping_data.array_data
[ "def get_maps(self):\n return # osid.mapping.MapList", "def _GetAgentMap(self,ID):\n agnt = self.agents[ID]\n if agnt.EgoCentric:\n array =agnt.FullEgoCentric\n else:\n array = agnt.FullEgoCentric[agnt.borders[0]:agnt.borders[1],agnt.borders[2]:agnt.borders[3]]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs seqstats and returns dictionary of metrics. Formats metrics to numeric type as well.
def run_seqstats(fasta): seqstats = Popen(f"seqstats {fasta}", shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = seqstats.communicate() stdout, stderr = ( stdout.decode("utf-8"), stderr.decode("utf-8"), ) assert ( "command not found" not in stderr ), "seqstats was no...
[ "def get_stats(self):\n\n \"\"\"#See samtools stats\n # 1526795 + 0 in total (QC-passed reads + QC-failed reads)\n 13 + 0 secondary\n 0 + 0 supplementary\n 0 + 0 duplicates\n 3010 + 0 mapped (0.20% : N/A)\n 1526782 + 0 paired in sequencing\n 763391 + 0 read1\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Square area method unittest
def test_area_method(self): sq6 = Square(10) self.assertEqual(sq6.area(), 100)
[ "def testarea2(self):\n a = Square(4, 100, 20, 10)\n self.assertEqual(a.area(), 16)", "def test_area(self):\n r1 = Rectangle(3, 2)\n area = r1.area()\n self.assertEqual(area, 6)\n\n r2 = Rectangle(3, 2)\n area = Rectangle.area(r2)\n self.assertEqual(area, 6)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Square display method unittest
def test_display_method(self): output = io.StringIO() sys.stdout = output sq7 = Square(2) sq7.display() sys.stdout = sys.__stdout__ self.assertEqual(output.getvalue(), "##\n##\n")
[ "def test_display_method_w_coordinates(self):\n output = io.StringIO()\n sys.stdout = output\n sq9 = Square(2, x=1, y=1)\n sq9.display()\n sys.stdout = sys.__stdout__\n self.assertEqual(output.getvalue(), \"\\n ##\\n ##\\n\")", "def test_display(self):\n output = S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Square __str__ method unittest
def test_str_method(self): sq8 = Square(2, id=99) str_s = sq8.__str__() self.assertEqual(str_s, '[Square] (99) 0/0 - 2')
[ "def test_str_xy(self):\n s1 = Square(7, 8, 9)\n s1.id = 4\n self.assertEqual(s1.__str__(), \"[Square] (4) 8/9 - 7\")", "def test_str_size(self):\n s1 = Square(1)\n s1.id = 3\n self.assertEqual(s1.__str__(), \"[Square] (3) 0/0 - 1\")", "def test_string_representation(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Square display method unittest
def test_display_method_w_coordinates(self): output = io.StringIO() sys.stdout = output sq9 = Square(2, x=1, y=1) sq9.display() sys.stdout = sys.__stdout__ self.assertEqual(output.getvalue(), "\n ##\n ##\n")
[ "def test_display_method(self):\n output = io.StringIO()\n sys.stdout = output\n sq7 = Square(2)\n sq7.display()\n sys.stdout = sys.__stdout__\n self.assertEqual(output.getvalue(), \"##\\n##\\n\")", "def test_display(self):\n output = StringIO()\n sys.stdout...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up a fake field on the other side of a foreign key relation.
def install_inverse(cls, other_model, field_name, field): inverse_name = field._options.get("inverse", plural(other_model._table_name)) cls._fields[inverse_name] = Field(other_model, virtual=True, forward_name=field_name) cls._fields[inverse_name].__set_name__(cls, inverse_name) setattr(...
[ "def test_formfield_for_foreignkey_no_override(self):\n with patch('dbentry.base.admin.super'):\n with patch('dbentry.base.admin.make_widget') as make_mock:\n inline = AutocompleteMixin()\n inline.tabular_autocomplete = []\n inline.formfield_for_foreign...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the query strategy of the experiment.
def set_query_strategy(self, strategy="QueryInstanceUncertainty", **kwargs): # check if self._existed_query_strategy: raise Exception("You already has set the query strategy,don`t has to set it again.") # user-defined strategy if callable(strategy): self.__custom...
[ "def _set_search(self, strategy = \"d\"):\n\n if strategy == \"a\":\n self.search = self.search_aggressive\n elif strategy == \"d\":\n self.search = self.search_default\n elif strategy == \"c\":\n self.search = self.search_cautious\n elif strategy == \" e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set the data split indexes from the specific parameters supplied by the user.
def set_data_split(self, train_idx, test_idx, label_idx, unlabel_idx): if not (len(train_idx) == len(test_idx) == len(label_idx) == len(unlabel_idx)): raise ValueError("_train_idx, _test_idx, _label_idx, _unlabel_idx " "should have the same split count (length)") ...
[ "def split_dataset_by_indices():", "def set_data(self, data, index=None):\n if data is not None:\n data = self.data_class(data)\n logger.info(\"Length of supplied data: %d\", data.ndata)\n self.check_data(data)\n self._data = data\n if index is not Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a path to model files. model_path: string
def create_model_path(model_path): if not model_path.startswith("/") and not model_path.startswith("models/"): model_path="/" + model_path if not model_path.startswith("models"): model_path = "models" + model_path if not model_path.endswith(".p"): model_path+=".p" return model_p...
[ "def _model_path(self) -> str:\n return Container().data_path() + '/' + self.MODEL_NAME", "def make_model_path(model_base_path: str, model_name: str,\n version: int) -> str:\n return os.path.join(model_base_path, model_name, str(version))", "def make_model_pickle_path(self):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses aspell to spell correct an input string. Requires aspell to be installed and added to the path. Returns the spell corrected string if aspell is found, the original string if not. string: string
def spell_correct(string): # Create a temp file so that aspell could be used # By default, tempfile will delete this file when the file handle is closed. f = tempfile.NamedTemporaryFile(mode='w') f.write(string) f.flush() f_path = os.path.abspath(f.name) try: p = os.popen(aspell_pat...
[ "def fix_spelling(text: str) -> str:\n return EnglishSpellCheckerSingleton.get_instance().fix_text(text)", "def spelling_corrections(text):\r\n\r\n text = re.sub(r'(?<![a-z])affeceted|afected|afective|afeected(?![a-z])', r'affected', text)\r\n text = re.sub(r'(?<![a-z])aply|applied|appy|aplly|aaply|aappl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates ngrams (word sequences of fixed length) from an input token sequence. tokens is a list of words. min_n is the minimum length of an ngram to return. max_n is the maximum length of an ngram to return. Returns a list of ngrams (words separated by a space).
def ngrams(tokens, min_n, max_n): all_ngrams = list() n_tokens = len(tokens) for i in range(n_tokens): for j in range(i + min_n, min(n_tokens, i + max_n) + 1): all_ngrams.append(" ".join(tokens[i:j])) return all_ngrams
[ "def get_ngrams(self,list_of_tokens, min_n, max_n):\n all_ngrams = list()\n for tokens in list_of_tokens:\n n_tokens = len(tokens)\n for i in range(n_tokens):\n for j in range(i + min_n, min(n_tokens, i + max_n) + 1):\n all_ngrams.append(\" \".jo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses a fisher test to find words that are significant in that they separate high scoring essays from low scoring essays. text is a list of input essays. score is a list of scores, with score[n] corresponding to text[n] max_feats is the maximum number of features to consider in the first pass max_feats2 is the maximum n...
def get_vocab(text, score, max_feats=750, max_feats2=200): dict = CountVectorizer(ngram_range=(1,2), max_features=max_feats) dict_mat = dict.fit_transform(text) set_score = numpy.asarray(score, dtype=numpy.int) med_score = numpy.median(set_score) new_score = set_score if(med_score == 0): ...
[ "def otherFeatures(tweet):\r\n sentiment = sentiment_analyzer.polarity_scores(tweet)\r\n words = textclean(tweet) \r\n \r\n syllables = textstat.syllable_count(words)\r\n num_chars = sum(len(w) for w in words)\r\n num_chars_total = len(tweet)\r\n num_terms = len(tweet.split())\r\n num_words ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates string edit distance between string 1 and string 2. Deletion, insertion, substitution, and transposition all increase edit distance.
def edit_distance(s1, s2): d = {} lenstr1 = len(s1) lenstr2 = len(s2) for i in xrange(-1, lenstr1 + 1): d[(i, -1)] = i + 1 for j in xrange(-1, lenstr2 + 1): d[(-1, j)] = j + 1 for i in xrange(lenstr1): for j in xrange(lenstr2): if s1[i] == s2[j]: ...
[ "def edit_distance(string1, string2):\n distance = []\n for _ in xrange(len(string1) + 1):\n distance.append([0 for _ in xrange(len(string2) + 1)])\n \n for i in xrange(1, len(string1) + 1):\n distance[i][0] = i\n for i in xrange(1, len(string2) + 1):\n distance[0][i] = i\n \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fits a classifier to data and a target score. clf is an input classifier that implements the fit method. arr is a data array (X).
def gen_model(clf, arr, sel_score): set_score = numpy.asarray(sel_score, dtype=numpy.int) sim_fit = clf.fit(arr, set_score) return(sim_fit)
[ "def fit(self, X, y=None, _refit=True):\n\n # If _refit=True and y is None -> return error\n if _refit and y is None:\n raise ValueError('Parameter \"y\" was not given!')\n\n # If _refit=False and y not None -> return warning (y will be ignored)\n if not _refit and y is not No...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates predictions on a novel data array using a fit classifier. clf is a classifier that has already been fit. arr is a data array identical in dimension to the array clf was trained on. Returns the array of predictions.
def gen_preds(clf, arr): if(hasattr(clf, "predict_proba")): ret = clf.predict(arr) # pred_score=preds.argmax(1)+min(x._score) else: ret = clf.predict(arr) return ret
[ "def predict(self, test_data, predict_proba = False, pred_class_and_proba = False):\n pass", "def predict(self, X: np.ndarray) -> np.ndarray:\n return np.array([self._classify(x) for x in X])", "def predict(self, X):\n # predict the class of y with classifier\n classes = self.clf.pre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates kappa correlation between rater_a and rater_b. Kappa measures how well 2 quantities vary together. rater_a is a list of rater a scores rater_b is a list of rater b scores min_rating is an optional argument describing the minimum rating possible on the data set max_rating is an optional argument describing th...
def quadratic_weighted_kappa(rater_a, rater_b, min_rating=None, max_rating=None): assert(len(rater_a) == len(rater_b)) rater_a = [int(a) for a in rater_a] rater_b = [int(b) for b in rater_b] if min_rating is None: min_rating = min(rater_a + rater_b) if max_rating is None: max_rating ...
[ "def quadratic_weighted_kappa(rater_a, rater_b, min_rating=None, max_rating=None):\n import numpy as np\n rater_a = np.array(rater_a, dtype=int)\n rater_b = np.array(rater_b, dtype=int)\n assert (len(rater_a) == len(rater_b))\n if min_rating is None:\n min_rating = min(min(rater_a), min(rater_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utilize wordnet (installed with nltk) to get synonyms for words. word is the input word. Returns a list of unique synonyms.
def get_wordnet_syns(word): synonyms = [] regex = r"_" pat = re.compile(regex) synset = nltk.wordnet.wordnet.synsets(word) for ss in synset: for swords in ss.lemma_names: synonyms.append(pat.sub(" ", swords.lower())) synonyms = f7(synonyms) return synonyms
[ "def find_synonyms(word):\n synonyms = [' '.join(syn_name.split('_')) for syn in wordnet.synsets(word.lower()) for syn_name in syn.lemma_names()\n if syn_name.lower() != word.lower() and\n len(syn_name.split('_')) == 1 and\n lemmatizer.lemmatize(syn_name) != lemmatize...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the words that separate a list of tokens from a background corpus. Basically this generates a list of informative/interesting words in a set. toks1 is a list of words. Returns a list of separator words.
def get_separator_words(toks1): tab_toks1 = nltk.FreqDist(word.lower() for word in toks1) if(os.path.isfile(ESSAY_COR_TOKENS_PATH)): toks2 = pickle.load(open(ESSAY_COR_TOKENS_PATH, 'rb')) else: essay_corpus = open(ESSAY_CORPUS_PATH).read() essay_corpus = sub_chars(essay_corpus) ...
[ "def find_words(tokens_inside_word_group):\n words = []\n for token in tokens_inside_word_group:\n if token == CASE_GEN_SYM:\n continue\n if token in (RAND_GEN_SYM, VARIATION_SYM, ARG_SYM):\n return words\n words.append(token)\n return words", "def extract_peop...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Literally encodes the plus sign. Input is a string; returns the string with plus signs encoded.
def encode_plus(s): regex = r"\+" pat = re.compile(regex) return pat.sub("%2B", s)
[ "def quote_plus():", "def encodeString():\n pass", "def test_unquote_plus(self):\r\n\r\n result = colony.unquote_plus(\"Hello+World\")\r\n self.assertEqual(result, \"Hello World\")\r\n\r\n result = colony.unquote_plus(\"Ol%C3%A1+Mundo\")\r\n self.assertEqual(result, \"Olá Mundo\")...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create mapping of chars to binary codes based on frequency.
def _create_mapping(text): chars = get_chars_in_order_of_frequency(text) return dict(zip(chars, ['0'* i + '1' for i in xrange(len(chars))]))
[ "def canonical_huffman(__freq_map):\n if not isinstance(__freq_map, dict):\n raise TypeError(\"dict expected, got '%s'\" % type(__freq_map).__name__)\n\n if len(__freq_map) < 2:\n if len(__freq_map) == 0:\n raise ValueError(\"cannot create Huffman code with no symbols\")\n # On...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the chars in the `text` ordered by their frequency, descending.
def get_chars_in_order_of_frequency(text): def get_letter_frequencies(text): """Make dictionary of chars mapped to their frequency of use.""" frequencies = defaultdict(float) text_length = len(text) for letter in text: frequencies[letter] += 1.0 / text_length re...
[ "def character_frequency_list(self, text):\n\t\tif type(text) is str:\n\n\t\t\tfrequency = {}\n\t\t\tfrequency_list = []\n\t\t\ttreated_text = text.replace(\" \",\"\")\n\n\t\t\tfor c in treated_text:\n\n\t\t\t\tif c in frequency:\n\n\t\t\t\t\tfrequency[c] += 1\n\n\t\t\t\telse:\n\n\t\t\t\t\tfrequency[c] = 1\n\n\t\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Executes the logic to finally do the stratification. The bounding box and the cell-side length are handed over to a Node.js library, Turf.js. This part of our library causes a dependency on Node.
def stratify(self): # This is required because current working directory will the place where # the user ran their script from, not THIS directory. this_file_directory = os.path.dirname(os.path.abspath(__file__)) # Delegate this task to the Turf.js package using Node.js process...
[ "def __init__(self, myshp, mydbf, west_lon, east_lon, south_lat, north_lat):\n self.reader = shapefile.Reader(shp = myshp, dbf = mydbf)\n\n# The following four coordinate inputs must be obtained outside of the scope of the program. Go to\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.htm...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays initial battle trainer animations and sends out first pokemon
def initialize_battle(trainer): #flash() draw_rect() # draws tect box enemy_pos = (900,50) player_pos = (0,290) if (trainer): # checks if battle is wild or trainer blit(opponent, enemy_pos) blit(player,player_pos) for x in range(110): # traine...
[ "def _send_pokemon(self) -> None:\n\n self._player.animation()\n\n self._dialog.set_text(\"\")\n self._dialog.do(FadeIn(0.4) |\n (Delay(0.2) + CallFunc(self._dialog.set_text,\n I18n().get(\"BATTLE.GO_POKEMON\").format(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends out user pokemon at the start of battle
def send_player_pokemon(pok): global active_player_pokemon active_player_pokemon = pok # sets active user pokemon to the input, pok blank_bottom() blank_text() prompt_text = myfont.render("GO "+pok.species+"!", True,BLACK) name_text = tinyfont.render(pok.species, True...
[ "def _send_pokemon(self) -> None:\n\n self._player.animation()\n\n self._dialog.set_text(\"\")\n self._dialog.do(FadeIn(0.4) |\n (Delay(0.2) + CallFunc(self._dialog.set_text,\n I18n().get(\"BATTLE.GO_POKEMON\").format(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes white background from sprites
def rem_background(pok): img = pok.sprite.convert() img.set_colorkey(WHITE) spixel = pygame.PixelArray(img) # addresses different shades of white spixel.replace(WHITE, (255,255,255), distance = 0.029) spixel.close() img.set_colorkey(WHITE) pok.sprite = img
[ "def _erase (self):\n self.screen.blit_background (self._rect)", "def clear(self):\n self.sprites = []", "def clear(self, surface, background):\n self._clear_active = True\n if hasattr(background, 'width'):\n surface._blit_clear(background, self._sprites_drawn.values())\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows user to choose to battle
def user_choose_action(pok): blank_text() choice_text = myfont.render("Battle (b)",True,BLACK) blit(choice_text, text_blit_pos) update_text()
[ "def start_battle(self):\n\t\tdone = False\n\t\tif interface.fight not in g.pending_actions and g.PC.chosen_bobot:\n\t\t\tself.build_menu()\n\t\t\tdone = True\n\t\treturn done", "def battle(self, squad1_name, squad2_name) -> None:\n squad1 = self.get_squad(squad1_name)\n squad2 = self.get_squad(squa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that runs entire battle. Takes in two parties of Pokemon, runs battle to completion and takes keyboard input
def complete_battle(player_party, opp_party, trainer): #plays background music pygame.mixer.music.load('Battle Music.mp3') pygame.mixer.music.play(-1) global big_battle, choosing_action, choosing_move, action big_battle = True # battle running initialize_battle(trainer) ...
[ "def run(self):\n # Advance to the battle from either of these states:\n # 1) the player is talking with an npc\n # 2) the battle has already started but there's initial text\n # xyz wants to battle, a wild foobar appeared\n self.skip_start_text()\n\n # skip a few hundr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a string cookie into a dict
def cookie_to_dict(cookie): cookie_dict = dict() C = Cookie.SimpleCookie() C.load(cookie) print cookie print '*', C for morsel in C.values(): cookie_dict[morsel.key] = morsel.value return cookie_dict
[ "def parse_cookie(cookie: str) -> Dict[str, str]:\n cookiedict = {}\n for chunk in cookie.split(str(\";\")):\n if str(\"=\") in chunk:\n key, val = chunk.split(str(\"=\"), 1)\n else:\n # Assume an empty name per\n # https://bugzilla.mozilla.org/show_bug.cgi?id=16...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
we need the Addition object to be available in each test case, hence we initialize it in the setUp method.
def setUp(self): self.calc = Addition()
[ "def setUp(self):\r\n\r\n self.DUT = Allocation()", "def setUp(self):\r\n\r\n self.initialize_account()\r\n self.initialize_categories()\r\n self.initialize_products()\r\n \r\n self.admin_user.staff.services.add(self.product_1, self.product_2)", "def setUp(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override in subclass to reserve a certain number of attributes that can't be used for chunks.
def _reservedAttributes(cls): return 0
[ "def Attributes(self) -> _n_5_t_17:", "def initAttributes(self):\n pass", "def ensure_has_attrs(self, *args):\n for attr in args:\n if attr not in self:\n raise self.mk_except('Block(ty=' + self.block_type + ') ' +\n 'missing attr \"' + att...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Monkeypatched version of SDBConnection.put_attributes that uses POST instead of GET. The GET version is subject to the URL length limit, which kicks in before the 256 x 1024 limit for attribute values. Using POST prevents that.
def _put_attributes_using_post(self, domain_or_name, item_name, attributes, replace=True, expected_value=None): domain, domain_name = self.get_domain_and_name(domain_or_name) params = {'DomainName': domain_name, 'ItemName': item_name} self._build_name_value_list(p...
[ "def do_put(url, content_type = 'text/plain', accept = 'text/plain', categories = [], attributes = [], links = [], locations = []):\n return [], [], [], []", "def test_catalog_attribute_set_repository_v1_save_put(self):\n pass", "def _upload_attributes(self):\n if self._attributes_queue.num_ent...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retry an SDB operation while the failure matches a given predicate and until a given timeout expires, waiting a given amount of time in between attempts. This function is a generator that yields contextmanagers. See doctests below for example usage.
def retry_sdb(retry_after=a_short_time, retry_for=10 * a_short_time, retry_while=no_such_domain): if retry_for > 0: go = [None] @contextmanager def repeated_attempt(): try: yield except BotoServerError as e: ...
[ "def retryingIter(queryGenerator):\n lastCursor = None\n for i in range(100):\n query = queryGenerator()\n if lastCursor:\n query.with_cursor(lastCursor)\n try:\n for item in query:\n lastCursor = query.cursor()\n yield item\n except Timeout:\n logging.info('Attempt #%d fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of all the distinct sampling frequencies in the site. Returns List[float]: the unique sampling frequencies in a site.
def getSampleFreqs(self) -> List[float]: sampleFreqs = set(self.fs.values()) return sorted(list(sampleFreqs))
[ "def getSampleFreqs(self) -> List[float]:\n sampleFreq = set()\n for site in self.sites:\n sampleFreq.update(self.getSiteData(site).getSampleFreqs())\n return sorted(list(sampleFreq))", "def unique_frequencies(self):\n return iterkeys(self.frequency_groups)", "def generate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the data reader of a measurement at a site
def getMeasurement(self, meas: str): if self.checkMeasurement(meas): return self.readers[meas] self.printWarning( "Measurement directory {} for site {} not found".format(meas, self.siteName) ) return False
[ "def readDataFromURL():\n return", "def get_data(self,sensor):\n if sensor.id in self.measurements:\n return self.measurements[sensor.id]\n else: raise Exception(\"Sensor has no measurements available\")", "def get(name, time='now'):\n sensor_param = SensorParam(name, 'weather_dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the sample frequency in Hz of a particular measurement at a site
def getMeasurementSampleFreq(self, meas: str) -> float: return self.fs[meas]
[ "def get_freq(self, site):\n count = 0.0\n struct, dsites = site\n counts, total = self.counts[struct]\n for dsite in dsites:\n count += counts[dsite]\n return count / total", "def _get_frequency(self) -> float:\n return self._parent._lib.get_frequency(self._pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the time data path for a measurement at a site
def getMeasurementTimePath(self, meas: str) -> str: return os.path.join(self.timePath, meas)
[ "def datapath(self):", "def datapath(path: str) -> Path:\n return Path(DATA_PATH) / path", "def req_hydrodata(sites, start_date, end_date, url_top):\n sites_string = ','.join(sites)\n url = url_top +'iv/?site=' + sites_string + '&startDT=' + \\\n start_date + '&endDT=' + end_date + '&para...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the statistic data path for a measurement at a site
def getMeasurementStatPath(self, meas: str) -> str: return os.path.join(self.statPath, meas)
[ "def get_stats_file_path(self):\n return os.path.join(self.webroot, 'stats.json')", "def datapath(self):", "def get_scraping_output_paths(site):\n site_py = site.replace('.', '_')\n raw_data_path = Path('data', 'raw')\n feed_path = Path(raw_data_path, site_py, 'items.csv')\n images_path = Pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the start time of a particular measurement at a site This returns the time of the first sample for the measurement
def getMeasurementStart(self, meas: str) -> datetime: return self.starts[meas]
[ "def get_start_time(self):\n\n return self.time_vector[0]", "def get_metric_start_time(self, metric):\r\n start = self.params['START_TIMESTAMP']\r\n try:\r\n influx_query = \"SELECT LAST(*) FROM \\\"{}\\\"\".format(metric)\r\n self.logger.debug(\"Influx query: %s\", infl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in a string of DNA and returns the proportion of G or C in the string. GC_content('stringDNA') -> float
def GC_content(dna): g = dna.count('G') c = dna.count('C') ret = (g+c)/len(dna) return ret
[ "def string_prob(string, GC_content):\n\n s = string.upper()\n\n p_gc = GC_content / 2\n p_at = (1 - GC_content) / 2\n\n p = p_at ** (s.count('A') + s.count('T'))\n p *= p_gc ** (s.count('G') + s.count('C'))\n\n return p", "def gcContent(chromosome):\n \"\"\"Finds the percentage of Gs and Cs ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows a content file
def show_content(path): # Get Path and sane it base_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "..")) file_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "static", "content", path)) if not file_path.startswith(base_path): abort(403) return ...
[ "def show_text(self):\n\t\topen_file = open(self.file_name, 'r')\n\t\tprint()\n\t\tprint('================================')\n\t\tprint(self.file_name)\n\t\tprint('================================')\n\t\tprint()\n\t\tprint(open_file.read())", "def display(self):\n super().display()\n print(\"pièce j...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
replace numeric bond labels with unique bond stubs and generate the bond set (self.bonds)
def stubbify_bonds(self): # If we are dealing with an object that contains a bond pattern, the degree of a node has no meaning. # The degree is used only for VF2 isomorphism checking, but not for pattern embeddings. self.bonds = set() bonds = {} for name in self.agents: ...
[ "def update_internal_bonds(self):\n for residue in self.residues:\n if isinstance(residue, (aa.Amino, aa.WAT, na.Nucleic)):\n for atom in residue.atoms:\n if not atom.has_reference:\n continue\n for bond in atom.reference....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
fills in the nbonds dict, which reports the number of bonds between n1 and any other node it is bound to
def get_bond_numbers(self): # fuse with adjacency list function ? s = set() for (a1, s1), (a2, s2) in self.bonds: if (a1, a2) in s: self.nbonds[(a1, a2)] += 1 else: s.add((a1, a2)) self.nbonds[(a1, a2)] = 1
[ "def setNeighbors(self):\n \n self.nb = {}\n for a1 in self.data:\n ind1 = self.data.index(a1)\n nbd = {}\n nbd[ind1] = 0\n #nblist = self.nb[ind1] = [ind1]\n self.energy_matrix[ind1, ind1] = 0\n #set 1-2 interactions to 0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if the set of bonds implies a multigraph.
def is_multigraph(self): s = set() for (a1, s1), (a2, s2) in self.bonds: if (a1, a2) in s: return True else: s.add((a1, a2)) return False
[ "def hasParallelEdges(self): \n if self.__diGraph:\n raise \n for key in self.__Graph:\n temp = set()\n adjLst = self.__Graph[key]\n for item in adjLst:\n if item in temp:\n return True\n return False", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This emulates networkx's G.nodes() method, returning a list of node names.
def nodes(self): # return [k for k in self.agents] return self.name_list
[ "def get_nodes(self):\n\t\treturn node_names(self.network)", "def names(self):\n return {node for node in self.graph.nodes if self.name_is_valid(node)}", "def node_names():\n return list(node_mappings.keys())", "def get_nodes(self):\n return set(self._names)", "def get_nodes(self):\n nod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
print representation; if internal=False, print the standard kappa expression
def show(self, internal=False): if internal: info = '' for i in range(0, self.size): name = self.name_list[i] interface = '' iface = self.agents[name] for s in iface: interface += s + '{' + iface[s]['stat...
[ "def __repr__(self,prefix=''):\n str_out = [self.show_search_parameters_values(prefix)]\n str_out.append(self.show_chains_info(prefix))\n # print transforms\n str_out = '\\n'.join(str_out)\n return str_out", "def pretty(self):\n config = self._serialization\n return Operators.operatorInstance...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a company entry.
def create_company(args: Dict[str, str]): company = handle_creating_company(args) return company
[ "def post(self, request):\n params = request.data\n response = create_company(params, request)\n return Response(data=response, status=status.HTTP_200_OK)", "def create(self, vals):\n if self._context is None:\n context = {}\n context = dict(self._context)\n co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if company with given id exists, then update the entry.
def update_company(args: Dict[str, Union[str, int]], company_id: int): company = handle_updating_company(args, company_id) return company
[ "def testUpdateCompany(self):\n if self.__class__.company1 is None:\n self.testCreateCompanies()\n postfix = ' Corp.'\n self.__class__.company1['name'] += postfix\n company = self.__class__.service.UpdateCompany(\n self.__class__.company1)\n self.assert_(isinstance(company, tuple))\n s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
connect to the Cisco 1250 via telnet to configure the equipment
def _connect_via_telnet(self, host, username, password): self.get_logger().debug("Open telnet connection to equipment.") # Initialize telnet session telnet_session = telnetlib.Telnet() try: telnet_session.open(host) telnet_session.read_until("Use...
[ "def telnet_connect():\n conf = get_conf()\n tn = Telnet(conf['telnet_ip'], conf['telnet_port'], timeout=5)\n tn.read_until(b\"user: \", timeout=2)\n tn.write(conf['telnet_user'].encode('ascii') + b\"\\r\")\n tn.read_until(b\"password:\", timeout=2)\n tn.write(conf['telnet_pw'].encode('ascii') + b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
disconnect equipment from telnet
def __disconnect_via_telnet(self): self.get_logger().debug("Close telnet connection from the equipment.") if self.get_handle() != None: self.get_handle().close() self._set_handle(None)
[ "def disconnect(self):\n\n c.cso_logger.info('[{0}][{1}]: Disconnect from device'.format(self.target['name'], 'Disconnect'))\n message = {'action': 'update_task_status', 'task': 'Disconnect', 'uuid': self.target['uuid'],\n 'status': 'Disconnecting...'}\n self.emit_message(mess...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set AP Encryption type to WEP64
def _set_wifi_authentication_WEP64(self, passphrase): self.get_logger().debug("Set wifi authentication to WEP 64 bits") # WEP64 uses a 40-bit key = 5 bytes if len(passphrase) == 5: # Then we should transform ascii chars into hexadecimal values passphrase = char2hexa(pass...
[ "def _set_wifi_authentication_WEP128(self, passphrase):\n self.get_logger().debug(\"Set wifi authentication to WEP 128 bits\")\n\n # WEP128 uses a 104-bit key = 13 bytes\n if len(passphrase) == 13:\n # Then we should transform ascii chars into hexadecimal values\n passphra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set AP Encryption type to WEP128
def _set_wifi_authentication_WEP128(self, passphrase): self.get_logger().debug("Set wifi authentication to WEP 128 bits") # WEP128 uses a 104-bit key = 13 bytes if len(passphrase) == 13: # Then we should transform ascii chars into hexadecimal values passphrase = char2hex...
[ "def _set_wlan_encryption_wep(self, auth, wep_method, key_index, key_string = \"\"):\n # Only Open and Shared require WEP Key\n if((auth == self.info['const_auth_method_open']) or (auth == self.info['const_auth_method_shared'])):\n if (wep_method == self.info['const_encryption_method_wep64'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set AP Encryption type to WPA PSK TKIP
def _set_wifi_authentication_WPA_PSK_TKIP(self, passphrase): self.get_logger().debug("Set wifi authentication to WPA PSK TKIP") # No radius server to set self._send_cmd("no aaa new-model") for radio in self.WIFI_RADIOS: self._send_cmd("interface dot11radio " + str(r...
[ "def _set_wlan_encryption_wep(self, auth, wep_method, key_index, key_string = \"\"):\n # Only Open and Shared require WEP Key\n if((auth == self.info['const_auth_method_open']) or (auth == self.info['const_auth_method_shared'])):\n if (wep_method == self.info['const_encryption_method_wep64'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set AP Encryption type to WPA2 PSK AES
def _set_wifi_authentication_WPA2_PSK_AES(self, passphrase): self.get_logger().debug("Set wifi authentication to WPA2 PSK AES") # No radius server to set self._send_cmd("no aaa new-model") for radio in self.WIFI_RADIOS: self._send_cmd("interface dot11radio " + str(radio)) ...
[ "def _set_wlan_encryption_wpa(self, wpa_ver, auth, encryption, key_string = \"\"):\n if((auth == self.info['const_auth_method_open']) or (auth == self.info['const_auth_method_eap'])):\n if ((wpa_ver == self.info['const_encryption_method_wpa']) or (wpa_ver == self.info['const_encryption_method_wpa2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create ssid on the equipment
def create_ssid(self, ssid): self.get_logger().info("Create ssid '%s'" % str(ssid)) self._ssid = str(ssid) self._ssids.append(self._ssid) # Send commands to equipment self._send_cmd("dot11 ssid " + self._ssid) self._send_cmd("guest-mode") self._s...
[ "def createAP(ssid, password) :\n print(\"not yet implemented\")", "def _create_wlan(self, ssid, auth = \"\", encryption = \"\", wpa_ver = \"\", key_string = \"\",\n key_index = \"\", auth_server_name = \"\", use_web_auth = False, use_guest_access = False,\n acl_name = \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable/disable Wifi Wireless Multimedia (WMM) extensions
def set_wifi_wmm(self, mode): if mode in ("on", "1", 1): self._logger.info("Set wifi wmm to on") mode = 1 elif mode in ("off", "0", 0): self._logger.info("Set wifi wmm to off") mode = 0 else: raise Exception(-5, ...
[ "def toggle_wifi():\n config.set_wifi(not config.get_wifi())\n config.save_state()", "def test_attach_with_no_wifi(self):\n dut = self.android_devices[0]\n wutils.wifi_toggle_state(dut, False)\n autils.wait_for_event(dut, aconsts.BROADCAST_WIFI_AWARE_NOT_AVAILABLE)\n dut.droid.wifiAwareAttach()\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set wifi channel bandwidth
def set_wifi_bandwidth(self, bandwidth): if int(bandwidth) == 20: cmd = "channel width 20" elif int(bandwidth) == 40: cmd = "channel width 40-Above" else: raise Exception(-5, "Unsupported wifi bandwidth '%s'." % str(bandwidth)) for radi...
[ "def set_bandwidth_limit(self, value='BWFULL'):\n #CMD$=“BWL C1,ON”\n print debug_msg.TBD_MSG", "def set_bandwidth(self, bandwidth):\r\n self.obs.bandwidthHz = float(bandwidth)\r\n self.ave.bandwidthHz = float(bandwidth)\r\n self.hot.bandwidthHz = float(bandwidth)\r\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable/disable wifi voip
def set_wifi_voip(self, voip): if voip in ("on", "1", 1): self._logger.info("Set wifi voip to on") voip = 1 elif voip in ("off", "0", 0): self._logger.info("Set wifi voip to off") voip = 0 else: raise Exception(-5, "Parameter voip is no...
[ "def toggle_wifi():\n config.set_wifi(not config.get_wifi())\n config.save_state()", "def set_wifi_wmm(self, mode):\n if mode in (\"on\", \"1\", 1):\n self._logger.info(\"Set wifi wmm to on\")\n mode = 1\n elif mode in (\"off\", \"0\", 0):\n self._logger.info(\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set wifi transmit power in dBm
def set_wifi_power(self, standard, wifi_power): POWER_VALUES_2G = ["-1", "2", "5", "8", "11", "14", "17", "20", "max"] POWER_VALUES_5G = ["-1", "2", "5", "8", "11", "14", "17", "max"] # Control of the value to set if standard not in self.WIFI_STANDARD_5G \ and st...
[ "def get_transmit_power(self):\n (status, power) = self.__device.get_transmit_power()\n self.__device.decode_error_status(status, cmd='get_transmit_power', print_on_error=True)\n return \"%d dBm\" % (power)", "def set_transmit_power(self, power):\n (status, null) = self.__device.set_tr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if standard and authentication type combination is supported by Equipment
def _is_supported_config(self, standard_type, authentication_type): if standard_type in ['n','n2.4G','n5G'] and authentication_type in \ ['WEP64','WEP128','WPA-PSK-TKIP','EAP-WPA']: return False return True
[ "def supports_authorization_smart_vault(self):\n return # boolean", "def SupportsEncryption(self):\n return self.path_spec.type_indicator in (\n definitions.TYPE_INDICATORS_WITH_ENCRYPTION_SUPPORT)", "def OSSupportsExtendedProtection(self) -> bool:", "def is_supported(self, data_product):\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set wifi config, including standard and authentication
def set_wifi_config(self, ssid, standard_type, authentication_type, passphrase, channel = None, dtim = None, wmm = None, bandwid...
[ "def _set_wlan_cfg(zd, wlan_conf, is_create = True, get_alert=True):\n if is_create == True:\n conf = {'ssid': None, 'description': None, 'auth': '', 'wpa_ver': '', 'encryption': '', 'type': 'standard',\n 'hotspot_profile': '', 'key_string': '', 'key_index': '', 'auth_svr': '',\n 'do...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a directory path, based on the provided problem instance and problem name, where a result of the worker method can be stored.
def _build_problem_instance_directory(problem_instance, problem_name): path = pathlib.Path(__file__).parent.resolve() directory = Path("../output", problem_name.value, problem_instance.input_graph.graph["graph_type"].value) full_path = (path).joinpath(directory) return full_path
[ "def _make_results_dir(self):\n dir_name = self.config.habitat_baselines.il.results_dir.format(\n split=\"val\"\n )\n os.makedirs(dir_name, exist_ok=True)", "def _create_dir(self):\n self.out_fp = str(self.pb.wd + \n 'out_'+str(self.pb.conf_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the session directory if it does not exist.
def _create_session_dir_if_not_exists(self): if not os.path.exists(self.session_dir) or not os.path.isdir(self.session_dir): os.mkdir(self.session_dir)
[ "def makeSessionPath(self):\n try:\n pth = os.path.join(self.getSessionsPath(), self.__uid)\n if not os.access(pth, os.F_OK):\n os.makedirs(pth)\n return pth\n except: # noqa: E722 pylint: disable=bare-except\n return None", "def makeSessio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the entire session and decode to dict.
def _load_session(self): sess = {} self.lock.acquire() try: with open(self.session_dir + self.token, 'r') as f: sess = json.loads(f.read(), object_hook=self.from_json_converter) except (IOError, json.decoder.JSONDecodeError) as e: logging.warning(...
[ "def load_session(cls, storage, session_key):\n pass", "def load(self):\n try:\n return signing.loads(\n self.session_key,\n serializer=self.serializer,\n # This doesn't handle non-default expiry dates, see #19201\n max_age=self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if session file exists.
def exists(self): return os.path.exists(self.session_dir + self.token)
[ "def file_exists(self) -> bool:\n return self._path.exists()", "def exists(self):\n return self._session_id is not None", "def _credfile_exists(self):\n return os.path.exists(self.credfile_loc)", "def savefile_exists(self):\n\n return os.path.isfile(str(self.savefile) + '.qu')", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Commit the current session to the store.
def commit(self): SessionMemoryStore.sessions[self.token] = self.session
[ "def commit(self, session):\n session.commit()", "def session_commit(self, session):\n # this may happen when there's nothing to commit\n if not hasattr(session, 'meepo_unique_id'):\n self.logger.debug(\"skipped - session_commit\")\n return\n\n self._session_pub(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets new token, deletes old token.
def set_new_token(self, token): old = {} try: old = SessionMemoryStore.sessions[self.token] del SessionMemoryStore.sessions[self.token] except KeyError: pass SessionMemoryStore.sessions[token] = old self.token = token
[ "def delete_token(self, token):\n raise NotImplementedError", "def _renew_token(self):\n self.token = self._api_auth()", "def run(self):\n if self.token is not None:\n self.token.deleteFromStore()", "def deleteToken(self, token):\n taskMgr.remove(token.getDeleteTask())\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the current session tokens.
def all_session_tokens(session_dir): return [k for k in SessionMemoryStore.sessions.keys()]
[ "def get_session_and_token(self):\n self.get_session()\n self._get_token()", "def sessions(self):\r\n resp = self.request(\"get\", \"/sessions\")\r\n obj = self.json_body(resp)\r\n return obj['sessions']", "def tokens(self):\n return self._tokens", "def list_tokens(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a new session token.
def _generate_new_session_token(self, length=TOKEN_LENGTH): return str(secrets.token_hex(32))
[ "def get_new_token():\r\n\r\n return authenticate()[0]", "def getToken(self) -> str:\n return self.__newToken", "def new_token(self):\n log.info('Getting a new authorization token from apteligent')\n\n payload = {'grant_type': 'password', 'username': self.username,\n 'p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates expiry for this request.
def _update_expiry(self): try: self._store.session['expiry'] = self._get_new_session_expiry_from_now() except KeyError: pass
[ "def update_expiration_for_hit(HITId=None, ExpireAt=None):\n pass", "def set_expiry(response, **kw):\n delta = datetime.timedelta(**kw)\n if not datetime.timedelta(0) <= delta <= datetime.timedelta(365):\n raise ValueError(\"Expiry time must be between 0 and 365 days\")\n seconds = delta.days*2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store a value in the session.
def store(self, key, value): self._store.session[key] = value self.commit()
[ "def session(self, value):\n self.AUTH_ARGS[\"session\"] = value\n self._SESSION_DT = datetime.datetime.utcnow() if value else None", "def add_session(self, key, value):\n global http_session\n if not session_disabled:\n http_session[key] = value\n\n print('Add to ses...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get specific key from flash.
def get_flash(self, key): try: return self.flash_data()[key] except KeyError: return None
[ "def get_specific_key(problem_id, version, key):\n return 'do some magic!'", "async def retrieve_key(request):\n LOG.debug('Retrieve key')\n requested_id = request.match_info['requested_id']\n key_type = request.match_info['key_type'].lower()\n if key_type not in ('public', 'private'):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check CSRF token on this session.
def check_csrf(self, tokenToCheck): try: token = self._store.session['csrf'] if tokenToCheck != token: raise KeyError return True except KeyError: return False
[ "def _check_csrf_token(self, request):\n if self.csrf_cookie_name and self.csrf_field:\n csrf_token = request.cookies.get(self.csrf_cookie_name, None)\n if not csrf_token:\n return False\n if csrf_token != request.params.get(self.csrf_field, None):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn a parenthesized string into a tree. >>> parse_hierarchy("This is a ((very) good) Example") ['This is a ', [['very'], ' good'], ' Example']
def parse_hierarchy(string): result = [] stack = [result] for item in re.split(parens, string): if item == '(': new = [] stack[-1].append(new) stack.append(new) elif item == ')': stack.pop() elif item: stack[-1].append(item)...
[ "def str2tree(s, binarize=False):\n if not s.startswith('('):\n s = \"( {} )\".format(s)\n if binarize:\n s = s.replace(\"(\", \"(X\")\n return Tree.fromstring(s)", "def tree_or_string(s):\n if s.startswith(\"(\"):\n return Tree.fromstring(s)\n return s", "def from_string(cls...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a set of strings from a tree. >>> tree = parse_hierarchy("This is a ((very) good) Example") >>> sorted(flatten_hierarchy(tree)) ['This is a Example', 'This is a good Example', 'This is a very good Example'] Note that whitespace is not handled intelligently (i.e. whitespace will neither be collapsed nor trimmed).
def flatten_hierarchy(tree): acc = set([""]) for item in tree: if isinstance(item, str): # It's a leaf. # Append the new string to each existing one in the accumulator. acc = {start + item for start in acc} else: # It's a tree, which means its contents are optional. ...
[ "def parse_hierarchy(string):\n result = []\n stack = [result]\n for item in re.split(parens, string):\n if item == '(':\n new = []\n stack[-1].append(new)\n stack.append(new)\n elif item == ')':\n stack.pop()\n elif item:\n stack[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
use if this is a desired dataset and it was made with any of the desired settings
def use(data): if settings is not None: dsetting = ast.literal_eval(data['kwargs']) for setting in settings: setting_match = all(dsetting[k] == v for k, v in setting.items()) if setting_match: break e...
[ "def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == CrowdDataset.ucf_qnrf:\n self.dataset_class = UcfQnrfFullImageDataset\n self.train_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps an iterator such that it produces items in the background; uses a bounded queue to limit memory consumption.
def async_prefetch_wrapper(iterable, buffer=100):
    done = 'DONE'  # object()
    def worker(q, it):
        for item in it:
            q.put(item)
        q.put(done)
    # launch a thread to fetch the items in the background
    queue = Queue.Queue(buffer)
    #pool = Pool()
    #m = Manager()
    #queue = m.Queu...
[ "def _iterqueue(queue):\n while 1:\n item = queue.get()\n\n if item is StopIteration:\n queue.put(StopIteration)\n break\n\n yield item", "def generate_from_queue(self):\n while True:\n yield self.input_queue.get()", "def queue():\n return asyncio.Queue()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Safely fetches production configuration from 3scale
def fetch_production_configuration(service):
    return service.proxy.list().configs.list(env="production")
[ "def is_production() -> bool:\n\n return conf(\"app.mode\") == \"prod\"", "def test_get_cloud_settings(self):\n pass", "def production():\n env.root = root = '/opt/www.commcarehq.org_project'\n env.virtualenv_root = _join(root, 'env/cchq_www')\n env.code_root = _join(root, 'src/commcare...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that service has production configuration after config promotion
def test_promote_istio_service(service2):
    assert len(fetch_production_configuration(service2)) > 0
[ "def is_production() -> bool:\n\n return conf(\"app.mode\") == \"prod\"", "def fetch_production_configuration(service):\n return service.proxy.list().configs.list(env=\"production\")", "def test_get_asset_service_configurations(self):\n pass", "def isProdHost():\n\n return _Control.TIER.name =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create point cloud object based on given coordinates and name.
def point_cloud(ob_name, coords, faces=[], edges=[]):
    # Create new mesh and a new object
    me = bpy.data.meshes.new(ob_name + "Mesh")
    ob = bpy.data.objects.new(ob_name, me)
    # Make a mesh from a list of vertices/edges/faces
    me.from_pydata(coords, edges, faces)
    # Display name and update the mesh ...
[ "def construct_pointcloud(points):\n\n pc = PointCloud()\n pc.points = Vector3dVector(np.asanyarray(points))\n\n return pc", "def visualize_pointcloud_new(pointcloud, name, save_path):\n # Open 3D can only store pointcloud as .ply\n save_file_ply = os.path.join(save_path, \"{}.ply\".format(name))\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
listAttributes() -> [names of attributes]
def listAttributes(self):
    return list(self._attributes.keys())
[ "def listAttributes(object):\n prettyPrint(object.__dict__.keys())", "def get_attributes_names(self):\n return self.attributes_names", "def getAttrs(self):\n\t\treturn self._attributes", "def Attributes(self) -> _n_5_t_17:", "def print_attribute_list(self):\n p = prettytable.PrettyTable((\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
setAttribute(name, value) -> None
def setAttribute(self, name, value):
    self._attributes[name] = value
    return
[ "def setOptionalAttribute(self, name, value):\n if value is not None:\n self.setAttribute(name, value)", "def setAttribute(self, attributename, attributevalue):\n\n self.send('setAttribute(\"' + attributename + '\", \"'\n + attributevalue + '\");')", "def set_attribute(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulates a game between two baseball Teams, prints the probability that team1 wins, and returns the probability that the home team wins.
def game(team1, team2):
    lineup1 = formBestLineup(team1)
    lineup2 = formBestLineup(team2)
    expRun1 = expectedRuns(lineup1)
    expRun2 = expectedRuns(lineup2)
    p = 0
    for i in range(1, 21):
        for j in range(0, i):
            p += expRun1[i] * expRun2[j]
    print('\n\nProbability that '+team1.name...
[ "def round1(teams):\n wait_for_keypress()\n print()\n print(\"ROUND 1 (Everybody vs Everybody)\")\n print('================================', speak=False)\n print()\n points = [0 for i in range(len(teams))]\n round1 = []\n for i in range(5):\n for j in range(i+1, 5):\n ij =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the expected run distribution of a given baseball lineup.
def expectedRuns(lineup):
    transitionsMatrices = list(map(lambda Batter: Batter.transitionMatrixSimple(), lineup))
    return simulateMarkovChain(transitionsMatrices)[:, 216]
[ "def get_lineup_efficiency(league: League, lineup: List[Player]) -> float:\n max_score = get_best_lineup(league, lineup)\n real_score = np.sum(\n [player.points for player in lineup if player.slot_position not in (\"BE\", \"IR\")]\n )\n return real_score / max_score", "def game(team1, team2):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a nearly optimal batting order, by assigning the best and worst player to their best possible positions when all other players are average, and then the second best and second worst around them, etc... until all positions are filled.
def formBestLineup(team):
    players = list(map(lambda Batter: Batter.transitionMatrixSimple(), team.batters))
    averagePlayer = team.averagePlayer().transitionMatrixSimple()
    availablePositions = set(range(9))
    bestLineup = [team.averagePlayer()] * 9
    for bestRemaining in range(4):
        worstRemaining =...
[ "def sort_fitness(self):\n \"\"\" Сортирует пузырьком ботов в стаде по возрастанию (по fitness)\"\"\"\n\n _bot = Bot()\n _m = len(self.bots)\n for i in range(_m):\n for j in range(_m - i - 2):\n if self.bots[j].fitness < self.bots[j+1].fitness:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a nearly optimally worst batting order, by assigning the best and worst player to their best possible positions when all other players are average, and then the second best and second worst around them, etc... until all positions are filled.
def formWorstLineup(team):
    players = list(map(lambda Batter: Batter.transitionMatrixSimple(), team.batters))
    averagePlayer = team.averagePlayer().transitionMatrixSimple()
    availablePositions = set(range(9))
    worstLineup = [team.averagePlayer()] * 9
    for bestRemaining in range(4):
        worstRemaining...
[ "def formBestLineup(team):\n players = list(map(lambda Batter: Batter.transitionMatrixSimple(), team.batters))\n averagePlayer = team.averagePlayer().transitionMatrixSimple()\n availablePositions = set(range(9))\n bestLineup = [team.averagePlayer()] * 9\n for bestRemaining in range(4):\n worst...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
txt_file is a text file with arraylike data. The columns of txt_file will be saved as separate .1D files. out_file_base is the prefix for the saved files; the column number (1...ncolumns) will be appended to the prefix.
def txt_to_1D(txt_file, out_file_base):
    # read in par file
    data = np.loadtxt(txt_file)
    # save the columns of data as separate .1D files
    for i in xrange(data.shape[1]):
        out_file = '{0}{1}.1D'.format(out_file_base, i+1)
        np.savetxt(out_file, data[:,i])
[ "def itx_to_txt_converter(raw_data_path, text_data_path):\r\n # creates list from the sorted files in the raw_data_path directory\r\n filelist = sorted(os.listdir(raw_data_path))\r\n linecount = int(input('please enter the first file number >> '))\r\n for file in filelist:\r\n if file.endswith('....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mnemonic to seed; takes a string (could use type or isinstance here). Must be space-delimited, not comma-delimited.
def mnemonic_to_seed(mnemonic):
    if not validate_mnemonic(mnemonic):
        raise ValueError("Mnemonic is not valid")
    words = mnemonic.lower().split()
    seed_hexstring = ''
    y = 0
    for x in range(16):
        # TODO: Use a look up to improve efficiency
        n = format(wordlist.index(words[y]), '012...
[ "def gen_mnemonic(num_words:int)->str:\n if num_words < 12 or num_words > 24 or num_words%3 != 0:\n raise RuntimeError(\"Invalid word count\")\n return bip39.mnemonic_from_bytes(rng.get_random_bytes(num_words*4//3))", "def alphacode(pin):\n for i in range(2):\n x = pin \n\n \n mnem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download images using Solr as the data source. Return URLs that cannot be downloaded.
def runWithSolrAsDataSource(solrUrl, rootDestinationDir, finalDestinationDir, map_urls):
    notDownloaded = []
    v = json.loads(requests.get(solrUrl).text)
    docs=v['response']['docs']
    numFoundInSolr=v['response']['numFound']
    for doc in docs:
        download_file_path=doc['download_file_path']
        do...
[ "def collect_images(self):\n self.__get_images_link()\n url_length: int = len(self.img_url_list)\n self.logger.info(\"Starting downloading for {} images...\".format(url_length))\n for url, index in zip(self.img_url_list, range(url_length)):\n state, image, shape = self.downloa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create path to store files that could not be downloaded. This uses the current date and time and the root destination dir
def createNotDownloadedOutputPath(rootDestinationDir):
    import time
    from datetime import datetime
    today = datetime.fromtimestamp(time.time())
    return os.path.join(rootDestinationDir, today.strftime("%Y%m%d_%H%M%S_could_not_download.txt"))
[ "def create_needed_paths(self):\n os.makedirs(self.logs_directory, exist_ok=True)\n os.makedirs(self.models_directory, exist_ok=True)", "def BuildDestinationPath():\n import getpass\n import datetime\n \n myToday = datetime.date.today()\n myWeekDay = '%d' % myToday.isoweekday() \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }