Columns: query (string, 9 to 9.05k chars); document (string, 10 to 222k chars); negatives (list of 19 to 20 items); metadata (dict)
Get the first n heptagonal numbers.
def get_heptagonals(num): return [int(i * (5 * i - 3) / 2) for i in range(1, num + 1)]
[ "def H(n):\r\n if n <= -8:\r\n return H(n+5) + H(n+4) + H(n+2)\r\n elif -8 < n and n < 10:\r\n return n\r\n else: # n >= 10\r\n return H(n-8) + H(n-5) + H(n-3)", "def pentagonal(n: int) -> int:\n # Find the pentagonal number to nth degree.\n pentagonal_number = (n * ((3 * n) -...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first n octagonal numbers.
def get_octagonals(num): return [int(i * (3 * i - 2)) for i in range(1, num + 1)]
[ "def octaves_for_note(note):\n return sorted([n for n in range(note,minC-1,-12)] + [n for n in range(note,maxC+1,12)])", "def pentagonal(n: int) -> int:\n # Find the pentagonal number to nth degree.\n pentagonal_number = (n * ((3 * n) - 1) // 2)\n\n # Find the total number of dots.\n dots = ((n-1) ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute k-cliques in the graph.
def compute(self, k): from collections import defaultdict assert isinstance(k, int) and k >= 2 # look-up case if k in self.cliques.keys(): return self.cliques[k] k_cliques = set() # base case: k = 2 if k == 2: for i in range(0, self.num_v...
[ "def run_k_cliques(self, smallest_clique):\n start = time.time()\n cliques = None\n if self.graph is not None:\n cliques = k_clique_communities(self.graph,smallest_clique)\n end = time.time()\n print(\"K-Cliques ime taken: {0}\".format(end-start))\n return clique...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decipher a message using XOR. text: a list of integers corresponding to the ASCII values of characters. key: a list of characters used as the key.
def xor_decipher(text, key): deciphered = [] key_length = len(key) key_ascii = [ord(_k) for _k in key] for i, _ascii in enumerate(text): deciphered.append(chr(_ascii ^ key_ascii[i % key_length])) return "".join(deciphered)
[ "def xor(text, key):\n #pad = (key * (len(text) // len(key) + 1))[:len(text)]\n pad = CycleStr(key)\n return ''.join([chr(a ^ b) for a, b in zip(text, pad)])", "def decipher(ciphertext, key):\n return \"\".join(chr(ord(c)^ord(k)) for c, k in zip(ciphertext, cycle(key)))", "def xor(msg: bytes, key: b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a triangle-shaped array, determine the max sum of elements along a downward path. arr: the input array. row_idx: the index of the row where the path terminates.
def max_sum_path_in_triangle(arr, row_idx=-1): # dynamic programming: tile it up by cumulative scores, row by row points = [] for i, _row in enumerate(arr): # base case: the first row if i == 0: points.append(_row[:]) else: tmp_row = [] last_idx = ...
[ "def findMaxPathDownTriangle(triangle):\n dp_table = TriangleOfNumbers()\n n_rows = len(triangle.data)\n\n for irow in xrange(0,n_rows):\n dp_table.add_row([0]*(irow+1))\n\n irow = 0\n icol = 0\n\n for irow in xrange(0, n_rows):\n n_cols = len(triangle.data[irow])\n\n for icol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take the square root of a number, build a continued-fraction sequence, and return it as a generator.
def sqrt_continued_fraction_generator(num): import sympy return sympy.ntheory.continued_fraction_iterator(sympy.sqrt(num))
[ "def continued_fraction_of_root(n, max_iter=1000):\n root = n ** .5 # root(23)\n a = int(root) # a_0\n yield a\n nm, dr = 1, -a\n for _ in xrange(max_iter):\n d = (n - dr**2) / nm\n a = int((root - dr) / d)\n dr, nm = -dr - a * d, d\n yield a", "def continued_fraction(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compile an integer sequence (continued fraction representation) into its corresponding fraction.
def compile_continued_fraction_representation(seq): from fractions import Fraction # sanity check assert seq # initialize the value to be returned by working backwards from the last number retval = Fraction(1, seq.pop()) # keep going backwards till the start of the sequence while seq: ...
[ "def _cfrac_convergents(S):\n \n n0,n1 = 0,1\n d0,d1 = 1,0\n \n for c in S:\n n0,n1 = n1,c*n1 + n0\n d0,d1 = d1,c*d1 + d0\n \n yield Fraction(n1,d1)", "def fractions():\n from fractions import Fraction\n return tuples(integers(), integers(min_value=1)).map(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if two numbers are related by digit permutation.
def related_by_digit_permutation(num_a, num_b): from collections import Counter return Counter(str(num_a)) == Counter(str(num_b))
[ "def is_permutation(a, b):\r\n \r\n return sorted(list(str(a))) == sorted(list(str(b)))", "def two_adjacent_digits_same(number):\n digits = separate_digits(number)\n\n i = 0\n while i < len(digits) - 1:\n if digits[i] == digits[i+1]:\n return True\n i += 1\n return False...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the lattice by defining which vertices are connected without assuming the size of the lattice. neighbor_function(row_idx, col_idx, row_dim, col_dim) returns a list of (row_idx, col_idx) neighbors. weight_function(matrix, head_row_idx, head_col_idx, tail_row_idx, tail_col_idx) returns the weight of the edge f...
def __init__(self, matrix, neighbor_function, weight_function): self.lattice = matrix self.row_dim = len(self.lattice) self.col_dim = len(self.lattice[0]) self.neighbor_function = neighbor_function self.weight_function = weight_function self.consistency_check() se...
[ "def periodic_lattice(node_number, neighbors):\n import numpy as num\n from kreveik import *\n from kreveik.classes import TopologicalNetwork \n adjacency_matrix = num.zeros((node_number,node_number))\n for i in range(node_number):\n for j in range(neighbors):\n adjacency_matrix[i][...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that (1) the lattice is indeed rectangular; (2) the neighbor function is callable; (3) the weight function is callable.
def consistency_check(self): for _row in self.lattice: assert len(_row) == self.col_dim assert callable(self.neighbor_function) assert callable(self.weight_function)
[ "def testNeighborMasking(self):\n \"\"\"\n We create another object separated from the one of\n interest, which should be masked.\n \"\"\"\n self.checkCandidateMasking([(self.x+5, self.y, 1.0)])", "def CheckBounds(self, ):\n ...", "def test_just_inside():\n rmg = Ras...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flatten a 2D index to a 1D index.
def flatten_index(self, i, j): return i * self.col_dim + j
[ "def unflatten_index(self, idx):\n return idx // self.col_dim, idx % self.col_dim", "def flatten(self) -> WordIndex:\n index = self.index.flatten()\n return self._from_index(index)", "def flatten_idxs(idx_in, jaggedarray):\n if len(idx_in) == 0:\n return numpy.array([], dtype=nump...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unflatten a 1D index to a 2D index.
def unflatten_index(self, idx): return idx // self.col_dim, idx % self.col_dim
[ "def flatten_index(self, i, j):\n return i * self.col_dim + j", "def flatten(self) -> WordIndex:\n index = self.index.flatten()\n return self._from_index(index)", "def flatten_idxs(idx_in, jaggedarray):\n if len(idx_in) == 0:\n return numpy.array([], dtype=numpy.int)\n idx_out ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subroutine to update the shortest distance, and the associated max-index internal node, between node i and node j.
def __update_single_pair(self, i, j): update_value = ( self.__min_distance[i][self.__cap_internal] + self.__min_distance[self.__cap_internal][j] ) # distance updates can be done in-place because all values used to compute update_value sit in the union of a row and a colum...
[ "def update_short_dist_to_EI(graph_links, graph_dist, EI, dist_to_EI=0):\n if graph_dist[EI] > dist_to_EI:\n graph_dist[EI] = dist_to_EI\n for N in graph_links[EI]:\n update_short_dist_to_EI(graph_links, graph_dist, N, dist_to_EI+1)", "def dijkstra(self):\n\n # Initialise the ne...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reverse an adjacency list. This is only relevant for directed graphs. For undirected graphs, the reverse is just the same as the original.
def reverse_adj_list(adjacency_list): # determine the number of vertices n = len(adjacency_list) # initialize the adjacency list to be returned retlist = [] for i in range(0, n): retlist.append([]) # loop over all nodes for i, l in enumerate(adjacency_list): # create an edge ...
[ "def reverse(self):\n cur_node = self.getHead()\n prev_node = None\n\n while cur_node is not None:\n next_node = cur_node.getNext()\n cur_node.setNext(prev_node) # reverse Node link\n prev_node = cur_node\n cur_node = next_node\n\n self.setHead...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subproblem in dynamic programming. Count the number of descending sequences given a total and the head. Note that a one-term sequence is also considered a sequence.
def num_desc_seq_given_total_and_head(total, head): if total < 1 or head < 1: return 0 # base case: sequence has only one term if total == head: return 1 # recursive case: sequence has more than one term # the second term cannot exceed the head; take advantage of transitivity n...
[ "def num_desc_prime_seq_given_total_and_head(total, head, list_of_primes, set_of_primes):\n # sanity check\n assert head in set_of_primes, f\"total: {total}, head: {head}\"\n assert total >= head, f\"total: {total}, head: {head}\"\n\n # base case: sequence has only one term\n if total == head:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subproblem in dynamic programming. Using a precomputed list & set of primes, count the number of descending prime sequences given a total and the head. Note that a one-term sequence is also considered a sequence.
def num_desc_prime_seq_given_total_and_head(total, head, list_of_primes, set_of_primes): # sanity check assert head in set_of_primes, f"total: {total}, head: {head}" assert total >= head, f"total: {total}, head: {head}" # base case: sequence has only one term if total == head: return 1 ...
[ "def num_desc_seq_given_total_and_head(total, head):\n if total < 1 or head < 1:\n return 0\n\n # base case: sequence has only one term\n if total == head:\n return 1\n\n # recursive case: sequence has more than one term\n # the second term cannot exceed the head; take advantage of tran...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates coprime Pythagorean triplets whose largest member is under some bound; the generating (n, m) pairs are also optionally bounded.
def pythagorean_triplets( bound, ratio_lower_bound=0.0, ratio_upper_bound=1.0, coprime=True ): from math import sqrt, ceil, floor if coprime: fac = Factorizer(bound) bound_for_iteration = ceil(sqrt(bound)) triplets = [] # use the formula: (m^2 - n^2)^2 + (2mn)^2 = (m^2 + n^2)^2 for ...
[ "def pythagorean_triplets():\n c = 4\n while True:\n c += 1\n a_min = int(sqrt(2 * c - 1))\n a_max = int(c / sqrt(2)) + 1\n\n for a in range(a_min, a_max):\n b = int(sqrt(c * c - a * a))\n if a ** 2 + b ** 2 == c ** 2:\n yield (a, b, c)", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursive approach to generate array element partitions.
def generate_all_partitions_from_list(arr): if len(arr) == 0: yield [], [] else: for _l, _r in generate_all_partitions_from_list(arr[:-1]): yield [*_l, arr[-1]], _r yield _l, [*_r, arr[-1]]
[ "def partitionMemories(ir):\n for mems in partitions:\n partition_name=mems[0]\n dimention_to_partition=int(mems[1])\n settings=mems[2:][0]\n dim, dataType = getArrayInfo(partition_name)\n\n # Settings for fully partitioning\n if settings[0]=='*':\n print(\"\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a set, represented as a sorted tuple, is a special sum set. A set A is a special sum set iff, for any nonempty disjoint subsets B and C: (1) S(B) != S(C); (2) if B contains more elements than C, then S(B) > S(C).
def is_special_sum_set(set_as_sorted_tuple, verbose=False): # basic set check: no duplicates if len(set_as_sorted_tuple) != len(set(set_as_sorted_tuple)): return False # compute all the sums of non-empty subsets # note that we do not need to check for disjointness because # any common eleme...
[ "def subset_sum(S, total):\n\n if total == 0 and set:\n # Can use empty set\n return True\n\n if not set and total:\n # There are not elements in teh set and total > 0\n return False\n\n # Create a cache adn initiliaze all values as False\n cache = [[False for _ in range(tota...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the number of ways to tile n black blocks with red paint. Each group of red paint must cover at least m consecutive blocks.
def block_tiling_flexible_1d(m, n): end_in_red = [*[0 for _ in range(m - 1)], 1] end_in_black = [1 for _ in range(m)] for i in range(m, n): _reds = end_in_red[i - 1] + end_in_black[i - m] _blacks = end_in_red[i - 1] + end_in_black[i - 1] end_in_red.append(_reds) end_in_black...
[ "def block_tiling_fixed_1d(m, n):\n end_in_red_end = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red_end[i - m] + end_in_black[i - m]\n _blacks = end_in_red_end[i - 1] + end_in_black[i - 1]\n end_in_red_end.append(_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the number of ways to tile n black blocks with red paint. Each group of red paint must cover exactly m consecutive blocks.
def block_tiling_fixed_1d(m, n): end_in_red_end = [*[0 for _ in range(m - 1)], 1] end_in_black = [1 for _ in range(m)] for i in range(m, n): _reds = end_in_red_end[i - m] + end_in_black[i - m] _blacks = end_in_red_end[i - 1] + end_in_black[i - 1] end_in_red_end.append(_reds) ...
[ "def block_tiling_flexible_1d(m, n):\n end_in_red = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red[i - 1] + end_in_black[i - m]\n _blacks = end_in_red[i - 1] + end_in_black[i - 1]\n end_in_red.append(_reds)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the number of ways to tile n black blocks with red paint. Each group of red paint must cover exactly m consecutive blocks, where m may be any of several given values.
def block_tiling_multifixed_1d(m_values, n): m_values = sorted(m_values) m_min = m_values[0] end_in_red_end = [*[0 for _ in range(m_min - 1)], 1] end_in_black = [1 for _ in range(m_min)] for i in range(m_min, n): _reds, _blacks = 0, 0 for _m in m_values: if i < _m - 1: ...
[ "def block_tiling_flexible_1d(m, n):\n end_in_red = [*[0 for _ in range(m - 1)], 1]\n end_in_black = [1 for _ in range(m)]\n\n for i in range(m, n):\n _reds = end_in_red[i - 1] + end_in_black[i - m]\n _blacks = end_in_red[i - 1] + end_in_black[i - 1]\n end_in_red.append(_reds)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a datetime to the given timezone. The tz argument can be an instance of tzinfo or a string such as 'Europe/London' that will be passed to pytz.timezone. Naive datetimes are forced to the timezone. Aware datetimes are converted.
def force_tz(obj, tz): if not isinstance(tz, tzinfo): tz = pytz.timezone(tz) if (obj.tzinfo is None) or (obj.tzinfo.utcoffset(obj) is None): return tz.localize(obj) else: return obj.astimezone(tz)
[ "def dt_to_zone(dt, tzstring):\n return dt.astimezone(pytz.timezone(tzstring))", "def ensure_timezone(dt, tz=None):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=tz or tzlocal())\n else:\n return dt", "def transform_timezone(dt, from_tz_str, to_tz_str):\n if from_tz_str == to_tz_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the override for a specific flag, given a context.
def get_override(self, flag: Flag, **kwargs) -> Any: raise NotImplementedError
[ "def get_override(self, flag: Flag, **kwargs) -> Any:\n return flag.cast_string(SiteFlagOverride.objects.get(name=flag.name).value)", "def get_flag(flag = 'exit'):\n return flag in FLAG and FLAG[flag]", "def Get(self, flag_name):\n return self.flags.get(flag_name)", "def should_override(self, flag:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make sure we can get a dfname with at least the filename attribute being correct, from a pagerange passed in
def test_make_dfname_from_pagerange(self): converter = DFNamePageRangeConverter(self.en['wiki'], "pages-articles", "xml", "bz2", verbose=False) dfname = converter.make_dfname_from_pagerange((230, 295), 2) expected_filename = 'enwiki-{today}-pages-arti...
[ "def get_dfnames_from_pageranges(self, pageranges):\n dfnames = []\n for startpage, endpage, partnum in pageranges:\n dfname = DumpFilename(\n self.wiki, self.wiki.date, self.dumpname,\n self.filetype, self.file_ext, partnum,\n DumpFilename.make_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make sure that for conf with checkpoints disabled, we get a good list of output files to be produced, with or without part numbers
def test_get_nochkpt_outputfiles(self): # turn off checkpoints in the config but keep part numbers self.en['wiki'].config.checkpoint_time = 0 pages_per_part = FilePartInfo.convert_comma_sep( self.en['wiki'].config.pages_per_filepart_history) content_job = XmlDump("articles"...
[ "def missingoutputfiles(self):\n return self.getmissingoutputfiles(self.SlideID, **self.workflowkwargs)", "def train_output():\n output = [config['out']+\"/{sample}/model.rda\", config['out']+\"/{sample}/variable_importance.tsv\"]\n if check_config('tune'):\n output.append(config['out']+\"/{sample...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make sure that we get a good list of page ranges covered by stubs when we feed in fake tuples describing what the stubs cover
def test_get_ranges_covered_by_stubs(self, mock_get_first_last_page_ids, mock_list_outfiles_for_input): mock_list_outfiles_for_input.return_value = self.set_stub_output_filenames([1, 2, 3, 4]) mock_get_first_last_page_ids....
[ "def get_ranges_covered_by_stubs(self, dump_dir):\n output_dfnames = self.oflister.get_reg_files_for_filepart_possible(\n self.oflister.makeargs(dump_dir, self.list_dumpnames(), self.get_fileparts_list()))\n stub_dfnames = [self.stubber.get_stub_dfname(dfname.partnum, dump_dir)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make sure that we get a reasonable list of completed pageranges when we feed in a list of complete output files (supposedly found in the dump run output directory)
def test_get_done_pageranges(self, mock_list_checkpt_files): pagerange_strings = {1: ['p1p48', 'p49p65', 'p66p82'], 2: ['p135p151', 'p152p168', 'p169p185', 'p203p295'], 3: ['p301p319', 'p320p384', 'p438p461', 'p577p599'], 4: ...
[ "def get_done_pageranges(self, dump_dir, date):\n chkpt_dfnames = self.oflister.list_checkpt_files(\n self.oflister.makeargs(dump_dir, [self.get_dumpname()],\n parts=PARTS_ANY, date=date))\n # get the page ranges covered by existing checkpoint files\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a number of parts, put together a list of DumpFilenames for the corresponding stub output files and return them
def set_stub_output_filenames(self, parts): stub_filenames = [] for partnum in parts: stub_filenames.append( "{wiki}-{date}-stub-articles{partnum}.xml.gz".format( wiki=self.en['wiki'].db_name, date=self.today, partnum=partnum)) ...
[ "def getBedOutFiles(args):\n bed = os.path.join(args.outDir, 'out.bed')\n bedDetails = os.path.join(args.outDir, 'out_details.bed')\n return bed, bedDetails", "def get_output_files(description):\n log.info(\"fixing output files in description\")\n files = {}\n if description['outFiles'] and descriptio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a DumpFilename, return a reasonable first and last page id for the file for testing
def get_fake_first_last_pageids(xml_dfname, _dump_dir, _parts): page_id_info = {1: [1, 100], 2: [101, 300], 3: [301, 600], 4: [601, 3400]} if xml_dfname.partnum_int is None or xml_dfname.partnum_int not in page_id_info: ...
[ "def get_id_from_filename(html_filename):\n\treturn html_filename[ html_filename.rindex('_') + 1 : -len('.html') ]", "def extract_visit(filename):\n # First, make sure we are only working with a filename, not a full path+filename\n filename = os.path.basename(filename)\n if filename.startswith('hst_'):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create empty fake page content files in output directory for testing, with part numbers
def setup_empty_pagecontent_files_parts(self, partnums): basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today) for partnum in partnums: filename = "{wiki}-{date}-pages-articles{partnum}.xml.bz2".format( wiki=self.en['wiki'].db_name, date=self.today, partnum=part...
[ "def setup_empty_pagecontent_file(self):\n basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)\n filename = \"{wiki}-{date}-pages-articles.xml.bz2\".format(\n wiki=self.en['wiki'].db_name, date=self.today)\n path = os.path.join(basedir, filename)\n with open(pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create single empty fake page content file in output directory (covering all page content for the wiki) for testing
def setup_empty_pagecontent_file(self): basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today) filename = "{wiki}-{date}-pages-articles.xml.bz2".format( wiki=self.en['wiki'].db_name, date=self.today) path = os.path.join(basedir, filename) with open(path, "w") as ...
[ "def setup_empty_pagecontent_files_parts(self, partnums):\n basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)\n for partnum in partnums:\n filename = \"{wiki}-{date}-pages-articles{partnum}.xml.bz2\".format(\n wiki=self.en['wiki'].db_name, date=self.today, p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read all sensors and publish the results to the MQTT broker
def publish(): print("Publishing Thread") client = start_client(PUBLISH_CLIENT_ID) while publishing: illuminance = read_light_sensor() temp, hum = read_temperature_humidity() readings = { 'pi1_timestamp': datetime.now().isoformat(), 'illuminance': read_light_s...
[ "def run(self):\n time.sleep(5)\n while(1):\n time.sleep(5)\n temperature = SensorData_Object.getTemperature()\n self.temp_value.set_value(temperature) # Publish Temperature Sensor Data\n \n humidity = SensorData_Object.getHumidity()\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listen for new messages on subscribed topic, start the publisher and
def listen(publisher): client = start_client(LISTEN_CLIENT_ID) client.subscribe(SUBSCRIBER) print('Subscribed to topic.') while listening: client.loop(.1)
[ "def listen(publisher):\n global client\n client.subscribe(SUBSCRIBER_TOPIC, 1, on_message)\n print('Subscribed to topic.')\n while listening:\n time.sleep(10)", "def start_mqtt():\n with app.app_context():\n sub = Subscriber()\n sub.subscribe()", "def subscribe(listener, top...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to run parallel training with flags from command line
def main(arg): del arg params_list = None model_ids = None nb_jobs = FLAGS.NB_JOBS if FLAGS.params: params_list = eval("config."+FLAGS.params) nb_jobs = min(FLAGS.NB_JOBS, len(params_list)) print('combinations: {}'.format(len(params_list))) elif FLAGS.model_ids: t...
[ "def main():\n\n # TODO: define:\n # step+noize\n # log scale instead of uniform\n\n # Define parametter: [min, max]\n dictParams = {\n \"batchSize\": [int, [1, 3]],\n \"learningRate\": [float, [1, 3]]\n }\n\n # Training multiple times with different parametters\n for i in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the randomizer's domain parameter distribution for one domain parameter.
def adapt_one_distr_param( self, domain_param_name: str, domain_distr_param: str, domain_distr_param_value: Union[float, int] ): for dp in self.domain_params: if dp.name == domain_param_name: if domain_distr_param in dp.get_field_names(): # Set the new...
[ "def _get_wrapper_domain_param(self, domain_param: dict):\n # Cast the factor value to int, since randomizer yields ndarrays or Tensors\n self._factor = int(domain_param.get(\"downsampling\", self._factor))", "def rescale_distr_param(self, param: str, scale: float):\n if not scale >= 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rescale a parameter for all distributions.
def rescale_distr_param(self, param: str, scale: float): if not scale >= 0: raise pyrado.ValueErr(given=scale, ge_constraint="0") for dp in self.domain_params: if hasattr(dp, param): # Scale the param attribute of the domain parameters object seta...
[ "def scale(self, value):\n\t\tfor val in self.dilutions:\n\t\t\tself.dilutions[val] /= value", "def _normalize_param_scaling(self):\n\n if 'original_units_in_meter' in self.properties: # pattern was scaled\n scaling = 100 / self.properties['original_units_in_meter']\n for parameter...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the application banner.
def print_banner(): print( """\033[92m _____ _ _ _ _____ _____ | | | | |_ _| __| | | | | | | | | | __| |_____|_____| |_| |__| @owtfp http://owtf.org Version: {0} \033[0m""".format( __version__ ) )
[ "def display_banner():\n\n banner = \"\"\"\n __ _____ ___ ___ __ \n / / / / _ \\/ _ \\ ____/ (_)__ ___ / /_\n / /_/ / // / ___/ / __/ / / -_) _ \\/ __/\n \\____/____/_/ \\__/_/_/\\__/_//_/\\__/ \n\n \"\"\"\n print(banner, flush=True)", "def _banner() -> None:\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
graph: the GraphWithV8 to find embeddings on. This code finds all the jumps for the given GraphWithV8. A jump is an edge between (1) two non-cofacial edges, (2) one vertex and one edge that are not cofacial, or (3) two non-cofacial vertices. Returns a list of new graphs, each containing one new jump.
def findJumps(graph, embedding): #check with stage2EmbeddingTest whether current graph embedds in firstCurrentlyEmbedding vertices = graph.getVertices() vertexFaces = getVertexFaces(vertices, embedding) vertexJumps = findVertexJumps(graph, vertices, vertexFaces) edges = graph.getEdges() edgeFace...
[ "def graph_to_cycles(graph: list, breakpoint_graph=False) -> list:\n nodes = defaultdict(list)\n\n for i in graph:\n nodes[i[0]].append(i[1])\n nodes[i[1]].append(i[0])\n\n # If the graph is the breakpoint graph, two ends of the synteny block is connected to each other\n if not breakpoint_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the diff for a given file name contains a change of the serialVersionUID.
def was_serial_id_changed(file_name): result = subprocess.run(['git', 'diff', '--cached', '--unified=0', file_name], stdout=subprocess.PIPE); lines = result.stdout.decode('utf_8').split("\n") for line in lines: if 'serialVersionUID' in line: print("found") return True ret...
[ "def _is_changed(self, concatted_file):\r\n tmp_concatted = '%s.tmp' % concatted_file\r\n if (os.path.exists(concatted_file) and\r\n os.path.getsize(concatted_file) == os.path.getsize(tmp_concatted)):\r\n orig_hash = self._file_hash(concatted_file)\r\n temp_hash = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given file contains a serialVersionUID.
def contains_file_serial_id(file_name): with open(file_name, 'rb', 0) as file, \ mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as file_contents: return file_contents.find(b'serialVersionUID') != -1
[ "def was_serial_id_changed(file_name):\n result = subprocess.run(['git', 'diff', '--cached', '--unified=0', file_name], stdout=subprocess.PIPE);\n lines = result.stdout.decode('utf_8').split(\"\\n\")\n for line in lines:\n if 'serialVersionUID' in line:\n print(\"found\")\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a file name has a .java ending
def is_java_file(file_name): return file_name.endswith(".java")
[ "def is_python_filename(name: str) -> bool:\n return os.path.exists(name) and name.endswith(\".py\")", "def isCFile(filename):\n return filename.endswith(\".c\") or filename.endswith(\".cpp\") or filename.endswith(\".cc\")", "def _check_name(self, filename: str) -> bool:\n pattern = r'[\\/\\\\\\:\\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up the matrix of the linear system with Dirichlet boundary conditions to be solved. A is block tridiagonal, with T on the block diagonal and I on the block off-diagonals, where T is the tridiagonal matrix with 4 on its diagonal and 1 on its off-diagonals, and I is the iden...
def setup_A(self): # first column of the toeplitz matrices that make up the diagonal of # the matrix column = scipy.concatenate((scipy.array([-4, 1]), scipy.zeros(self.northwall.len - 2))) # set up toeplitz matrices that make up the block diagonal of t...
[ "def LinearSystem(self):\n # assembly matrix of linear system\n # to solve u(t) based on u(t-1) and u(t-2)\n # the matrix includes all future values of u\n # in the entire grid, so size is the number of cells\n # start with zeros that is also the boundary condition u(t)=0\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
str -> str. Get the hash code from a URL.
def get_hash(url): response = requests.get(url) return response.text
[ "def get_hash(link, *, hash_type = 'md5'):\n data = requests.get(link).content\n m = eval('hashlib.{}(data)'.format(hash_type))\n return m.hexdigest()", "def path_hash(path_string: str):\n hashable = path_string.replace(\"\\\\\", \"/\")\n if not hashable.startswith(\"/\"):\n hash...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
int -> str. Builds a 7-character string from an int.
def int_to_7char_str(i): #the pins always have 7 digits pin = str(i) l = len(pin) if (l < 7): zeros = "" for j in range(7-l): zeros += "0" pin = zeros + pin return pin
[ "def convert_int(n: int) -> str:\n\n return str(n)", "def _str(i: int) -> str:\n if i < 0 or i > 999:\n raise ValueError(\"0 <= i <= 999\")\n if 0 <= i <= 9:\n s = \"__\" + str(i)\n elif 10 <= i <= 99:\n s = \"_\" + str(i)\n else:\n s = st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
dict -> void. Builds a dictionary storing all 10^7 seven-digit PINs, keyed by hash code.
def build_hash_dictionary(hash_dic): for i in range(10**7): pin = int_to_7char_str(i) hash_code = encode_hash(pin) hash_dic[hash_code] = pin
[ "def generateDictionary(n=4):\r\n m = 2**n\r\n d = dict()\r\n for i in range(m):\r\n format = \"{0:0\"+str(n)+\"b}\"\r\n binary = list(format.format(i))\r\n d[i] = [int(binary[j]) for j in range(len(binary))]\r\n\r\n return d", "def generate_num_dict(hand: list[str]) -> dict:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
str -> str. Hashes a PIN with MD5, for testing purposes.
def encode_hash(pin): return hashlib.md5(pin.encode()).hexdigest()
[ "def Radius_User_Password(password, secret, authenticator):\r\n password_length = struct.pack(\"!B\",len(password))\r\n padd_size = 16 - (len(password) % 16)\r\n \r\n try:\r\n p = password.encode(\"utf-8\")\r\n except AttributeError:\r\n p = password\r\n \r\n while padd_size > 0:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test accuracy of learned model using unary features only (i.e., just predicting each pixel independently). Implemented via a separate logistic regression.
def test_unary_only(self): num_features = 65 num_states = 2 all_pixel, all_label = load_all_images_and_labels(os.path.join(os.path.dirname(__file__), 'train_data'), num_features, 3) initial_w = np.zeros(num_features * num_states) res = minimize(objective, initial_w, method="L-B...
[ "def logistic_regression(x_train, x_test, y_train, y_test):\n\tlog_reg_model = LogisticRegression()\n\tlog_reg_model = log_reg_model.fit(x_train, y_train)\n\n\tpredicted_labels = log_reg_model.predict(x_test)\n\tprint(log_reg_model.score(x_test, y_test))", "def logistic_regression():\n train_x, test_x, train_y...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the spanning-tree edge appearance probability computations (used for TRBP) are correct.
def test_tree_probability_calculation(self): height = 3 width = 3 tree_prob = ImageLoader.calculate_tree_probabilities_snake_shape(width, height) assert (tree_prob[(0, 0), (0, 1)] == 0.75), "side edge probability does not equal to 0.75" assert (tree_prob[(0, 1), (0, 0)] == 0.75),...
[ "def test_random_spanning_tree_multiplicative_large():\n from math import exp\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n gamma = {\n (0, 1): -0.6383,\n (0, 2): -0.6827,\n (0, 5): 0,\n (1, 2): -1.0781,\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the loaded model has the correct matrix structure.
def test_model_matrix_structure(self): loader = ImageLoader(10, 10) train_dir = os.path.join(os.path.dirname(__file__), 'train_data') images, models, labels, names = loader.load_all_images_and_labels(train_dir, 2, 1) model = models[0] model.create_matrices() for edge...
[ "def test_model(model, datamodule):\n # TODO - replace expected shape\n expected_shape = (4, 1, 1)\n assert model(*next(datamodule.val_dataloader())).shape == expected_shape", "def testModel(self, path):\n l = ObjLoader()\n self.model = l.loadModel(path)\n self.model.setCanvas(self.m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays the main report from llvm-cov
def show_summary(cov_executable): print( subprocess.run( ["llvm-cov", "report", "--instr-profile=default.profdata", cov_executable] ) )
[ "def coverage_report(c):\n c.run('coverage html && open htmlcov/index.html', pty=True)", "def test():\r\n from spyderlib.utils.qthelpers import qapplication\r\n app = qapplication()\r\n widget = CoverageWidget(None)\r\n widget.show()\r\n widget.analyze(__file__)\r\n sys.exit(app.exec_())", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expand mpi sequences for each omp
def expand_mpi(self, mpi): for omp in self.omp: max_ranks = int(self.max_cores / omp) if mpi is None: self.mpi[omp] = tools.expand_power_sequence(largest=max_ranks) elif isinstance(mpi, int): self.mpi[omp] = tools.expand_power_sequence(largest...
[ "def moveSequence(i : int, seq): # TEST\n for elt in seq:\n elt.moveBy(i)", "def addGapsToHMMSeqs(self):\n for seq in self.records:\n seq.seq.insertAllGaps(self.total_gaps)", "def seq_solve(cols,rows,board,processfirst,seq_processor,seq_priority_stgy,\n cache):\n \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expand leaf sequences for each omp
def expand_leaf(self, leaf): for omp in self.omp: if leaf is None: if self.scaling_type == 'strong': max_ranks = int(self.max_cores / omp) self.leaf[omp] = max_ranks * np.array(self.leaf_per_max_rank) elif self.scaling_type == ...
[ "def expand_mpi(self, mpi):\n for omp in self.omp:\n max_ranks = int(self.max_cores / omp)\n\n if mpi is None:\n self.mpi[omp] = tools.expand_power_sequence(largest=max_ranks)\n elif isinstance(mpi, int):\n self.mpi[omp] = tools.expand_power_sequ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract multidimensional table of model timing data
def extract_data(self): print('Extracting performance data') omp_dict = {} for omp, omp_set in self.models.items(): leaf_dict = {} for leaf, leaf_set in omp_set.items(): mpi_dict = {} for mpi, mod in leaf_set.items(): ...
[ "def generate_time_tables(self):\n from dbanalysis.classes import weather_getter\n self.total_routes = 0\n self.failed_routes = 0\n w_getter = weather_getter.weather_getter()\n weather = w_getter.get_weather()\n import datetime\n dt = datetime.datetime.now()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return timing table of specific model
def get_model_table(self, leaf, omp, mpi): m = self.models[omp][leaf][mpi] return m.table
[ "def get_model_time(db_name, img_num, model_name):\n #print(\"Connecting to database...\")\n connection = connect_db(db_name)\n cursor = connection.cursor()\n \n cmd = 'SELECT performance FROM exec_data WHERE model_name=\\''+model_name+'\\' and img_num=\\''+str(img_num)+'\\''\n cursor.execute(cmd)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return array of total leaf blocks versus mpi
def get_leaf_blocks(self, leaf, omp): if self.scaling_type == 'strong': return np.full_like(self.mpi[omp], leaf) elif self.scaling_type == 'weak': return leaf * self.mpi[omp]
[ "def total_nodes(self):\n return np.vstack((self.nodes, self.hanging_nodes))", "def num_blocks(self):\n return self._num_blocks", "def _get_block_ids_for_sparse_super(self, total_block_count, blocks_per_group) \\\r\n -> []:\r\n block_ids = []\r\n total_block_group_count = int(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up fig and ax, checking whether ax was already provided
def _setup_fig_ax(self, ax): fig = None if ax is None: fig, ax = plt.subplots() return fig, ax
[ "def _validate_axes_instance(ax):\n if ax is None:\n ax = plt.gca()\n elif not isinstance(ax, mpl.axes.Axes):\n raise TypeError(\"ax must be a matplotlib.axes.Axes instance.\")\n\n return ax", "def _setup_figure(self):\n if self.fig_size:\n size = self.fig_size\n el...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for valid x_var and args
def _check_x_var(self, x_var, omp, mpi): x_map = {'omp': mpi, 'mpi': omp} name_map = {'omp': 'mpi', 'mpi': 'omp'} if x_map[x_var] is None: raise ValueError(f"must specify {name_map[x_var]} if x_var='{x_var}'")
[ "def check_arguments(self, args, func_obj, called_func):\n # variable, default_value: UnitXObject\n args_without_default = []\n for variable, default_value in self.defined_args:\n if not default_value:\n args_without_default.append([variable, None])\n\n #\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set axis properties for subplot (see plot_multiple)
def _set_ax_subplot(self, axes, x_var, y_var, row, col, omp, x_scale, y_scale): ax = axes[row, col] nrows = axes.shape[0] ncols = axes.shape[1] if col == 0: self._set_ax_text(ax=ax, omp=omp, fixed_var='omp') if self.scaling_type == 'strong...
[ "def configure_axes(fig, x_axis_kwargs, y_axis_kwargs):\n fig.update_xaxes(showline=True, linewidth=1, linecolor='black',\n ticks='outside')\n if x_axis_kwargs:\n fig.update_xaxes(**x_axis_kwargs)\n fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#CCC')\n fig.update_ya...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot dashed line on axis
def _set_ax_dashed(self, ax, y_var): x = [1, self.max_cores] if y_var == 'efficiency': ax.plot(x, [100, 100], ls='--', color='black') elif y_var == 'speedup': ax.plot(x, x, ls='--', color='black')
[ "def draw_dashed_line(surf, color, start_pos, end_pos, width=1, dash_length=10):\r\n origin = Point(start_pos)\r\n target = Point(end_pos)\r\n displacement = target - origin\r\n length = len(displacement)\r\n slope = displacement/length\r\n\r\n for index in range(0, int(length/dash_length), 2):\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a fake user id. This is used when FakeIdentServer is in play and the userid was initially set to None. It creates a random UUID as specified by RFC 4122.
def generate_fake_userid(): return str(uuid.uuid4())
[ "def generate_user_uid(user):\n\n return urlsafe_base64_encode(force_bytes(user.pk))", "def new_id():\n bs = uuid4().bytes\n return urlsafe_b64encode(bs).strip().replace('=', '')", "def create_unique_id():\n from uuid import uuid4\n\n return str(uuid4())", "def generate_uuid(self, user_name):\n\t\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dispatch a request onto an _IdentChannel instance.
def handle_accept(self): _IdentChannel(self.userid, *self.accept())
[ "def dispatch(self, req):\n check_type(req, Request)\n self.messages.put(req)", "def handle(self) -> None:\n while True:\n raw_command = self.request.recv(1024)\n if not raw_command:\n break\n result = dispatch(self.state, raw_command)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all instances of CollegeCoach
def collegecoachs_get(label=None, page=None, per_page=None): # noqa: E501 return query_manager.get_resource( label=label, page=page, per_page=per_page, rdf_type_uri=COLLEGECOACH_TYPE_URI, rdf_type_name=COLLEGECOACH_TYPE_NAME, kls=CollegeCoach)
[ "def online_colleges():\n college_list = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/college_info.json'))\n return render_template('online_colleges.html',title='collegeSMART - Online Colleges',colleges=college_list)", "def collegecoachs_id_get(id): # noqa: E501\n\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a single CollegeCoach by its id
def collegecoachs_id_get(id): # noqa: E501 return query_manager.get_resource(id=id, rdf_type_uri=COLLEGECOACH_TYPE_URI, rdf_type_name=COLLEGECOACH_TYPE_NAME, kls=CollegeCoach)
[ "def get_course(course_id):\r\n try:\r\n course = db.session.query(Course).filter_by(id=course_id).one()\r\n return course\r\n except NoResultFound:\r\n return None", "def getCourse(self, courseId):\n courseList = self.getCourseList()\n for course in courseList:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an approximation for the amount of power the sail derives from the wind.
def get_sail_power(angle_to_wind): if closest_starboard < angle_to_wind % tau < closest_port: return 0.2 s = sin(angle_to_wind) return ( 0.4 * s * s + 0.1 * cos(angle_to_wind) + 0.6 # get a little bit anyway )
[ "def get_solar_generator_power(self):\n return self._get_content_of_own_consumption()[5]", "def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))", "def estimate_power(self):\n p = self._solve_power_for_pct(.50)\n p05 = norm._cdf(self._compute_stouffer_z_at_power(.051...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an approximation for the heeling moment at a given angle to the wind.
def get_heeling_moment(angle_to_wind): a = angle_to_wind % tau if a > pi: a -= tau if closest_starboard < a < closest_port: return 0 return sin(0.5 * a) - 0.25 * sin(1.5 * a)
[ "def solar_angle(hour):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.results.solar.angle\", \r\n hour)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)", "def haversin(angle):\n return ((...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
looks through keys_list and picks the entries out of the database that have an objType in the passed list. Return order is arbitrary.
def searchObjTypeList(self,keys_list=None,objType_list=[".obj.pub",".obj.pub.article",".obj.pub.book"]): if not keys_list: keys_list = self.getEntryList() return [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType in objType_list]
[ "def searchObjTypeDerive(self,keys_list=None,query_objType=\".obj.pub\"):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType[:len(query_objType)] == query_objType]", "def get_objects(obj_type):\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
searches through keys_list and selects the keys for which the entry in the database has an objType string that begins with the string query_objType. Return order is arbitrary.
def searchObjTypeDerive(self,keys_list=None,query_objType=".obj.pub"): if not keys_list: keys_list = self.getEntryList() return [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType[:len(query_objType)] == query_objType]
[ "def searchObjTypeList(self,keys_list=None,objType_list=[\".obj.pub\",\".obj.pub.article\",\".obj.pub.book\"]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType in objType_list]", "def searchKeywords(self,keys...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
searches through keys_list and picks the entries out of the database that have keywords in the list_of_keywords set. Return order is sorted from most matching keywords to fewest. Search is case insensitive.
def searchKeywords(self,keys_list=None,keyword_list=[]): if not keys_list: keys_list = self.getEntryList() #we make the query set case insensitive by converting all the strings to lowercase list_of_keywords_lower = map(lambda x: x.lower(), keyword_list) #we define a function that checks how many element...
[ "def join_strings_by_keywords(list, keywords, join=' '):\n res = []\n append = False\n for i, elem in enumerate(list):\n if (append):\n try:\n res[-1] = res[-1] + join + elem\n except:\n res.append(elem)\n append = False\n con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
searches through keys_list and picks the entries out of the database that have all the authors in the specified list. Return order is arbitrary. Search is case sensitive.
def searchAuthors(self,keys_list=None,author_list=[]): if not keys_list: keys_list = self.getEntryList() #only select keys for which all the authors in the query list are in the obj.authors set return [k for k in self.searchObjTypeDerive(keys_list) if set(author_list).issubset(self.entries[k].authors)]
[ "def searchKeywords(self,keys_list=None,keyword_list=[]):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\t#we make the query set case insensitive by converting all the strings to lowercase\n\t\tlist_of_keywords_lower = map(lambda x: x.lower(), keyword_list)\n\n\t\t#we define a function th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
searches through keys_list and picks the entries for which the object in the database has attribute defined and == value. Return order is arbitrary. Default is to search attribute='objType' with value='.obj.pub'.
def searchAttribute(self,keys_list=None,attribute="objType",value=".obj.pub"): if not keys_list: keys_list = self.getEntryList() return [k for k in keys_list if k in self.getEntryList() and hasattr(self.entries[k],attribute) and getattr(self.entries[k],attribute) == value ]
[ "def searchObjTypeDerive(self,keys_list=None,query_objType=\".obj.pub\"):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\treturn [k for k in keys_list if k in self.getEntryList() and self.entries[k].objType[:len(query_objType)] == query_objType]", "def searchObjTypeList(self,keys_list=N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sorts publications in keys_list by the year of the corresponding database entry. Return order is such that [0] is the newest and [-1] is the oldest, unless invert is specified as True (default is False).
def sortByYear(self,keys_list=None,invert=False): if not keys_list: keys_list = self.getEntryList() r_list = self.searchObjTypeDerive(keys_list,query_objType=".obj.pub") r_list.sort(key=lambda x : self.entries[x].year,reverse=not invert) return r_list
[ "def sort_by_year(sort_list):\n sort_list.sort(key=lambda song: song.year)", "def sortByTitle(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entrie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sorts publications in keys_list alphabetically by the first author of the corresponding database entry. Return order is 0 to 9, then A to Z, then a to z, then special characters.
def sortByFirstAuthor(self,keys_list=None): if not keys_list: keys_list = self.getEntryList() r_list = self.searchObjTypeDerive(keys_list,query_objType=".obj.pub") r_list.sort(key = lambda x: self.entries[x].authors[0] ) return r_list
[ "def sortByTitle(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].title )\n\t\treturn r_list", "def sortByYear(self,keys_list=None,invert=False):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sorts publications in keys_list alphabetically by the title of the corresponding database entry. Return order is 0 to 9, then A to Z, then a to z, then special characters.
def sortByTitle(self,keys_list=None): if not keys_list: keys_list = self.getEntryList() r_list = self.searchObjTypeDerive(keys_list,query_objType=".obj.pub") r_list.sort(key = lambda x: self.entries[x].title ) return r_list
[ "def sortByFirstAuthor(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].authors[0] )\n\t\treturn r_list", "def sortByYear(self,keys_list=None,inv...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make an access token request and get new token(s). If auth_code is passed then both access and refresh tokens will be requested, otherwise the existing refresh token is used to request an access token.
def google_token_request(self, auth_code=None): # Build request parameters. Order doesn't seem to matter, hence using dict. token_request_data = { 'client_id': self.client_id, 'client_secret': self.client_secret, } if auth_code is None: # Use existing ...
[ "def get_access_token(self, auth_code: str):\n api_url = f'{self.root}/token'\n params = {\n 'client_id': self.client_id,\n 'client_secret': self.secret,\n 'code': auth_code,\n 'grant_type': 'authorization_code',\n 'redirect_uri': self.callback_url,\n }\n auth_header = self.secu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an instance of BlackjackMDP where peeking is the optimal action at least 10% of the time.
def peekingMDP(): # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this) # raise Exception("Not implemented yet") return BlackjackMDP(cardValues = [16, 5, 4], multiplicity = 3, threshold = 20, peekCost = 1) # END_YOUR_CODE
[ "def trainCFR(deck,history,players,reachProbs,currentPlayer,sets,limit,absLevel,forgetful,probabilistic):\n his = deepcopy(history)\n #if game over, return payoff and halt recursion\n if isTerminal(his,players):\n #if last player folded, current p gets pot\n if his[-1] == \"Fold\":\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a copy of the move
def copy(self): return Move(self.x, self.y, self.z, self.dir)
[ "def move_copy(self, position):\n return Coord(self.x + position[0], self.y + position[1])", "def _move(self, at, to):\n copy = self.copy()\n i, j = at\n r, c = to\n copy.board[i][j], copy.board[r][c] = copy.board[r][c], copy.board[i][j]\n return copy", "def next_move(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the flags for the move to start animating
def start(self): self.animating = True self.finished = False self.angle = 0
[ "def start_move_beam_mark(self):\n QApplication.setOverrideCursor(QCursor(Qt.BusyCursor))\n self.emit(\"infoMsg\", \"Move beam mark\")\n self.in_move_beam_mark_state = True\n self.start_graphics_item(\\\n self.graphics_move_beam_mark_item,\n start_pos = self.graph...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return url for the notice detail
def notice_detail_url(notice_id): return reverse('information:notice-detail', args=[notice_id])
[ "def get_absolute_url(self):\n return ('view-note', (), {'category': self.category, 'slug': self.slug})", "def issue(self):\n return self._url('issue')", "def get_note_url(note):\n client = get_unauthorized_evernote_client()\n return 'https://%s/Home.action?#n=%s' % (client.service_host, not...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create and return a sample scope
def sample_scope(description='General', **kwargs): defaults = {} defaults.update(kwargs) return models.Scope.objects.create(description=description, **defaults)
[ "def training_scope(**kwargs):\n return lib.training_scope(**kwargs)", "def sub_scope(self, kind, name, node, lineno):\n generator = kind(self.space, name, node, lineno, self.symbols,\n self.compile_info)\n return generator.assemble()", "def create_one_sample():\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
create and return sample notice
def sample_notice(source, **kwargs): defaults = { 'scope': sample_scope(), 'title': 'Test title', 'message': 'Lorem ipsum dolor sit amet', } defaults.update(kwargs) return models.Notice.objects.create(source=source, **defaults)
[ "def send_notice(notice):\n\tlogging.debug(\"called : %s\", __name__)\n\n if notice is None:\n\t\tlogging.error(\"empty notice is recieved\")\n return\n\n\ttime = notice.print_time\n\ttitle = notice.title\n\tbody = view.get_text_notice(notice, True)\n\n\ttprint(\"Sending notice {} dated {}.\"....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test model attributes against a payload, with instance being self in a testcase class
def test_all_model_attributes(instance, payload, model, serializer): ignored_keys = ['image'] relevant_keys = sorted(set(payload.keys()).difference(ignored_keys)) for key in relevant_keys: try: instance.assertEqual(payload[key], getattr(model, key)) except: instance.asse...
[ "def test_to_check_instance_variables(self):\n self.assertEquals(self.new_source.id, 'newsbyelkwal')\n self.assertEquals(self.new_source.name, 'My News')\n self.assertEquals(self.new_source.description, 'get the latest updates')\n self.assertEquals(self.new_source.url, 'https://google.com')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test retrieving a notice's detail
def test_retrieve_notice_detail(self):
    notice = sample_notice(source=self.user)
    serializer = serializers.NoticeSerializer(notice, context=serializer_context)
    url = notice_detail_url(notice_id=notice.id)
    res = self.client.get(url)
    self.assertEqual(res.status_code, status...
[ "def test_notification_get(self):\n pass", "def _ReadLMNoticeContents(self):\n return self.RemoteCommand(f'type {self.temp_dir}\\\\{self._LM_NOTICE_LOG}')[0]", "def test_validate_notice(session, desc, valid, doc_type, notice, mhr_num, account, message_content):\n # setup\n json_data = get_valid_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test partially updating a notice's detail using patch
def test_partial_update_notice(self):
    notice = sample_notice(source=self.user)
    scope = sample_scope(description='Private', is_general=False)
    scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)
    payload = {
        'scope': scope_serializer.data['url'],
        ...
[ "def test_full_update_notice(self):\n notice = sample_notice(source=self.user)\n scope = sample_scope(description='Private test', is_first_year=True)\n scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)\n payload = {\n 'source': self.user.id,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test updating a notice's detail using put
def test_full_update_notice(self):
    notice = sample_notice(source=self.user)
    scope = sample_scope(description='Private test', is_first_year=True)
    scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)
    payload = {
        'source': self.user.id,
        '...
[ "def test_partial_update_notice(self):\n notice = sample_notice(source=self.user)\n scope = sample_scope(description='Private', is_general=False)\n scope_serializer = serializers.ScopeSerializer(scope, context=serializer_context)\n payload = {\n 'scope': scope_serializer.data[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main function. Runs simulation based on the given four blocks.
def main():
    B1 = ['R', 'Y', 'B', 'G', 'G', 'R']
    B2 = ['Y', 'Y', 'R', 'B', 'G', 'R']
    B3 = ['G', 'B', 'G', 'R', 'Y', 'B']
    B4 = ['Y', 'R', 'Y', 'B', 'Y', 'G']
    Blocks = [B1, B2, B3, B4]
    print("This is the solution for the box:")
    print("*********************************")
    Soln = simulate(Bloc...
[ "def run_experiment():\n \n print_instructions(instructions)\n print_instructions(instructions2)\n run_blocks(PRACTICE_BLOCKS, f, True) \n print_instructions(instructions3)\n run_blocks(BLOCKS, f)\n print_instructions(exit_message)\n save_and_quit(f)", "def main():\n # TODO (gina) : co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Aggregate the peers sampled
def aggregate_peer_samples(self):
    with open("peer_samples.csv", "w") as out_file:
        out_file.write("peer_id\n")
        for _, filename, _ in self.yield_files('peer_samples.csv'):
            with open(filename) as samples_file:
                for peer_sample in samples_file.readlines():
                    ...
[ "def aggregate_weights(self, clients_params):", "def get_sender_set_distribution_full(self):\n sender_set = self.get_sender_set()\n distro_set = {}\n total = len(sender_set)\n for node in sender_set:\n distro_set[node] = 1.0 / + total\n\n return distro_set", "def te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save cpu in the database. This method is called for every item pipeline component.
def process_item(self, item, spider):
    session = self.Session()
    cpu = models.CPU(**item)
    try:
        session.add(cpu)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
    return item
[ "def cpu(self, cpu):\n if cpu is None:\n raise ValueError(\"Invalid value for `cpu`, must not be `None`\") # noqa: E501\n\n self._cpu = cpu", "def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu):\r\n self._validate_vm_cpu(vm_cpu)\r\n self._change_vm_cpu(vapp_or_vm_id, vm_cpu)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Returns the final fitted shape.
def final_shape(self):
[ "def output_shape(self):\n return None", "def shape(self) -> S:", "def split_shape(self):\n return self.__split_shape", "def shape(self):\n return self._input.shape", "def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)", "def shape_a(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Returns the initial shape from which the fitting started.
def initial_shape(self):
[ "def _get_weights_shape(self, kwargs):\n if kwargs['shape'] is not None:\n return kwargs['shape']\n else:\n if 'initial_value' not in kwargs:\n raise ValueError(\n '`initial_value` is not in kwargs: cannot infer the shape.')\n elif callable(kwargs['initial_value']):\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Returns a copy of the fitted image with the following landmark
def fitted_image(self):
    image = Image(self.image.pixels)
    image.landmarks['initial'] = self.initial_shape
    image.landmarks['final'] = self.final_shape
    if self.gt_shape is not None:
        image.landmarks['ground'] = self.gt_shape
    return image
[ "def crop_landmark2(image, landmarks, part, show_crop=False):\n dims = np.load('landmark_dims.npy')\n\n if (part == \"left eyebrow\" or part == 0):\n rango = range(17, 22)\n w, h = dims[0] // 2\n elif (part == \"right eyebrow\" or part == 1):\n rango = range(22, 27)\n w, h = dim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Generates a list containing the transforms obtained at each fitting iteration.
def transforms(self):
    return [self.fitter.transform.from_vector(p) for p in self.shape_parameters]
[ "def transform(self, X):\n result = []\n for name, transformer in self.transformer_list:\n result.append(transformer.transform(X))\n\n return result", "def transforms(self) -> TransformationsList:\n transforms = TransformationsList(self)\n depends_on = self.get_field(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Returns the final transform.
def final_transform(self):
    return self.fitter.transform.from_vector(self.shape_parameters[-1])
[ "def transform(self):\n return self.interface.transform", "def get_transform(self):\n transform = self.InvertedMercatorLatitudeTransform(self.thresh)\n return transform", "def transform(self):\n ext = os.path.splitext(self.filename)[-1][1:]\n t = self.transformers.get(ext, lam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Returns the initial transform from which the fitting started.
def initial_transform(self):
    return self.fitter.transform.from_vector(self.shape_parameters[0])
[ "def get_transform(self):\n transform = self.InvertedMercatorLatitudeTransform(self.thresh)\n return transform", "def transform(self):\n return self.interface.transform", "def start_coordinates(self):\n return self.transformation.from_system", "def getTransform(self):\n\tbuffer = m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" The list containing the warped images obtained at each fitting iteration.
def warped_images(self):
    mask = self.fitter.template.mask
    transform = self.fitter.transform
    interpolator = self.fitter.interpolator
    return [self.image.warp_to(mask, transform.from_vector(p),
                               interpolator=interpolator)
            for p in self.shape...
[ "def warped_images(self):\n mask = self.algorithm_results[-1].fitter.template.mask\n transform = self.algorithm_results[-1].fitter.transform\n interpolator = self.algorithm_results[-1].fitter.interpolator\n\n warped_images = []\n for s in self.shapes():\n transform.set_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" The list containing the appearance reconstruction obtained at each fitting iteration.
def appearance_reconstructions(self):
    if self.appearance_parameters:
        return [self.fitter.appearance_model.instance(w)
                for w in self.appearance_parameters]
    else:
        return [self.fitter.template for _ in self.shapes]
[ "def appearance_reconstructions(self):\n return flatten_out(\n [f.appearance_reconstructions for f in self.algorithm_results])", "def getDisplacements(self):\n return np.array(self.disps)", "def get_representative_fits(self):\n\n\t\treturn self._repfit_list", "def get_all_posteriors(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" The list containing the error images obtained at each fitting iteration.
def error_images(self):
    template = self.fitter.template
    warped_images = self.warped_images
    appearances = self.appearance_reconstructions

    error_images = []
    for a, i in zip(appearances, warped_images):
        error = a.as_vector() - i.as_vector()
        error_image = tem...
[ "def error_images(self):\n return flatten_out(\n [f.error_images for f in self.algorithm_results])", "def errors(self):\n return [thread.err for thread in self._threads]", "def get_errors(self):\n result = []\n for error in self.errors:\n result.append(os.path.b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" The number of levels of the fitter object.
def n_levels(self):
    return self.fitter.n_levels
[ "def count_levels(self):\r\n lcount = 0\r\n rcount = 0\r\n if self.left:\r\n lcount = self.left.count_levels()\r\n if self.right:\r\n rcount = self.right.count_levels()\r\n return 1 + max(lcount, rcount)", "def _get_number_of_alpha_levels(self):\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" The total number of iterations used to fitter the image.
def n_iters(self):
    n_iters = 0
    for f in self.algorithm_results:
        n_iters += f.n_iters
    return n_iters
[ "def getNIterations(self):\n return self.n_iterations", "def num_passed_iterations(self) -> int:\n\n return self._num_passed_iterations", "def min_num_iterations():\n err = 1e6\n count = 0\n ERROR_BOUND = 1e-4\n while (err > ERROR_BOUND):\n bkp_utils = utilities.copy()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" The final fitted shape.
def final_shape(self):
    final_shape = self.algorithm_results[-1].final_shape
    return self._affine_correction.apply(final_shape)
[ "def output_shape(self):\n return None", "def shape(self) -> S:", "def split_shape(self):\n return self.__split_shape", "def shape(self):\n return self._input.shape", "def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape", "def state_sha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }