Columns: content (string, 35–416k chars) · sha1 (string, 40 chars) · id (int64, 0–710k)
def normalize_column_name(column_name):
    """
    Puts a CSV column name into a "normalized" form for comparison.
    """
    norm_column_name = column_name
    if norm_column_name is not None:
        norm_column_name = norm_column_name.strip()
        norm_column_name = norm_column_name.lower()
    return norm_column_name
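A brief usage sketch (editor's addition, not part of the original row): the normalizer trims whitespace and lowercases, passing None through unchanged.

assert normalize_column_name("  First Name ") == "first name"
assert normalize_column_name(None) is None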
ef817bc16d3fea634fbdcde0cfb280902496018f
19,917
def get_entry(root_node: dict, entry_name: str, default_value):
    """Gets the entry from the root node or creates one with the default
    value if none exists in the root node

    Arguments:
        root_node {dict} -- Root node
        entry_name {str} -- Entry name
        default_value {[type]} -- Default value

    Returns:
        [type] -- Entry
    """
    node = default_value
    try:
        node = root_node[entry_name]
    except (KeyError, TypeError):
        # Entry missing (or root node not subscriptable): keep the default.
        pass
    return node
57c7ddb0dc546677ccf58f731dbc19e42e925dbb
19,920
import re


def replace_aea_add_statements(
    content: str, old_string: str, new_string: str, type_: str
) -> str:
    """Replace statements of the type: 'aea add <type> <old_string>'."""
    if type_ != "agents":
        content = re.sub(
            fr"aea +add +{type_} +{old_string}",
            f"aea add {type_} {new_string}",
            content,
        )
    return content
b0670f2ffc759266c2609be21efda86ca83a3ea1
19,921
import csv


def gete2wlandw2el(datafile):
    """Get dict inputs for the EM algorithm.

    Parameters
    ----------
    datafile: str
        path to datafile. The data file itself must be a csv file with 3
        columns: question, worker, answer, and the first row is the column
        names.

    Returns
    -------
    dict
        Indexed by question. Each value is a list of workers and labels
        assigned by them. For example, e2wl['1'][3] -> ['4', '3'] means that
        question '1' was answered as '3' by worker '4'.
    dict
        Indexed by worker name. Each value is a list of questions and labels
        assigned by the worker. For example, w2el['4'][0] -> ['1', '3'] means
        that worker '4', when asked question '1', assigned the label '3'.
    list
        list of unique labels in the dataset
    """
    e2wl = {}
    w2el = {}
    label_set = []

    with open(datafile, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for line in reader:
            example, worker, label = line
            if example not in e2wl:
                e2wl[example] = []
            e2wl[example].append([worker, label])
            if worker not in w2el:
                w2el[worker] = []
            w2el[worker].append([example, label])
            if label not in label_set:
                label_set.append(label)
    return e2wl, w2el, label_set
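A hedged usage sketch (editor's addition; the CSV contents are invented for illustration): write a tiny 3-column file, then read it back into the two views plus the label set.

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as tmp:
    tmp.write("question,worker,answer\n1,4,3\n1,5,3\n")
e2wl, w2el, label_set = gete2wlandw2el(tmp.name)
assert e2wl == {'1': [['4', '3'], ['5', '3']]}
assert w2el == {'4': [['1', '3']], '5': [['1', '3']]}
assert label_set == ['3']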
11af97514625b6750a14540bf9790c904d6cc141
19,922
def extract_info_attributes(turn):
    """Extract information attributes for current round using NLU annotations.

    Args:
        turn: Current round information

    Returns:
        get_attribute_matches: Information attributes
    """
    user_annotation = eval(turn["transcript_annotated"])
    assistant_annotation = eval(turn["transcript_annotated"])
    annotation = user_annotation + assistant_annotation
    # annotation = user_annotation
    all_intents = [ii["intent"] for ii in annotation]
    get_attribute_matches = []
    for index, intent in enumerate(all_intents):
        if any(
            ii in intent for ii in ("DA:ASK:GET", "DA:ASK:CHECK", "DA:INFORM:GET")
        ):
            # If there is no attribute added, default to info.
            if "." not in intent:
                get_attribute_matches.append("info")
                continue
            attribute = intent.split(".")[-1]
            if attribute == "info":
                new_matches = [
                    ii["id"].split(".")[-1]
                    for ii in annotation[index]["slots"]
                    if "INFO" in ii["id"]
                ]
                if len(new_matches):
                    get_attribute_matches.extend(new_matches)
                else:
                    get_attribute_matches.append("info")
            elif attribute != "":
                get_attribute_matches.append(attribute)
    return sorted(set(get_attribute_matches))
b995fbf439a7a3a7886e288319c680e45bba11cb
19,923
import os


def sys_kdialog_path():
    """
    Returns the path of kdialog if found in system $PATH
    """
    steampath = os.environ['PATH'].split(':')
    syspath = [x for x in steampath if 'steam-runtime' not in x]
    for path in syspath:
        kdialog_path = os.path.join(path, 'kdialog')
        if os.path.exists(kdialog_path) and os.access(kdialog_path, os.X_OK):
            return kdialog_path
    return False
14794781e7249cd1956132fa2bad661ce9a4345f
19,924
def unIndex(i, n, trunc):
    """
    Converts a 1-dimensional index ``i`` with truncation ``trunc`` and
    number of modes ``n`` to a n-ary index.
    """
    return [i // trunc**(n - 1 - m) % trunc for m in range(n)]
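A worked example (editor's addition): with two modes and truncation 3, index 5 decodes into its base-3 digits, most significant first.

assert unIndex(5, 2, 3) == [1, 2]  # 5 = 1*3 + 2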
ef4d72e264de6225ea20eafc6d15f2f9721e2a9c
19,925
from typing import List
from typing import Tuple


def get_matching_rules(rules: List[Tuple[str, ...]],
                       word_to_lemmatize: str) -> List[Tuple[str, ...]]:
    """
    Filter out rules whose affix does not end the given word to lemmatize.

    :param rules: list of rules
    :param word_to_lemmatize: str
    :return: list of filtered rules
    """
    chopping_rules = list()
    for rule in rules:
        if word_to_lemmatize.endswith(rule[3].split('/')[0]):
            chopping_rules.append(rule)
    return chopping_rules
0fa32e06c8741825c0a2adedf8888844b51fa7df
19,927
def to_list(results):
    """
    Purpose: Simplify the ComputeSkipGrams result set

    :param results: a ComputeSkipGrams result set, which looks like this
        [(u'Problems', u'installing'), (u'Problems', u'adobe'),
         (u'Problems', u'acrobat'), ... ]
    :return: a list of strings, which looks like this
        ["Problems installing", "Problems adobe", "Problems acrobat", ... ]
    """
    the_list = []
    for result in list(results):
        the_list.append(" ".join(list(result)))
    return the_list
8c6e245a61303fbbc035f1d2f4b36d6e17a66970
19,928
def recursive_string_find(pattern, string, start=0):
    """Recursive search function.

    Returns list of indices of `pattern` in `string`.
    """
    pos = string.find(pattern, start)
    if pos == -1:
        # Not found!
        return []
    # No need for else statement
    return [pos] + recursive_string_find(pattern, string, pos + len(pattern))
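A small check (editor's addition): non-overlapping occurrences are collected left to right.

assert recursive_string_find("ab", "abxab") == [0, 3]
assert recursive_string_find("z", "abc") == []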
e2c4d7daf39272cf738a3de3b08a40e3911c9ca2
19,929
def _est_transp_recursive(transp, name):
    """
    Internal function used by `estimate_transposition`
    """
    if isinstance(transp, int):
        return transp
    try:
        for pattern, result in transp:
            if pattern.search(name) is not None:
                # print('matching name to', name, pattern.pattern)
                est = _est_transp_recursive(result, name)
                return est
        return None
    except (TypeError, ValueError):
        # Report a malformed TRANSPOSITIONS table for this instrument.
        raise Exception(('Format error in TRANSPOSITIONS detected while '
                         'determining transposition for instrument "{0}"')
                        .format(name))
81833c255c67eecb8a1ea4b82ea675d660bbc6a5
19,931
def longest_common_prefix(string1, string2):
    """get the longest common prefix in two strings"""
    i = 0
    # Stop at the shorter string and compare characters at the same index.
    while i < len(string1) and i < len(string2) and string1[i] == string2[i]:
        i += 1
    return string1[:i]
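A quick check (editor's addition):

assert longest_common_prefix("flower", "flow") == "flow"
assert longest_common_prefix("dog", "racecar") == ""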
3647752e9f953abc1fd63a4ed2b2c724faee3c1f
19,932
import math


def get_color(a, cmin, cmax):
    """\
    Convert a float value to one of a continuous range of colors.

    Rewritten to use recipe 9.10 from the Python Cookbook.
    """
    try:
        a = float(a - cmin) / (cmax - cmin)
    except ZeroDivisionError:
        a = 0.5  # cmax == cmin
    blue = min((max((4 * (0.75 - a), 0.)), 1.))
    red = min((max((4 * (a - 0.25), 0.)), 1.))
    green = min((max((4 * math.fabs(a - 0.5) - 1., 0)), 1.))
    return '#%1x%1x%1x' % (int(15 * red), int(15 * green), int(15 * blue))
d300ee2a93a89cea23b1379dff031d6fd117e700
19,934
from typing import List


def binary_search(arr: List[int], key: int) -> int:
    """Returns the index of a given key in a sorted array, or -1 if key not found.

    1. Get left and right indices.
    2. Calculate the mid.
    3. Depending on whether the key is bigger or smaller than mid, update
       left or right.

    Space: O(1)
    Time: O(log n)

    :param arr: The sorted array to search
    :param key: The key to search for
    :return: The index of the key if found, -1 otherwise
    """
    left = 0
    right = len(arr) - 1
    while left <= right:
        mid = left + (right - left) // 2
        if key > arr[mid]:
            left = mid + 1
        elif key < arr[mid]:
            right = mid - 1
        else:
            return mid
    # Key not found
    return -1
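Usage sketch (editor's addition):

assert binary_search([1, 3, 5, 7, 9], 7) == 3
assert binary_search([1, 3, 5, 7, 9], 4) == -1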
ba3e268890eb0dbab8033df56bcf6941df08819c
19,936
def convert_image_name(cur, imgNum):
    """Converts imgNum to image file name without extension, as used in
    adjacency2 table

    Parameters:
    -----------
    cur : MySQLdb cursor
    imgNum : str
        IMG_Number field entry for table image
    """
    # Parameterized query so imgNum is safely escaped by the driver.
    sql = ("select IMG_File "
           "from image "
           "where IMG_Number = %s")
    cur.execute(sql, (imgNum,))
    return cur.fetchone()[0].split('.')[0]
ca1087422415115bd703859ad330673f9b893b42
19,938
import os


def enumerate_test_files():
    """Build a list of all test .py files within Course Builder."""
    ret = []
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if (dirpath.startswith('./modules/') and
                    (filename.endswith('_test.py') or
                     filename.endswith('_tests.py')) or
                    (dirpath.startswith('./tests/') and
                     filename.endswith('.py'))):
                path = os.path.join(dirpath, filename)
                ret.append(path)
    return ret
f585a013f9af63e2ba19678506db200ddcc046d0
19,939
import os


def repo_with_submodule(git_identity, tempdir, checkouts, cc):
    """Repository with a submodule used to test detection of non-tracking
    branches in submodules. Will be created in a subdirectory of `tempdir`,
    whose path will be returned."""
    # Create a new repository:
    d = os.path.join(tempdir, 'with-submodules')
    os.mkdir(d)
    system = 'git'
    cc([system, 'init'], cwd=d)
    # Add a remote repo as a submodule:
    submodule_name = system + ' (submodule)'
    remote = os.path.join(checkouts, system + '-clean')
    cc([system, 'submodule', 'add', '-f', remote, submodule_name], cwd=d)
    cc([system, 'commit', '-m', 'Initial commit with submodule.'], cwd=d)
    # Create a non-tracking branch in the submodule:
    submodule_dir = os.path.join(d, submodule_name)
    cc([system, 'checkout', '-b', 'non-tracking'], cwd=submodule_dir)
    return d
52a2cb7be0a01ac59a57b24a27d66081f8f90a08
19,940
def multiprocess_fn(pool, fn, input_list, opts=()):
    """multiprocessing util tool"""
    # Immutable default for opts; it is only read, never mutated.
    results = [
        pool.apply_async(fn, args=(x, ) + tuple(opts)) for x in input_list
    ]
    results = [p.get() for p in results]
    return results
eb716aa83de582d904f7e75ba47de8b437e33c0e
19,943
def force_num(string, output="int"):
    """
    Input: string that ideally can be converted to a number
    Returns: converted number or 0 if conversion isn't possible
    """
    convert_dict = {"int": int, "float": float}
    try:
        return convert_dict[output](string)
    except (KeyError, ValueError, TypeError):
        # Unknown output type, unparsable string, or non-string input: 0.
        return 0
73c2129f0ff26c718d650d87f7d5bf9177651a0d
19,944
def horner(c, x):
    """
    horner(c,x)

    Evaluate a polynomial whose coefficients are given in descending order
    in `c`, at the point `x`, using Horner's rule.
    """
    n = len(c)
    y = c[0]
    for k in range(1, n):
        y = x*y + c[k]
    return y
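A worked example (editor's addition): the coefficients [2, 3, 4] encode 2x^2 + 3x + 4, so evaluating at x = 5 gives 2*25 + 3*5 + 4 = 69.

assert horner([2, 3, 4], 5) == 69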
f5e161ad1694cc76dea3026af594f026f6d8e606
19,945
import re


def sanitize(text):
    """
    Remove non-urls word by word.
    """
    text = text.strip()
    text = re.sub(r'\s\s+', ' ', text)

    # Keep only words that look like URLs.
    tmp = ''
    for i in text.split(' '):
        if len(i) >= 5:
            if i.find('www.') != -1 or i.find('http') != -1:
                tmp += i + ' '
    tmp = tmp.strip()

    # Strip a matching pair of surrounding brackets: ()[]{}<>.
    tmp2 = ''
    for i in tmp.split(' '):
        if i and ((i[0] == '(' and i[-1] == ')') or
                  (i[0] == '[' and i[-1] == ']') or
                  (i[0] == '<' and i[-1] == '>') or
                  (i[0] == '{' and i[-1] == '}')):
            tmp2 += i[1:-1] + ' '
        else:
            tmp2 += i + ' '
    tmp2 = tmp2.strip()

    # Prefix bare www. addresses with a scheme.
    tmp = ''
    for i in tmp2.split(' '):
        if i.find('www.') == 0:
            tmp += 'http://' + i + ' '
        else:
            tmp += i + ' '
    return tmp.strip()
f2d524678648c8b7bea88631bfa49dadded96f98
19,946
from typing import Tuple
from typing import Type
from typing import Any


def get_types_filter(desired_types: Tuple[Type[Any], ...]):
    """Returns a value filter that only keeps values with the given types."""
    return lambda arg_value: arg_value.type in desired_types
a245199b6d7bfa7203828b45d32cd62e1900e811
19,947
def _tf(word_occured_in_doc: int, total_words_in_doc: int) -> float:
    """Term frequency of a word in certain document.

    See: https://bit.ly/3zEDkMn
    """
    assert word_occured_in_doc <= total_words_in_doc
    return word_occured_in_doc / total_words_in_doc
32bd03d0b068ad229b7d9871bf3665643d35021e
19,948
def a_max(f, lst):
    """Finds max"""
    val_max = lst[0]
    for l in lst:
        if f(l) > f(val_max):
            val_max = l
    return val_max
69fb35c9a5fc7ab7e26fa0eb0d36c8ced03d242d
19,949
def pcmatrixrow(matrix, form, trans):
    """
    Returns the row for the specified form and transposition level.

    Parameters
    ----------
    matrix : tuple
        A pitch class matrix created by pcmatrix().
    form : 'p' | 'r' | 'i' | 'ri'
        The row form to return: 'p' is prime, 'r' is retrograde, 'i' is
        inversion, and 'ri' is retrograde-inversion.
    trans : pc
        The pitch class transposition level of the row to return.

    Returns
    -------
    A tuple containing the pitch classes for the given form and transposition.
    """
    size = len(matrix)
    assert 0 <= trans < size, "Not a valid transposition level: {}.".format(trans)
    row = col = 0
    if form in ['p', 'r']:
        while row < size:
            if matrix[row][col] == trans:
                break
            row += 1
        assert row < size, "Not a valid row transposition: {}.".format(row)
        return matrix[row] if form == 'p' else matrix[row][::-1]
    elif form in ['i', 'ri']:
        while col < size:
            if matrix[row][col] == trans:
                break
            col += 1
        assert col < size, "Not a valid row transposition: {}.".format(col)
        rng = range(0, size) if form == 'i' else reversed(range(0, size))
        return tuple(matrix[r][col] for r in rng)
    else:
        raise Exception("Not a valid row form: {}".format(form))
76a149cefcb9c3420cfb0c569c17687b4c49cba1
19,952
def _return_kwargs(dummy_req, **kwargs):
    """A dummy api call that simply returns its keyword arguments."""
    return kwargs
fa43ffcd84ee2016f24441306a2b92c37e6484c8
19,953
def apply_options(args):
    """Apply options"""
    if args.notifications:
        args.notifications = [
            {"uid": notification} for notification in args.notifications
        ]
    return args
f0570990a9372179a5d54de29996bb5b20553fe9
19,954
from typing import Dict
from typing import Callable


def map_dict(d1: Dict, d2: Dict, f: Callable) -> Dict:
    """
    Return f(d1.k, d2.k), a function of two dicts, matching on key.

    @param d1: dictionary A
    @param d2: dictionary B (must have same keys as d1)
    @param f: function
    @return: dictionary where the values are an arbitrary function of the
        values of two input dictionaries
    """
    assert d1.keys() == d2.keys()
    ks = d1.keys()
    D = {}
    for k in ks:
        D[k] = f(d1[k], d2[k])
    return D
12b38bff20d88567fd26fe7026084ad047df5f32
19,956
def get_A_door_270_f_i():
    """Entrance door area of unit dwelling i on floor f facing the direction
    270° clockwise from the main opening direction ............ Eq. (13d)

    Args:

    Returns:
        float: entrance door area of unit dwelling i on floor f facing the
        direction 270° clockwise from the main opening direction (m2)
    """
    return 0.0
d3559b7bdcb14c1755a9d67decc145f6bcf208a7
19,958
def html_code__macro_confluence(text):
    """
    Wrap into html macro
    :param text:
    :return:
    """
    return ('''\
<ac:structured-macro ac:name="html" ac:schema-version="1">
<ac:plain-text-body><![CDATA[{text}]]></ac:plain-text-body>
</ac:structured-macro>
''').format(text=text)
93d23f282657375d9b9443da3a0c3e7534338076
19,959
def get_clk_name(clk):
    """Return the appropriate clk name"""
    if clk == 'main':
        return 'clk_i'
    else:
        return "clk_{}_i".format(clk)
1204f7db18b74ede3afa72ed2aa434f9b2dac984
19,960
def people_at_risk_without_test(tx):
    """
    Method that queries the database for collecting all people that appeared
    in a covid exposure but haven't done the test yet

    :param tx: session
    :return: nodes of person
    """
    query = (
        "MATCH(i:Person)-[inf: COVID_EXPOSURE]->(p:Person) "
        "WHERE NOT EXISTS { MATCH (i)-[inf: COVID_EXPOSURE]->(p), (p3:Person)-[m: MAKE_TEST]->(:Test) "
        "WHERE m.date >= inf.date AND id(p) = id(p3) } "
        "RETURN p , ID(p)"
    )
    result = tx.run(query).data()
    return result
eb8e28fa9e3a72aa28e047372b27e95fe0ba8c8b
19,963
import os
import hashlib
import mmap


def file_checksum(fname):
    """Return md5 checksum of file.

    Note: only works for files < 4GB.

    Parameters
    ----------
    fname : str
        File used to calculate checksum.

    Returns
    -------
    checksum : str
    """
    size = os.path.getsize(fname)
    # Open read-only in binary mode for hashing via mmap.
    with open(fname, "rb") as f:
        checksum = hashlib.md5(
            mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)
        ).hexdigest()
    return checksum
d50c93cf8d860b7f0ea0d6b2cda5132e4bebd134
19,964
def remove_xenon(te_img, xe_img, te_iso, xe_iso, clamp_neg=True):
    """
    Based on the abundances of the Xe and Te isotopes in question, and the
    Xe images, calculates the expected component of the Te image that
    actually comes from Xe and subtracts it.

    By default the function assumes any pixels that become negative after
    subtraction contain no tellurium and so clamps them to 0.
    """
    # Percentage abundance of different Xe isotopes
    xe_abundances = {'124': 0.095,
                     '126': 0.089,
                     '128': 1.910,
                     '129': 26.401,
                     '130': 4.071,
                     '131': 21.232,
                     '132': 26.909,
                     '134': 10.436,
                     '136': 8.857}

    # Check that the isotope requested is xenon contaminated. Return the
    # input array if not, and print a warning.
    if str(te_iso) not in xe_abundances:
        print('{0} is not contaminated with Xe. Input image returned.'.format(str(te_iso)))
        return te_img

    ratio = xe_abundances[str(te_iso)] / xe_abundances[str(xe_iso)]
    scaled_xe = xe_img * ratio
    subtracted = te_img - scaled_xe

    # Clamp negative pixels to zero if clamp_neg
    if clamp_neg:
        subtracted[subtracted < 0] = 0

    return subtracted
25c1f3d8d8f600290c1d41b520458dc6a00fc54a
19,965
def remove_utc(time_string):
    """Removes UTC from end of time string.

    The purpose is to clean up time strings so they can be converted to
    Numpy datetime64 objects, since datetime64 cannot be initialized if
    the input string includes the UTC marker.

    Args:
        - time_string (string): timestamp string from Purple Air dataset.

    Returns:
        - (string): the cleaned-up timestamp string (without "UTC")
    """
    if time_string.endswith("UTC"):
        return time_string[:-3].strip()
    else:
        return time_string
f1f4a1cb6f8a1068a48610b4a821ef1eac3c7fb8
19,967
def get_partial_source(source, begin, end, marks=None, nb_digits=None):
    """Extracts a few relevant lines from a source file.

    If the number of lines would exceed a certain limit (currently 7), the
    function is called recursively to only show a few lines at the beginning
    and at the end.
    """
    if nb_digits is None:
        nb_digits = len(str(end))
    no_mark = " {:%d}: " % nb_digits
    with_mark = " -->{:%d}: " % nb_digits
    continuation = " ..."
    if marks is None:
        marks = []
    result = []
    if end - begin > 7:
        # Suppose the source spans line numbers 1 to 12. Splitting it to show
        # a partial result would show lines 1 to 3, followed by 10 to 12.
        # If we want the indentation of the first part to match the
        # indentation of the second part, we must specify the length of the
        # indentation of the first part.
        # See the traceback for MismatchedBracketsError in the
        # documentation for an example.
        result.append(
            get_partial_source(
                source, begin, begin + 2, marks=marks, nb_digits=nb_digits
            )
        )
        result.append(continuation)
        result.append(get_partial_source(source, end - 2, end, marks=marks))
    else:
        lines = source.split("\n")
        for index, line in enumerate(lines, start=1):
            if index < begin:
                continue
            if index > end:
                break
            if index in marks:
                result.append(with_mark.format(index) + line)
            else:
                result.append(no_mark.format(index) + line)
    return "\n".join(result)
e0b2e57ba169f3ee0824e25e5c210a12224d53e4
19,968
from typing import Literal
import os


def shareable_scope(fixture_name, config) -> Literal["session", "function"]:
    """Return either session or function scope, depending on the
    TEST_SHARED_FIXTURES envvar.

    This function can be used as a scope like this:

        @pytest.fixture(scope=shareable_scope)
        def myfixture(...)
            ...
    """
    return 'function' if os.environ.get('TEST_SHARED_FIXTURES') is None else 'session'
7ad890b8a835739e4b08c2675310f3f00fd1adc9
19,969
def get_max_length(graphs):
    """
    Get the max length among sequences.
    """
    max_length = 0
    for cascade_id in graphs:
        # traverse the graphs for max length sequence
        for sequence in graphs[cascade_id]:
            max_length = max(max_length, len(sequence[0]))
    return max_length
2dfa82526a291c08bfef0c8e5edb06b693caa224
19,970
def YUVtoYDbDr(Y, U, V):
    """
    Convert YUV (PAL) to YDbDr color.

    :param Y: Y value [0, 1]
    :param U: U value [-0.436, 0.436]
    :param V: V value [-0.615, 0.615]
    :return: YDbDr tuple (Y in [0, 1]; Db, Dr in [-1.333, 1.333])
    """
    Db = 3.059 * U
    Dr = -2.169 * V
    return Y, Db, Dr
0977b91b6621d39c64ede773a44e3b15f3d52c0c
19,971
def get_hyperparams():
    """Returns a list of hyperparams to run."""
    # test some initial hyperparams
    # import itertools
    # for lr, wd, a in itertools.product([1e-4, 1e-3, 1e-2], [0, 1e-6, 1e-4],
    #                                    ['flips', 'full']):
    #     yield {'learning_rate': lr, 'weight_decay': wd, 'augmentation': a}
    # test batch size with transposed
    return [{'loss_function': 'tc-vae', 'beta_weight': 0.3},
            {'loss_function': 'tc-vae', 'beta_weight': 1},
            {'loss_function': 'tc-vae', 'beta_weight': 3},
            {'loss_function': 'tc-vae', 'beta_weight': 10}]
663120b1cd80fb2020b38c0dc055915e6e46c2bf
19,972
def quantum_state_preparation_qiskit(circuit, parameters, n=2):
    """
    Parameters
    ----------
    circuit: qiskit circuit
    parameters: n*2 array
        each row contains rx and ry parameters

    Returns
    -------
    qiskit circuit
    """
    q = circuit.qregs
    for i in range(len(q)):
        circuit.rx(parameters[i][0], q[i])
        circuit.ry(parameters[i][1], q[i])
    circuit.cx(q[1], q[0])
    return circuit
5443d7a4c29c6ab003f71e345d5a07925f538c21
19,973
def check_existence(species, specieslist, speciesidmap):
    """
    Check if the produced species is new in the DSD system.

    :param species: produced species
    :param specieslist: list of species in the system
    :param speciesidmap: bi-directional map of species id and species
        canonical form
    :return: specieslist, speciesidmap, pos
    """
    if not speciesidmap.inverse.__contains__(species.canonicalform):
        species.set_id(len(specieslist) + 1)
        specieslist.append(species)
        speciesidmap.put(species.id, species.canonicalform)
        pos = species.id
    else:
        pos = speciesidmap.inverse[species.canonicalform]
    return specieslist, speciesidmap, pos
e9056fb9fd874b5223a3d1d7f06d6a67782612aa
19,974
def make_group(feature_df, key='friday_date'):
    """group maker for rank learning"""
    return feature_df.groupby(key).size().to_frame('size')['size'].to_numpy()
b4635ba8585dde070e4fe9cd147cad8b3ae0ad7f
19,976
def string_to_dict(string):
    """Return dictionary from string "key1=value1, key2=value2"."""
    if string:
        pairs = [s.strip() for s in string.split(",")]
        return dict(pair.split("=") for pair in pairs)
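Usage sketch (editor's addition); note the function implicitly returns None for an empty string.

assert string_to_dict("key1=value1, key2=value2") == {"key1": "value1", "key2": "value2"}
assert string_to_dict("") is None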
bf9de94d8bd54a2f65fc44ebbf8388f8fda58999
19,979
def compare_match(server_names, certificate_sans):
    """
    Compares if the certificate would secure this domain.

    :param server_names: list of ServerName values
    :param certificate_sans: list of SAN's that would be secured by the
        certificate
    :return: list of matching server names
    """
    matches = []
    for server_name in server_names:
        # exception use case: sn: "www.test.fr" san: "*.test.de, *.test.fr"
        for certificate_san in certificate_sans:
            sans = []
            if ',' in certificate_san:
                sans.extend(certificate_san.split(','))
            else:
                sans.append(certificate_san)
            # exception use case:
            # sn: "FEDC:ba98:7654:3210:FEDC:BA98:7654:3210"
            # san: "FEDC:BA98:7654:3210:FEDC:ba98:7654:3210"
            if ':' in server_name and ':' in certificate_san:
                if server_name == certificate_san:
                    matches.append(server_name)
                else:
                    continue
            for san in sans:
                server_name = server_name.strip().lower()
                san = san.strip().lower()
                server_name_split = server_name.split('.')
                san_split = san.split('.')
                server_name_level = len(server_name_split)
                san_level = len(san_split)
                server_name_base = ".".join(server_name_split[-server_name_level + 1:])
                san_base = ".".join(san_split[-san_level + 1:])
                if server_name == san:
                    if server_name not in matches:
                        matches.append(server_name)
                if server_name_level == san_level:
                    if san_split[0] == '*' and (server_name_base == san_base):
                        if server_name not in matches:
                            matches.append(server_name)
    return matches
dd9916731a122abc75db0c817dfa4fb2584e12fd
19,980
def firstUniqChar_v3(s: str) -> int:
    """Slowest approach. Worst case complexity is O(n^2).

    Not much better than v2.
    """
    for k, c in enumerate(s):
        if s.find(c) == s.rfind(c):
            return k
    return -1
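A small check (editor's addition): in "loveleetcode" the first character whose find() and rfind() agree is 'v' at index 2.

assert firstUniqChar_v3("loveleetcode") == 2
assert firstUniqChar_v3("aabb") == -1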
432012c1aa9681c320f26fc565925b379a13ffdc
19,981
def dicts_equal(d1: dict, d2: dict) -> bool:
    """
    Checks whether the contents of two dicts are equal
    """
    if d1 and d2:
        if len(d1) != len(d2):
            # Dicts of different sizes cannot be equal (otherwise a strict
            # subset would compare as equal).
            return False
        d2_keys = d2.keys()
        for key1, value1 in d1.items():
            if key1 not in d2_keys:
                return False
            if d2[key1] != value1:
                return False
    elif not d1 and not d2:
        return True
    else:
        return False
    return True
c5be29d9a9fd667bca7c1815e9141d2fb46a3691
19,982
from typing import List


def deduplicate(image_data: List[bytes]) -> List[bytes]:
    """
    Remove any identical images.
    """
    print(f'Size before de-duplication: {len(image_data)}')
    uniques = []
    for image in image_data:
        if not any(image == unique_image for unique_image in uniques):
            uniques.append(image)
    print(f'Size after de-duplication: {len(uniques)}')
    return uniques
fbb338ac70d306992fadb61e44e6e82553127819
19,983
def _submit_run(webclient, config):
    """
    Submits a single run using the web interface of the VerifierCloud.
    @return: the run's result
    """
    run_result_future = webclient.submit_witness_validation(
        config.witness_file, config.program_file,
        config.configuration, config.cloud_user)
    webclient.flush_runs()
    return run_result_future.result()
fc0be1f88d9d5486b7a2ec141046d2eab99b3bc9
19,985
def get_expr_for_end_pos(field_prefix="v.", pos_field="start", ref_field="ref"):
    """Compute the end position based on start position and ref allele length"""
    return "%(field_prefix)s%(pos_field)s + %(field_prefix)s%(ref_field)s.length - 1" % locals()
ecdb1a03d37105c92cca28d4d4ee5e3a9bdd542a
19,986
def cell_value(cell):
    """Returns cell value or an empty string."""
    value = getattr(cell, 'value', '')
    if value is None or str(value).startswith('='):
        # we don't calculate expressions
        return ''
    else:
        return str(value)
8bce8818b0928042eab6b313ed4cd36e97ca90f5
19,987
def partition_app_list(app_list, n):
    """
    :param app_list: A list of apps with models.
    :param n: Number of buckets to divide into.
    :return: Partition apps into n partitions, where the number of models in
        each list is roughly equal. We also factor in the app heading.
    """
    num_rows = sum([1 + len(x['models']) for x in app_list])  # + 1 for app title
    num_rows_per_partition = num_rows / n

    result = [[] for i in range(n)]  # start with n empty lists of lists
    partition = 0
    count = 0

    for a in app_list:
        # will the app fit in this column or overflow?
        c = len(a['models']) + 1  # the +1 is for the app title
        # if we're not on the last partition, and the models list fits
        # more on the next partition than this one, start the next partition.
        if (partition < n - 1) and (count + c / 2.0 > num_rows_per_partition):
            partition += 1
            count = 0
        result[partition].append(a)
        count += c

    return result
8d29daab146c4888b831bb09b2932e056a743d66
19,990
def reward_function(params):
    """
    Example of rewarding the agent to stay inside two borders and penalizing
    getting too close to the objects in front
    """
    all_wheels_on_track = params["all_wheels_on_track"]
    distance_from_center = params["distance_from_center"]
    track_width = params["track_width"]
    objects_distance = params["objects_distance"]
    _, next_object_index = params["closest_objects"]
    objects_left_of_center = params["objects_left_of_center"]
    is_left_of_center = params["is_left_of_center"]

    # Initialize reward with a small number but not zero
    # because zero means off-track or crashed
    reward = 1e-3

    # Reward if the agent stays inside the two borders of the track
    if all_wheels_on_track and (0.5 * track_width - distance_from_center) >= 0.05:
        reward_lane = 1.0
    else:
        reward_lane = 1e-3

    # Penalize if the agent is too close to the next object
    reward_avoid = 1.0

    # Distance to the next object
    distance_closest_object = objects_distance[next_object_index]
    # Decide if the agent and the next object is on the same lane
    is_same_lane = objects_left_of_center[next_object_index] == is_left_of_center

    if is_same_lane:
        if 0.5 <= distance_closest_object < 0.8:
            reward_avoid *= 0.5
        elif 0.3 <= distance_closest_object < 0.5:
            reward_avoid *= 0.2
        elif distance_closest_object < 0.3:
            reward_avoid = 1e-3  # Likely crashed

    # Calculate reward by putting different weights on
    # the two aspects above
    reward += 1.0 * reward_lane + 4.0 * reward_avoid

    return reward
a9f1490f999abfe819df300e522e74820edb821e
19,991
def _parse_overscan_shape(rows, columns):
    """
    Parse the number of overscan rows and columns into indices that can be
    used to reshape arrays.

    :param rows: The number of overscan rows.
    :type rows: int
    :param columns: The number of overscan columns.
    :type columns: int
    """
    if rows == 0 and columns == 0:
        return (0, 0)
    if rows == 0 and columns > 0:
        return (-1, columns)
    if rows > 0 and columns == 0:
        return (rows, -1)
    if rows > 0 and columns > 0:
        return (rows, columns)
e3b6bb9c5d5837e9628fcd226f2484d44d4ce454
19,992
import numpy


def get_pdf_value(hist, edges, pos):
    """Get the value of a pdf for a given x.

    :param hist: list of histogram values
    :param edges: list of bins
    :param pos: position on the support
    :return: pdf value
    """
    ed = numpy.digitize(pos, edges).tolist()
    if ed > len(hist) - 1:
        return hist[-1]
    return hist[ed]
fb09eebed11e1f95c1cb9d27dc87c3dc6a758dc0
19,995
def get_window_context(idx, tree, size):
    """Return a list of words within a 2*size window around the idx position."""
    return [node.token
            for node in tree[max(0, idx - size):idx] + tree[idx + 1:idx + size + 1]]
7e61105f278757505dbcff0f98a3f144844476be
19,997
def _form_function_call(node):
    """
    Reconstructs function call strings when making attribute access calls.
    """
    node_vals = node.value
    output = "."
    for param in node_vals[3:]:
        if param.dumps()[0] == "(":
            output = output[0:-1] + param.dumps()
            return output
        else:
            output += param.dumps() + "."
19b47c3fc27e1b514ce6673503b847d8df89fc7f
19,998
def value_in(value, choices):
    """Raise an exception if a value doesn't match one of the given choices."""
    if value not in choices:
        raise ValueError("Expected one of %s, received %r." % (
            ", ".join([repr(choice) for choice in choices]), value))  # pragma: no cover
    return value
98abbbcc09c3b042a4ccd359986ae9325d96c442
19,999
import collections


def cal_frac_aneu(ploidy, ploidy_list):
    """
    Calculate the fraction of aneuploidy for each ploidy in the list.

    Examples
    --------
    >>> ploidy = [0, 1, 2, 4, 4]
    >>> ploidy_list = [0, 1, 2, 3, 4]
    >>> cal_frac_aneu(ploidy, ploidy_list)
    [0.2, 0.2, 0.2, 0, 0.4]

    Parameters
    ----------
    ploidy : a list of ploidy
    ploidy_list : a list of ploidy

    Returns
    -------
    a list of ploidy fractions
    """
    total = len(ploidy)
    counts = collections.Counter(ploidy)
    frac = []
    for dos in ploidy_list:
        if counts[dos]:
            frac.append(round(counts[dos] / total, 2))
        else:
            frac.append(0)
    return frac
3940e750d2cd309ba6e341c0f13c42e9b911e0ea
20,000
import torch


def signal_framing(signal, frame_length, frame_step, dim=-1):
    """Framing with stride tricks.

    (https://git-lium.univ-lemans.fr/Larcher/sidekit/-/blob/master/frontend/features.py)
    """
    dim = dim % signal.dim()
    shape = list(signal.size())
    nframes = (shape[dim] - frame_length + frame_step) // frame_step

    if nframes > 0:
        shape = shape[:dim] + [nframes, frame_length] + shape[dim + 1:]
        strides = list(signal.stride())
        strides.insert(dim, frame_step * strides[dim])
        return torch.as_strided(signal, size=shape, stride=strides)
    else:
        return signal.unsqueeze(dim=dim)
d4fdf578816fe4804350747660b76f8c3d528757
20,001
def build_texts_from_movies(path_to_movie_dat):
    """
    Extracts genre text from movies.dat to create semantic embeddings.

    :param path_to_movie_dat:
    :return: dict of text list keyed by movie_id
    """
    texts = {}
    with open(path_to_movie_dat, "r", encoding="ISO-8859-1") as f:
        for line in f:
            movie_id, title_and_year, genres = line.strip("\n").split("::")
            title = title_and_year[:-7]
            # year = title_and_year[-5:-1]
            sorted_genres = sorted(genres.split("|"))
            texts[movie_id] = [title] + sorted_genres
    return texts
e98a8e5eedee7a983431246f0e4968d6f4daaa40
20,002
import copy


def override_repo_refs(repos, override_ref=None, overrides=None):
    """
    Returns a new `repos` dictionary with the CLI overrides applied.
    """
    overrides = overrides or {}
    if not override_ref and not overrides:
        return repos

    repos_copy = copy.deepcopy(repos)
    for repo, repo_data in repos.items():
        if not repo_data:
            continue
        release_data = repo_data.get("openedx-release")
        if not release_data:
            continue
        local_override = overrides.get(str(repo), override_ref)
        if local_override:
            repos_copy[repo]["openedx-release"]["ref"] = local_override
    return repos_copy
83df5d47b6ceba385d05e2c94341fec9c559ea0d
20,003
import csv


def get_entities(entities_file):
    """Returns an array of Medical Entities.

    :param entities_file: Entities file csv
    :return: Array<[term:str, score:int]>
    """
    entities = []
    with open(entities_file, encoding='utf8') as ds1_file:
        csv_reader = csv.reader(ds1_file, delimiter=',')
        for row in csv_reader:
            entities.append([str(row[0]), row[1]])
    return entities
038b3b04aa43906149e7d6799ac66ba9f7d719c0
20,005
import os
import json


def unset_pending(version_folder):
    """Modify pending alert level to match true alert level.

    :param version_folder: Folder containing event.json file which needs to
        be unset.
    :returns: True if changed, False if alert_level already matches
        true_alert_level.
    """
    eventfile = os.path.join(version_folder, 'json', 'event.json')
    with open(eventfile, 'rt') as f:
        jdict = json.load(f)
    if jdict['pager']['alert_level'] == jdict['pager']['true_alert_level']:
        return False
    jdict['pager']['alert_level'] = jdict['pager']['true_alert_level']
    with open(eventfile, 'wt') as f:
        json.dump(jdict, f)
    return True
2619a71481f307d32f5b9200753096e1e9f7603a
20,007
def some_different(instructions, bitpos):
    """Return True if there are ones and zeros and no don't cares,
    nonterminals or operand deciders"""
    zero = '0'
    one = '1'
    zeros = 0
    ones = 0
    for i in instructions:
        if i.ipattern.bits[bitpos].value == zero:
            zeros += 1
        elif i.ipattern.bits[bitpos].value == one:
            ones += 1
    if zeros > 0 and ones > 0:
        return True
    return False
3efe01a1dffac42d3ab3c2bdfe3fdb090d063733
20,008
def secondsToMMSS(secs):
    """Convert number of seconds to the string ``mm:ss``.

    Note:
        If the number of minutes is greater than 100, it will
        be displayed as such.

    Args:
        secs (int): Number of seconds.

    Returns:
        str: String in the format of ``mm:ss``.
    """
    secs = int(secs)
    minutes, seconds = divmod(secs, 60)
    return '{:02d}:{:02d}'.format(minutes, seconds)
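Usage sketch (editor's addition), including the over-an-hour case the note describes:

assert secondsToMMSS(65) == '01:05'
assert secondsToMMSS(3725) == '62:05'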
6cf218ac1b45e2a338bd97bc28472f08077cbcf2
20,009
def image_property_delete(client, prop_ref, image_ref, session=None):
    """
    Used internally by _image_property_create and image_property_update
    """
    return client.image_property_delete(prop_ref=prop_ref, image_ref=image_ref)
d50fdc0d9b01fb49d3d5246c346d1982fd99fc18
20,010
def pad_to_max_seq_length(ls, max_seq_length, pad_idx=0, pad_right=True, check=True):
    """Apply padding to an input sequence.

    Args:
        ls: sequence to pad.
        max_seq_length: max length up to which to apply padding.
        pad_idx: element to use for padding.
        pad_right: True if padding is applied to right side of sequence,
            False to pad on left side.
        check: True if result length should be checked as under the max
            sequence length.

    Returns:
        Sequence with specified padding applied.
    """
    padding = [pad_idx] * (max_seq_length - len(ls))
    if pad_right:
        result = ls + padding
    else:
        result = padding + ls
    if check:
        assert len(result) == max_seq_length
    return result
687954fcda10e14e93b994df2c43e320b29731fd
20,011
import os


def at_top_worktree(git_worktree):
    """
    Return True if we are at the root of the worktree
    """
    return os.getcwd() == git_worktree.root
8793b674fc3d2c2b33624287e27ab554edc44dbc
20,012
from subprocess import call
import os
import copy


def load_ifortenv(ifortdir=None, x86=False):
    """Load environment variables"""
    if ifortdir is None:
        if os.path.isdir('C:/Program Files (x86)/IntelSWTools/compilers_and_libraries'
                         '/windows/bin/'):
            # ver18?? or generic?
            ifortdir = ('C:/Program Files (x86)/IntelSWTools/compilers_and_libraries'
                        '/windows/bin/')
        elif os.path.isdir('C:/Program Files (x86)/IntelSWTools/compilers_and_libraries_2017'
                           '/windows/bin/'):
            # ver17
            ifortdir = ('C:/Program Files (x86)/IntelSWTools/compilers_and_libraries_2017'
                        '/windows/bin/')
        elif os.path.isdir('C:/Program Files (x86)/IntelSWTools/compilers_and_libraries_'
                           '2016/windows/bin/'):
            # ver16
            ifortdir = ('C:/Program Files (x86)/IntelSWTools/compilers_and_libraries_2016'
                        '/windows/bin/')
        elif os.path.isdir('C:/Program Files (x86)/Intel/Composer XE 2013 SP1/'):
            # ver14
            ifortdir = 'C:/Program Files (x86)/Intel/Composer XE 2013 SP1/'
        elif os.path.isdir('C:/Program Files (x86)/Intel/Composer XE 2013/'):
            # ver14
            ifortdir = 'C:/Program Files (x86)/Intel/Composer XE 2013/'
    if ifortdir is None:
        return -1

    if x86:
        configstr = 'ia32'
    else:
        configstr = 'intel64'

    with open('tempbat.bat', 'w') as p:
        p.write('call "{}/ifortvars.bat" {}'.format(ifortdir, configstr))
        p.write('\nset > tempenvs.txt')

    # run the wrapper batch file, creates tempenvs.txt
    call('tempbat.bat')

    # import the variables to the current environment
    with open('tempenvs.txt', 'r') as f:
        lines = f.read().splitlines()

    # cleanup
    os.remove('tempenvs.txt')
    os.remove('tempbat.bat')

    _env = copy.deepcopy(os.environ)
    for line in lines:
        pair = line.split('=', 1)
        _env[pair[0]] = pair[1]
    return _env
87524f513aa272d2840c4aa72dbac77775f77c60
20,013
def get_languages(project_data):
    """
    Get the available languages for the crowdin project.

    Parameters
    ----------
    project_data : dict
        Crowdin project data.

    Returns
    -------
    dict
        Available languages on crowdin.
    """
    result = {}
    for language in project_data["targetLanguages"]:
        result[language["locale"]] = {"id": language["id"], "name": language["name"]}
    return result
87d4c845e2d64e90d93add2600eae99c25960864
20,014
def get_list_of_revisions():
    """List of known Raspberry Pi Revisions.

    Provides a list of known Raspberry Pi CPU IDs and the corresponding
    Raspberry Pi model name ("revision").

    Args:

    Returns:
        dict: dictionary of Raspberry Pi Revisions
    """
    known_revisions = {'0002': 'Model B R1',
                       '0003': 'Model B R1',
                       '0004': 'Model B R2',
                       '0005': 'Model B R2',
                       '0006': 'Model B R2',
                       '0007': 'Model A',
                       '0008': 'Model A',
                       '0009': 'Model A',
                       '000d': 'Model B R2',
                       '000e': 'Model B R2',
                       '000f': 'Model B R2',
                       '0010': 'Model B+',
                       '0011': 'Compute Module',
                       '0012': 'Model A+',
                       'a01041': 'Pi 2 Model B',
                       'a21041': 'Pi 2 Model B',
                       '900092': 'Pi Zero',
                       '900093': 'Pi Zero',
                       'a02082': 'Pi 3 Model B',
                       'a22082': 'Pi 3 Model B',
                       '9000c1': 'Pi Zero W',
                       'c03111': 'Pi 4 Model B',
                       'abcdef': 'TestModel',
                       '0000': 'Unknown'}
    return known_revisions
74020870f657d6a9b472577df64fada9f1ef161d
20,015
def name(who):
    """Return the name of a player."""
    return "Player {0}".format(who)
ab75e42a9dc70217a148475d32779da0b1e81d75
20,018
import random


def process_field(form, field):
    """ sample """
    # `Field` is assumed to be defined elsewhere in the source module.
    try:
        field_name = field.get("name")
        field_type = field.get("type")
        field_value = field.get("value")
        types = ["hidden", "submit", "button", "file", "reset"]
        if field_name is not None:
            if field_type is not None and field_type not in types:
                if ":" in field_name:
                    name = field_name.split(":")
                    field_name = name[-1]
                return field_type + " " + field_name
            return
        if field_type == "hidden":
            return Field(field_name, field_type, field_value)
        elif field_type == "text":
            try:
                if field_name != "j_captcha_response":
                    fuzz_pattern = ""
                    name = field_name.split(":")
                    final_name = name[len(name) - 1]
                    if final_name == "studentNumber":
                        fuzz_pattern = "19"
                    elif final_name == "documentIdNumber":
                        fuzz_pattern = "0123456789"
                    elif final_name == "email":
                        fuzz_pattern = "mail@ist.utl.pt"
                    return Field(field_name, field_type, fuzz_pattern)
            except AttributeError:
                # Some input attributes are blank or aren't of type
                # 'string', which can't be coerced; so, we just ignore
                # the errors.
                pass
        elif field_type == "radio":
            radio_options = form.find_all("input", {"type": "radio"})
            selected = radio_options[random.randrange(len(radio_options))]
            return Field(selected.get("name"), field_type, selected.get("value"))
        elif field_type == "checkbox":
            checkboxes = form.find_all("input", {"type": "checkbox"})
            selected = checkboxes[random.randrange(len(checkboxes))]
            if selected.has_attr("value"):
                return Field(selected.get("name"), field_type, selected.get("value"))
            else:
                return Field(selected.get("name"), field_type, "on")
        elif field_type == "date":
            pass
        elif field_type == "email":
            return Field(field_name, field_type, "example@example.com")
        elif field_type == "search":
            pass
    except AttributeError:
        # Some input attributes are blank or aren't of type 'string', which
        # can't be coerced; so, we just ignore the errors.
        pass
6c832dc172fda33a27db99d52a86e1bca614e9a8
20,019
def tv(five_year_fcf, wacc, g=0.03):
    """Returns terminal value using Gordon Growth formula."""
    last_fcf = five_year_fcf[-1]
    return last_fcf * (1 + g) / (wacc - g)
965175a6d83687cf22d477d6e2aa4755fc48228e
20,020
import torch
from typing import Optional
from typing import Union
from typing import List
from typing import Dict
from typing import Any
from typing import Iterable


def _get_param_id_to_param(
    model: torch.nn.Module,
    optim_input: Optional[Union[
        List[Dict[str, Any]],
        Iterable[torch.nn.Parameter],
    ]] = None,
) -> List[torch.nn.Parameter]:
    """
    Constructs a mapping from parameter IDs to parameters. This may be used
    both for models with ``FlatParameter`` s and without.

    NOTE: We critically assume that, whether the optimizer input is a list of
    parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
    enumerates the parameter IDs in order. In other words, for a parameter
    list input, the parameter IDs should be in that list order, and for a
    parameter groups input, the parameter IDs should be in order within each
    parameter group and in order across parameter groups.

    Args:
        model (torch.nn.Module): Model whose parameters are passed into the
            optimizer.
        optim_input (Optional[Union[List[Dict[str, Any]],
            Iterable[torch.nn.Parameter]]]): Input passed into the optimizer
            representing either a :class:`list` of parameter groups or an
            iterable of parameters; if ``None``, then this method assumes the
            input was ``model.parameters()``. (Default: ``None``)

    Returns:
        List[torch.nn.Parameter]: Mapping from parameter IDs to parameters,
        where the parameter ID is implicitly the index in the :class:`list`.
    """
    # Assume the standard case of passing `model.parameters()` to the
    # optimizer if `optim_input` is not specified
    if optim_input is None:
        return list(model.parameters())
    try:
        params = list(optim_input)
    except TypeError:
        raise TypeError(
            "Optimizer input should be an iterable of Tensors or dicts, "
            f"but got {optim_input}"
        )
    if len(params) == 0:
        raise ValueError("Optimizer input should not be empty")

    # Check if the optimizer input represents tensors or parameter groups
    all_tensors = True
    all_dicts = True
    for param in params:
        all_tensors &= isinstance(param, torch.Tensor)
        all_dicts &= isinstance(param, dict)
    if not all_tensors and not all_dicts:
        raise TypeError(
            "Optimizer input should be an iterable of Tensors or dicts"
        )
    if all_tensors:
        return params  # type: ignore[return-value]
    assert all_dicts
    param_id_to_param = []
    for param_group in params:
        has_params_key = "params" in param_group  # type: ignore[operator]
        assert has_params_key, \
            "A parameter group should map \"params\" to a list of the " \
            "parameters in the group"
        for param in param_group["params"]:  # type: ignore[index]
            # Implicitly map `flat_param_id` (current length of the list) to
            # `param`
            param_id_to_param.append(param)
    return param_id_to_param
78001b014d01c84e83ec8d19ab5df19aab7bea48
20,022
import re


def match_scene_name(scene_name):
    """
    Args:
        scene_name (str): FloorPlanXX-<random_seed(int) | default>

    Returns a tuple of scene name and seed (or 'default') if we can match
    the format.
    """
    m = re.match("^FloorPlan[0-9]+-([0-9]+|default)$", scene_name)
    if m is not None:
        return m.group().split("-")
    return None
d8d2de675e102984007e735992fb9c0696d4845c
20,023
def design_thick_spherical_transform_lens(n, w, f):
    """Choose radius of curvature and thickness for a Fourier transform with
    given focal length and working distance.

    Args:
        n: refractive index
        w: working distance
        f: transform focal length

    Returns:
        roc: radius of curvature
        d: center thickness
    """
    # Derivation p68 Dane's Fathom logbook #2
    roc = f * (n - 1) + w * (n - 1)
    d = (f - w) * n * roc / (f * (n - 1))
    return roc, d
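A numeric sketch (editor's addition) with assumed values n = 1.5, w = 10, f = 100: roc = (100 + 10)(0.5) = 55, and d = 90 * 1.5 * 55 / 50 = 148.5.

import math

roc, d = design_thick_spherical_transform_lens(1.5, 10, 100)
assert math.isclose(roc, 55.0) and math.isclose(d, 148.5)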
a3ebcdd19cb95a4d369252984e7342a6ee338c48
20,024
def arb_callable(n):
    """Arb callable for testing."""
    return n * 2
1cac3ad2ab9ca74197e91d8404cf21f65b2aa74d
20,025
def _get_header_info(line):
    """
    Get number of sequences and length of sequence
    """
    header_parts = line.split()
    num_seqs, length = list(map(int, header_parts[:2]))
    is_interleaved = len(header_parts) > 2
    return num_seqs, length, is_interleaved
be7fc522fb8d195af6e45c93e42867aecbd23fb6
20,026
from typing import List


def lcs_tabulated(first: str, second: str) -> int:
    """
    Time Complexity: O(len(first) * len(second))
    """
    len_f: int = len(first)
    len_s: int = len(second)
    matrix: List[List[int]] = [[0] * (len_s + 1) for _ in range(len_f + 1)]

    for f in range(1, len_f + 1):
        for s in range(1, len_s + 1):
            if first[f - 1] == second[s - 1]:
                matrix[f][s] = matrix[f - 1][s - 1] + 1
            else:
                matrix[f][s] = max(matrix[f - 1][s], matrix[f][s - 1])
    return matrix[len_f][len_s]
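A worked example (editor's addition) using the classic CLRS pair, whose longest common subsequence (e.g. "BCBA") has length 4:

assert lcs_tabulated("ABCBDAB", "BDCABA") == 4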
b24c07196a1a032b543b4cbb0f80dcd2c6c339fc
20,027
import json


def loadJsonArgs(fn):
    """Load the .json file containing input values.

    Args:
        fn: file name

    Returns:
        args: argument dictionary
    """
    with open(fn) as data_file:
        data = json.load(data_file)
    args = {}
    args['patient_id'] = data['Patient ID']
    args['start_phase'] = data['Start Phase']
    args['total_phase'] = data['Total Phase']
    args['im_name'] = data["Image Name"]
    args['model_output'] = data["Output Surface Model Name"]
    args['seg_name'] = data["Segmentation Name"]
    args['im_top_dir'] = data["Image Top Dir"]
    args['seg_folder_name'] = data["Segmentation Folder Name"]
    args['im_folder_name'] = data["Image Folder Name"]
    args['out_dir'] = data["Output Dir Name"]
    args['num_interpolation'] = data["Number of Interpolations"]
    args['num_cycle'] = data["Number of Cardiac Cycles"]
    args['duration'] = data["Cycle Duration (s)"]
    args['edge_size'] = data["Mesh Size"]
    args['mask_folder_name'] = data["Mask Folder Name"]
    return args
51e8a0fdaf53836cf831701ff6921479a8d8e03f
20,030
import hashlib


def getMd5(input_str):
    """
    :param str input_str: Unicode-objects must be encoded before hashing
    :rtype: str
    """
    hash_obj = hashlib.md5(input_str.encode("utf-8"))
    return hash_obj.hexdigest()
dd72b9f173a26fe285b8eaa11be8ccd943085c9d
20,031
from pathlib import Path


def get_cwd() -> Path:
    """Determine the current working directory.

    :return: The appropriate current working directory path
    :rtype: ~pathlib.Path
    """
    return Path.cwd()
3db9c2b613f02f0678cd0f994ad12701b9229a5a
20,032
def bswap(data):
    """
    Byteswap data
    """
    return data.byteswap()
1d0bf90f948e441514aa7bc0aff0cbe2ecd8283b
20,034
def qasplit(chars, sep=",", quote="'"):
    """
    Quote aware split
    """
    if sep == quote:
        raise Exception("sep and quote cannot be the same character")
    can_split = True
    splitpoints = [-1]
    last_c = None
    for index, c in enumerate(chars):
        if c == quote and last_c != "\\":
            can_split = not can_split
        elif c == sep and can_split:
            splitpoints.append(index)
        last_c = c
    if not can_split:
        raise ValueError("Unterminated quote")
    splitpoints.append(len(chars))
    slices = [chars[splitpoints[i] + 1:splitpoints[i + 1]]
              for i in range(len(splitpoints) - 1)]
    return slices
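Usage sketch (editor's addition): separators inside quotes are preserved.

assert qasplit("a,'b,c',d") == ["a", "'b,c'", "d"]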
0e205b3ff1830c325a1c2f400471e6833664d9dc
20,037
import json


def _GetKeysAsDict(keys_input):
    """Converts |keys_input| into a dictionary.

    Args:
        keys_input: A dictionary or a string pointing to a JSON file. The
            contents of either should be Skia Gold config data.

    Returns:
        A dictionary containing the Skia Gold config data.
    """
    if isinstance(keys_input, dict):
        return keys_input
    assert isinstance(keys_input, str)
    with open(keys_input) as f:
        return json.load(f)
db5d8e1bc08d326754163bd183f51dea9e7ad499
20,038
def zfill(s, width):
    """Left-pad string ``s`` with zeros until it is ``width`` characters long."""
    if len(s) < width:
        return ("0" * (width - len(s))) + s
    else:
        return s
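A quick check (editor's addition), mirroring str.zfill for short inputs:

assert zfill("42", 5) == "00042"
assert zfill("12345", 3) == "12345"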
ed004f3959107cbf800d00b316dcd7a31fc5c0cd
20,039
def query(conn, string: str):
    """Perform a query on titanic database and return result."""
    curs = conn.cursor()
    curs.execute(string)
    result = curs.fetchall()
    return result
11c51879cad2cd99d64a3c64dbafe5c95edf6fdd
20,041
import torch


def safe_log(x, eps=1e-7):
    """Avoid taking the log of a non-positive number."""
    safe_x = torch.where(x <= eps, eps, x.double())
    return torch.log(safe_x)
5d9029d51ee667bc69b0370e49e91fdf5f096c31
20,042
def puncify(s):
    """Replaces unicode characters with the appropriate ASCII punctuation"""
    return (s.replace(u'\xa0', u' ')
            .replace(u'\u201c', '"')
            .replace(u'\u201d', '"')
            .replace(u'\u2019', "'")
            .replace(u"&amp;", '&')
            .replace(u'\u2026', '...'))
670db5ce03943365bd9683e4c75589418246a497
20,044
import re


def extract_episode(text, seg_search, eg_search):
    """
    Extract episode number from metadata.

    :param str text: Metadata containing episode number.
    :param str seg_search: Regex for a `Super Easy German` episode.
    :param str eg_search: Regex for an `Easy German` episode.
    :return: Episode number and type.
    :rtype: dict
    """
    seg_match = re.search(seg_search, text, re.IGNORECASE)
    if seg_match:
        return {
            'type': 'super_easy_german',
            'number': seg_match.group().strip().replace('(', '').replace(')', '')
        }
    eg_match = re.search(eg_search, text, re.IGNORECASE)
    if eg_match:
        return {
            'type': 'easy_german',
            'number': eg_match.group().strip()
        }
2215ba4b1aacbf8f3cff8e02bd4814a279e147ca
20,045
def bslice(high, low=None):
    """
    Represents: the bits range [high : low] of some value.

    If low is not given, represents just [high] (only 1 bit), which is the
    same as [high : high].
    """
    if low is None:
        low = high
    return slice(low, high + 1)
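A worked example (editor's addition): Python slices exclude the stop index, so bits [7:4] become slice(4, 8).

assert bslice(7, 4) == slice(4, 8)
assert bslice(3) == slice(3, 4)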
d3a3085f8da638ef63c7d0f65605543f6f3605b7
20,046
def cvt_pos(pos: list, cvt_mat_t: list):
    """
    :param pos: original coord, [x, y]
    :return: x_map, y_map: new coord after transform
    """
    u = pos[0]
    v = pos[1]
    x = (cvt_mat_t[0][0] * u + cvt_mat_t[0][1] * v + cvt_mat_t[0][2]) / (
        cvt_mat_t[2][0] * u + cvt_mat_t[2][1] * v + cvt_mat_t[2][2])
    y = (cvt_mat_t[1][0] * u + cvt_mat_t[1][1] * v + cvt_mat_t[1][2]) / (
        cvt_mat_t[2][0] * u + cvt_mat_t[2][1] * v + cvt_mat_t[2][2])
    x_map = round(x / 10)
    y_map = round(y / 10)
    return x_map, y_map
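A worked example (editor's addition): with the identity homography, the transform reduces to dividing each coordinate by 10 and rounding.

identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
assert cvt_pos([100, 50], identity) == (10, 5)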
5ce4ad727f9ec7ffad9eda17c22cc0464ea0b2d7
20,048
def serialize_file_list(files: list, active_item_index: int = -1):
    """Returns a serialized file list, which JRiver requires in some API calls.

    These are not documented further, but form a string of semicolon-separated
    values. These are, in order:

    [0] The value '2', stating a serialization version. Only 2 is supported
        these days.
    [1] The number of included keys
    [2] The active element (?), -1 for none
    [3]..[len(files) + 3]: The keys of the files.
    """
    result = "2;" + str(len(files)) + ";" + str(active_item_index)
    for file in files:
        result += ";" + str(file["Key"])
    return result
651df9d8cd228f10c9e0324a2d4b9cfc2e7adf10
20,049
def _edge_is_between_selections(edge, selection_a, selection_b):
    """
    Returns ``True`` if the edge has one end in each selection.

    Parameters
    ----------
    edge: tuple[int, int]
    selection_a: collections.abc.Container[collections.abc.Hashable]
    selection_b: collections.abc.Container[collections.abc.Hashable]

    Returns
    -------
    bool
    """
    return (
        (edge[0] in selection_a and edge[1] in selection_b)
        or (edge[1] in selection_a and edge[0] in selection_b)
    )
808ee767b44a05fb8258a2bef5621d22131e6467
20,050
from typing import Dict
from typing import Set
from typing import List


def construct_global_order(layers: Dict[str, Dict[str, str]],
                           order: str) -> Dict[int, Set[int]]:
    """
    Return a dictionary recording, for each face, which other faces are
    directly below it.

    :param layers:
    :param order:
    :return:
    """
    adjacency_dict: Dict[int, Set[int]] = {}
    for _, layer_list in layers.items():
        previous: List[int] = []
        for layer_str in layer_list[order].split('|'):
            layer: int = int(layer_str)
            for face in previous:
                if face not in adjacency_dict:
                    adjacency_dict[face] = set()
                adjacency_dict[face].add(int(layer))
            previous.append(layer)
    return adjacency_dict
180a44fe94e30ad00ff9d6cb9934f741676d44ab
20,051
def fmt_section(prefix='', total='', new='', suffix=' · '):
    """Return formatted section"""
    section = ''
    if total or new:
        section = '{}{}{}{}'.format(prefix, new, total, suffix)
    return section
465df5096e9c17e9781139ac97276c6673bb5fde
20,053
import math


def _cuda_line_circle_intersection(r0, step, radius):
    """Calculate the distance from r0 to a circle centered at origin along step.

    r0 must be inside the circle.

    Parameters
    ----------
    r0 : numba.cuda.cudadrv.devicearray.DeviceNDArray
    step : numba.cuda.cudadrv.devicearray.DeviceNDArray
    radius : float

    Returns
    -------
    float
    """
    A = step[0] ** 2 + step[1] ** 2
    B = 2 * (r0[0] * step[0] + r0[1] * step[1])
    C = r0[0] ** 2 + r0[1] ** 2 - radius ** 2
    d = (-B + math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
    return d
2e4bbfe58062ef033b01bba1174d7a1323a87ea2
20,055
def func_lef_right_capacities(k_idx):
    """Computes the expected value of the random variable capacities of the
    left and right problems.

    Computes E(C_left) and E(C_right) according to expressions (43a) and
    (43b) in Theorem 19.

    Args:
        k_idx: Int. Knapsack capacity.

    Returns:
        cap_left_closed: Int. Expected capacity of left subproblem.
        cap_right_closed: Int. Expected capacity of right subproblem.
    """
    kp = k_idx  # Knapsack capacity
    mu = kp + 1  # Quantity of items

    # Deviation of the knapsack capacities
    deviation = mu * ((1 + 1 / kp) ** mu + (1 - 1 / kp) ** mu) / 4 - kp * (
        (1 + 1 / kp) ** (mu + 1) - (1 - 1 / kp) ** (mu + 1)) / 4
    # NOTE (translated from Spanish): careful, I think the denominators
    # above should be 8.

    sum_deviation = 0
    for k in range(0, mu, 2):
        # Common sum for both terms
        sum_deviation = sum_deviation + k * (1 + 1 / kp) ** (k - 1)

    cap_left_closed = kp / 2 + sum_deviation / (2 * kp ** 2) + deviation
    cap_right_closed = kp / 2 - sum_deviation / (2 * kp ** 2) - deviation

    return cap_left_closed, cap_right_closed
52ef26b3088491229e4387f46297c638b8b821c9
20,056