content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def compute_pascal(n):
    """
    Compute the nth row of Pascal's triangle.

    Parameters
    ----------
    n : int
        Which row to compute (0-based; row 0 is ``[1]``).

    Returns
    -------
    pascal_n : list of int
        The nth row of Pascal's triangle.
    """
    pascal_n = [1]
    prev = 1
    for k in range(1, n + 1):
        # C(n, k) = C(n, k-1) * (n + 1 - k) / k and the division is always
        # exact, so integer floor division is safe.  The original used float
        # division, which loses precision (and eventually overflows) for
        # large n.
        prev = prev * (n + 1 - k) // k
        pascal_n.append(prev)
    return pascal_n
54b2d5ca80412d2da4da4e9f09dff25026205d3d
29,012
def param_case():
    """Class decorator factory that expands parameterized test methods.

    For every method on the decorated class that carries a
    ``__parameters__`` attribute (a sequence of parameter values), the
    method is removed and replaced by one generated test method per
    parameter (named ``<orig>_<index>``), each of which calls the
    original method with that single parameter.  A new class with the
    same name, inheriting from the decorated class and carrying the
    generated methods, is returned.
    """
    def _wrapper(cls):
        name = cls.__name__
        test_funcs = {}
        for attr in dir(cls):
            it = getattr(cls, attr)
            # Only expand callables explicitly marked with __parameters__.
            if not hasattr(it, '__parameters__') or not callable(it):
                continue
            # Remove the template method so a test runner does not invoke
            # it directly (it still expects a parameter argument).
            delattr(cls, attr)
            parameters = getattr(it, '__parameters__')
            pre_doc = '' if it.__doc__ is None else it.__doc__
            for i, param in enumerate(parameters):
                def _gen_method(_parameter, origin_method):
                    # Closure factory: binds the current parameter value,
                    # avoiding the late-binding-in-a-loop pitfall.
                    def _test_func(self):
                        return origin_method(self, _parameter)
                    return _test_func
                test_func = _gen_method(param, it)
                test_func.__doc__ = '{} with param {}'.format(pre_doc, param)
                test_funcs['{}_{}'.format(attr, i)] = test_func
        # Build a subclass of the original carrying the generated methods.
        new_class = type(name, (cls,), test_funcs)
        return new_class
    return _wrapper
69aca82239591a159d22d1bcc0361daba00bd13d
29,013
def maximumWealth(accounts):
    """Return the largest row sum in ``accounts``.

    :type accounts: List[List[int]]
    :rtype: int
    """
    return max(sum(customer) for customer in accounts)
e369f3532e0bca1056bdafbd38570dee9c0e0076
29,014
def merge_sorted(xs, ys, key=lambda x: x):
    """Merge two sorted lists into a new sorted list.

    :param xs: first sorted list
    :param ys: second sorted list
    :param key: one-argument function extracting the comparison key
        (identity by default).  The original docstring documented a
        nonexistent ``inplace`` parameter; this function never mutates
        its inputs.
    :returns: new list with all items of ``xs`` and ``ys`` in sorted
        order; on equal keys the element from ``ys`` comes first
        (preserving the original ``<`` comparison).
    """
    zs = [None] * (len(xs) + len(ys))
    i = j = m = 0
    while i < len(xs) and j < len(ys):
        if key(xs[i]) < key(ys[j]):
            zs[m] = xs[i]
            i += 1
        else:
            zs[m] = ys[j]
            j += 1
        m += 1
    # Copy whichever list still has a tail (at most one of these runs).
    for k in range(i, len(xs)):
        zs[m] = xs[k]
        m += 1
    for k in range(j, len(ys)):
        zs[m] = ys[k]
        m += 1
    return zs
d68cecd81384419e0691f2820efc0acef8d494e9
29,015
def to_snake_case(string: str) -> str:
    """Convert a camelCase string to snake_case."""
    return "".join(
        f"_{ch.lower()}" if ch.isupper() else ch for ch in string
    )
14bfd281c748af1ae3ad8522705d43865f73e4bb
29,017
def __api_reverse_suffix(path):
    """Return the normalized suffix of a url without any api information
    (and without a numeric version component, if one is present) so that
    the correct version can be added."""
    if not path.startswith('/api/'):
        return path
    components = path.split('/', 3)
    if len(components) >= 4 and components[2].isdigit():
        # '/api/<version>/rest' -> '/rest'
        return '/' + components[3]
    # No version component: just drop the leading '/api'.
    return path[4:]
57534f79a8162769a6a236d5b4df307021d3b573
29,019
def _capitalize_first_letter(c):
    """Upper-case only the first letter of the input.

    Unlike the built-in `capitalize()` method, doesn't lower-case the
    other characters.  This helps mimic the behavior of
    `proto-lens-protoc`, which turns `Foo/Bar/BAZ.proto` into
    `Foo/Bar/BAZ.hs` (rather than `Foo/Bar/Baz.hs`).

    Args:
      c: A non-empty string word.

    Returns:
      The input with the first letter upper-cased.
    """
    head, tail = c[0], c[1:]
    return head.capitalize() + tail
d5370fd75d7b8a1f566052b9843981a0d3b71951
29,021
def dbt_cloud_job_factory(job_specs, **kwargs):
    """Fill in templatable keywords on ``job_specs`` and return it.

    Sets the standard defaults (``id``, ``state``, ``generate_docs``)
    and then overlays any caller-supplied keyword values.  The input
    mapping is modified in place and also returned.
    """
    job_specs.update({"id": None, "state": 1, "generate_docs": True})
    job_specs.update(kwargs)
    return job_specs
70686e701968aba9b9d977a1eccc77d1818241a2
29,025
def GenerateBlock(group):
    """group is a list of (name, value) pairs in which all values have the
    same Most Significant Byte. Prefix the list with, and insert dummy
    (name, value) pairs so that value % 256 = position in the list for all
    values.

    Example input:
    [ ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("GL_BAZ", 0xcc05) ]

    And the result:
    [ ("", 0xcc00), ("", 0xcc01), ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03),
      ("", 0xcc004), ("GL_BAZ", 0xcc05) ]

    NOTE(review): the code below stores the *next real* value in every
    dummy pair, not the gap value shown in the example above (the first
    filler would be ("", 0xcc02), not ("", 0xcc00)).  Positions still line
    up, so this only matters if a caller reads a dummy pair's value —
    confirm which behavior is intended.
    """
    i = 0
    block = []
    for (name, value) in group:
        # Pad with dummy entries until index i matches value's low byte.
        while i < value % 256:
            block.append(("", value))
            i += 1
        assert i == value % 256
        block.append((name, value))
        i += 1
    return block
d22e6223ede2c60f16f9716aefe1fbc9ac928478
29,026
def determine_prepend(vid_path):
    """
    Create the string to prepend to the file name.

    Parameters:
        vid_path - full path name to video directory

    Returns:
        prepend - string to prepend to filename.
    """
    # Components are extracted from the last path element around the
    # '(', ')' and '-' delimiters.
    basename = vid_path.rstrip('/').split('/')[-1]
    part_a = basename.split('(')[0]
    after_paren = basename.split('(')[-1]
    part_b = after_paren.split(')')[0]
    part_c = after_paren.split(')')[-1].split('-')[-1]
    if part_a == part_b:
        return part_a
    if part_b == part_c:
        return part_a + '_' + part_b
    return part_a + '_' + part_b + '_' + part_c
8f0d18d8c88b26039fea27a152995685f74d8769
29,027
def get_fprimer_percent_aln(fprimer, percent_alignment):
    """
    For each forward primer, compute the minimum number of matching
    bases allowed, from the primer length and the user's maximum percent
    alignment between 2 primers. See percent alignment help for info.

    Args:
        fprimer (str): the forward primer(s)
        percent_alignment: maximum percent alignment allowed.

    Returns:
        fp_len (list of int): user defined total number of bases allowed
            to match, one entry per primer.
    """
    fraction = percent_alignment / 100
    return [int(len(fseq) * fraction) for fseq in fprimer]
07316dabe0cfc12606bf6caddb8cee58a26534f1
29,028
def linearEOS(S, S0=1.0271, beta=1.0, RHO0=1000.0):
    """
    Linear equation of state.

    Returns density computed from salinity ``S`` as a linear deviation
    from the reference salinity ``S0``, scaled by the expansion
    coefficient ``beta`` and the reference density ``RHO0``.
    """
    anomaly = beta * (S - S0)
    return RHO0 * anomaly
51b23beff235905f0aa107e148121860c70a5de8
29,029
from typing import Tuple


def get_report_format_types() -> Tuple:
    """
    Get all graph summary report formats supported by KGX.

    Returns
    -------
    Tuple
        A tuple of supported file formats
    """
    return ('yaml', 'json')
fd3626a21551734179eaeef5423267c302c31013
29,030
import re
import collections


def cmd_output_split_parser(out, columns=(), starts_at=0, stops_at=0, split_re=r'\s+', column_info_at=-1, start_after_columns_line=False):
    """
    Parse the output of a shell command into an ordered dict of rows.

    :param out: list of output lines
    :param columns: list of column names; columns[0] becomes the row key,
        the remaining names label the values of each row
    :param starts_at: line index where to start reading from
    :param stops_at: line index where to stop reading at. This will usually
        be zero to mean read all, or negative to read until the last N line
    :param split_re: the regex expression used to split the values
    :param column_info_at: if not -1, read the column names from this line
        instead of from the ``columns`` argument
    :param start_after_columns_line: locate the header line by finding a
        line whose (lower-cased) fields equal ``columns``, and start
        parsing just after it; raises Exception when no such line exists
    :return: OrderedDict keyed by each row's first column; each value is a
        dict mapping the remaining column names to that row's fields
        (rows with a single field map to an empty dict)
    """
    def add_columns(key, values):
        # Attach `values` to the row `key`, labelling them with columns[1:].
        if not key in ret:
            ret[key] = dict()
        for i, c in enumerate(columns[1:]):
            # NOTE(review): this guard uses `i > len(values)`, so
            # values[i] can still raise IndexError when i == len(values)
            # — confirm whether `>=` was intended.
            if i > len(values):
                return
            ret[key][c] = values[i]
    # Lines indented by 4+ spaces continue the previous row.
    continuation = re.compile(r'^\s{4,}')
    ret = collections.OrderedDict()
    last = ''
    split_number = len(columns) - 1
    stop = len(out) if not stops_at else stops_at
    if column_info_at != -1:
        # Column names come from the output itself.
        columns = re.split(split_re, out[column_info_at])
    elif start_after_columns_line:
        column_i = [v.lower() for v in columns]
        found = False
        for starts_at, line in enumerate(out):
            if column_i == [v.lower() for v in re.split(split_re, line, split_number)]:
                found = True
                starts_at += 1
                break
        if not found:
            raise Exception('Header Columns not found')
    if len(out) > starts_at:
        for v in out[starts_at:stop]:
            # check if line is empty:
            if not v.strip():
                continue
            # check if line is continuation:
            if continuation.match(v):
                add_columns(last, re.split(split_re, v, split_number))
                continue
            a = re.split(split_re, v, split_number)
            last = a[0]
            if len(a) > split_number:
                add_columns(last, a[1:])
            elif len(a) == 1:
                ret[a[0]] = dict()
    # as the prompt changes with the cd we need a way to remove the prompt
    # element added that was not removed during the expect processing
    return ret
4d64fca8ff5cb71795a4ae1d79b0c31132b85e05
29,031
def dictdict_to_listdict(dictgraph):
    """Transform a dict-of-dicts graph into an adjacency representation
    indexed by integers (list of dicts).

    :param dictgraph: dictionary mapping vertices to dictionaries such
        that dictgraph[u][v] is the weight of arc (u, v)
    :complexity: linear
    :returns: tuple (graph as list of dicts, name_to_node dict,
        node_to_name list)
    """
    node_to_name = sorted(dictgraph)              # index -> vertex name, sorted for readability
    name_to_node = {name: idx for idx, name in enumerate(node_to_name)}
    sparse = [{} for _ in node_to_name]           # rebuild arcs using integer indices
    for u, neighbours in dictgraph.items():
        for v, weight in neighbours.items():
            sparse[name_to_node[u]][name_to_node[v]] = weight
    return sparse, name_to_node, node_to_name
4dbd8f57230ba1a94a3eb861dddb3b9f35b2a80e
29,032
import os


def get_ps_includes():
    """Return the list of PolySync include directories.

    Add directory to list as required.
    """
    install = os.path.join('/', 'usr', 'local', 'polysync')
    include = os.path.join(install, 'include')
    pdm = os.path.join(install, 'pdm')
    deps = os.path.join(include, 'deps')
    vendor = os.path.join(install, 'vendor', 'include')
    paths = [include, pdm, vendor, deps]
    paths.append(os.path.join(vendor, 'glib-2.0'))
    paths.append(os.path.join(vendor, 'libxml'))
    # NOTE(review): this glib-2.0 entry duplicates the one two lines up;
    # kept as-is to preserve the original list exactly.
    paths.append(os.path.join(vendor, 'glib-2.0'))
    paths.append(os.path.join(deps, 'dcps', 'C', 'SAC'))
    paths.append(os.path.join(deps, 'sys'))
    paths.append(os.path.join('polysync', 'include'))
    return paths
2cda9a73dbd01abef1c2ea1aeb151c9073a7417a
29,033
import json


def get_host_osd_map(cls):
    """
    Build a mapping of host name -> OSDs deployed on that host.

    Args:
        cls: cephadm instance object (must provide ``shell``)

    Returns:
        Dictionary with host names as keys and lists of deployed osds
        as values
    """
    out, _ = cls.shell(args=["ceph", "osd", "tree", "-f", "json"])
    tree = json.loads(out)
    return {
        node["name"]: node["children"]
        for node in tree["nodes"]
        if node["type"] == "host"
    }
2ce441001814b1fbdd00b32255f3365ade8a5ad5
29,034
def bool_formatter(attr):
    """Format a boolean into a more readable format.

    Args:
        attr (bool): a boolean attribute

    Returns:
        str: the string "True" or "False", depending on the boolean
        value of the given attribute.
    """
    readable = str(attr)
    return readable
b263320919a13d3f870b7474fbf1f371ef375fe2
29,037
import yaml


def read_cfg(cfg_file):
    """
    Load configuration from a yaml file.

    Args:
        cfg_file (.yaml): path to cfg yaml

    Returns:
        (dict): configuration in dict
    """
    with open(cfg_file, 'r') as handle:
        return yaml.safe_load(handle)
ee44f0f6310240ea5e5d56ebe2ce68fabeb9dcec
29,038
def after_p(text, meta):
    """Even if there are no save_vars, we will add some standard info
    about the build, so the user can turn it off with 'save_var: False'.
    """
    build_strings = meta.get('build_strings', '[]')
    # Equality (not identity) comparison is intentional: any value that
    # compares equal to False disables the behavior.
    return build_strings != False  # noqa: E712
b1388754561a8b75265c332cd41eddf0a00b9ffa
29,039
def intersection(bb1, bb2):
    """Calculate the intersection area of two aabb's given as
    (x, y, w, h) tuples."""
    min_w = min(bb1[2], bb2[2])
    min_h = min(bb1[3], bb2[3])
    # Order the two boxes along each axis.
    leftbb, rightbb = (bb1, bb2) if bb1[0] < bb2[0] else (bb2, bb1)
    topbb, bottombb = (bb1, bb2) if bb1[1] < bb2[1] else (bb2, bb1)
    overlap_w = min(min_w, max(leftbb[0] + leftbb[2] - rightbb[0], 0))
    overlap_h = min(min_h, max(topbb[1] + topbb[3] - bottombb[1], 0))
    return overlap_w * overlap_h
368177cc00fcfff507198f39be3184fc2eed1855
29,040
import functools
import time


def get_dura(func):
    """Decorator that reports the wall-clock run time of ``func``."""
    print('begin')

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        print("spend time:{} s".format(time.time() - started))
        return result

    return wrapper
401a94172d09491b271ea175affe861265005df9
29,043
def find_bgn_fin_pairs(locts):
    """Find pairs of [bgn, fin] from a location (index) array.

    Scans ``locts`` for gaps (consecutive values differing by more than
    1) and emits a [begin, end] pair for each run between gaps.

    NOTE(review): the loop iterates range(1, len(locts) - 1), i.e. it
    never compares the final element against its predecessor, so a gap
    just before the last element is not detected — confirm whether that
    is intended.  The ``+ 1`` adjustments on both bgn and fin also look
    like off-by-ones; verify exact boundaries against a caller.
    """
    if len(locts)==0:
        return []
    else:
        bgns = [locts[0]]
        fins = []
        for i1 in range(1, len(locts) - 1):
            # range from locts[1] to locts[-2]
            if locts[i1] - locts[i1 - 1] > 1:
                # gap: close the current run, open a new one
                fins.append(locts[i1 - 1] + 1)
                bgns.append(locts[i1] + 1)
        fins.append(locts[-1])
        # every opened run must have been closed
        assert len(bgns)==len(fins)
        lists = []
        for i1 in range(len(bgns)):
            lists.append([bgns[i1], fins[i1]])
        return lists
96eae4eb08fa3cdf951ec4f70149660517c70f86
29,044
def WIFEXITED(status):
    """Return ``True`` if the process exited using the :manpage:`exit(2)`
    system call, otherwise return ``False``.

    NOTE(review): this is a stub — it ignores ``status`` and always
    returns ``False``; presumably a placeholder for platforms without a
    real implementation.
    """
    return False
6d5364bb9ec5399986223f338a68bf2230252dcb
29,045
def any_invalid_file_path(any_invalid_directory_path):
    """Invalid file: a nonexistent file inside the invalid directory."""
    return any_invalid_directory_path.joinpath("i-am-not-a-real-file.txt")
c3a2831a429f820bfbd7f8b6cf8a5c66fc89d196
29,046
import requests


def get_owner_to_roster(player_id_to_custom_id, roster_id_to_owner, league_id, week):
    """
    Map each owner team name to the custom ids of its roster players.

    Also determines which two teams are in each matchup by building a
    map of matchup id to the owners playing that game.
    """
    owner_to_roster = {}
    matchup_id_to_owners = {}
    response = requests.get('https://api.sleeper.app/v1/league/%s/matchups/%s' % (league_id, week))
    for roster in response.json():
        owner = roster_id_to_owner[roster['roster_id']]
        owner_to_roster[owner] = [
            player_id_to_custom_id[pid] for pid in roster['players']
        ]
        # Group the two owners of each matchup together.
        matchup_id_to_owners.setdefault(roster['matchup_id'], []).append(owner)
    return owner_to_roster, matchup_id_to_owners
708a8aa30f347ce278876b44bb7d6eca8bc17530
29,047
def prds_worked_weekly_idx_rule(M):
    """
    Index is (window, tour type, week) if more than one shift length in
    tour type.

    :param M: Model (expects ``okTourType``, ``WEEKS`` and
        ``tt_length_x`` components)
    :return: Constraint index rule — list of (window, tour-type, week)
        tuples restricted to tour types having more than one shift
        length
    """
    return [(i, t, w) for (i, t) in M.okTourType
            for w in M.WEEKS
            if len(M.tt_length_x[t]) > 1]
478182485e9bd199550172cf54856d5d867ad010
29,048
def string_to_type(string):
    """Convert user input to types. Useful for passing user input to
    ParseKey.

    Accept str, int, float, or bool.

    Raises:
        RuntimeError: if the name matches none of the accepted types.
    """
    aliases = {
        "string": str, "str": str,
        "float": float, "number": float, "decimal": float,
        "integer": int, "int": int,
        "boolean": bool, "bool": bool,
    }
    data_type = aliases.get(string.lower())
    if data_type is None:
        raise RuntimeError("Invalid input. Enter a type str, int, float, or bool.")
    return data_type
71099e95609a6fa26d0bc36f7fa5d63d18ad858a
29,050
def notas(*n, sit=False):
    """
    Analyse an arbitrary number of grades.

    :param n: the grades (tuple; values expected in 0..100);
    :param sit: when True, include each student's situation and the
        overall situation in the result;
    :return: dict with the number of grades, highest and lowest grade,
        the average and, when ``sit`` is True, the per-student and
        overall situations.
    """
    quantidade = len(n)
    maior = max(n)
    menor = min(n)
    media = sum(n) / quantidade
    respostas = dict()
    respostas['Quantidade'] = quantidade
    respostas['Maior Nota'] = maior
    respostas['Menor Nota'] = menor
    respostas['Media das Notas'] = f'{media:.2f}'
    if sit:
        condicao = []
        for c, i in enumerate(n):
            if i < 50:
                condicao.append(f'Aluno {c + 1}-REPROVADO')
            elif i < 70:
                condicao.append(f'Aluno {c + 1}-RECUPERAÇÃO')
            elif i <= 100:
                condicao.append(f'Aluno {c + 1}-APROVADO')
            else:
                condicao.append('<nota fora do range>')
        if -1 < media < 50:
            condicaogeral = f'PÉSSIMA - {media:.2f}'
        elif media < 70:
            condicaogeral = f'RUIM - {media:.2f}'
        elif media < 90:
            condicaogeral = f'ACEITAVEL - {media:.2f}'
        elif media <= 100:
            # BUG FIX: the original used `media < 100`, so a perfect
            # average of exactly 100 fell through to 'ERRO'.
            condicaogeral = f'BOA - {media:.2f}'
        else:
            condicaogeral = 'ERRO'
        respostas['Condição'] = condicao
        respostas['Condição Geral'] = condicaogeral
    return respostas
262e2b9a574f1f9119c1876415cfdaff8b3b53b5
29,051
def provides_facts():
    """Doesn't really provide any facts; returns an empty mapping."""
    return dict()
61d68007ad7b182116ff0d3f86ac3311e151313b
29,052
def create_data_model():
    """Stores the data for the problem.

    Returns a dict with:
      - 'time_matrix': travel times between the 17 locations.
      - 'time_windows': (open, close) service window per location.
      - 'num_vehicles': fleet size.
      - 'depot': index of the depot node (16) in the matrix.
    """
    data = {}
    # Travel times between every pair of the 17 nodes.
    data['time_matrix'] = [
        [0, 5.6, 8, 12.8, 17.4, 13.8, 13.7, 3.4, 4.2, 11, 16.6, 10.4, 18.1, 44, 7.4, 2.4, 17.8],
        [5.6, 0, 4.7, 17.2, 2.4, 12.4, 12.8, 7.5, 8.8, 10.1, 16.4, 9, 15.9, 37.4, 11.9, 6.5, 22],
        [8, 4.7, 0, 19.5, 3.2, 16.4, 8.1, 9.8, 11.2, 5.4, 13.9, 12.9, 11.2, 41.4, 14.3, 8.9, 25.9],
        [12.8, 17.2, 19.5, 0, 18.9, 16, 25.3, 11, 11.2, 22.6, 27.9, 21.9, 27.8, 31.3, 8.9, 10.9, 18.6],
        [17.4, 2.4, 3.2, 18.9, 0, 14.1, 11.3, 9.2, 10.5, 8.6, 17.1, 10.7, 14.4, 39.1, 13.7, 8.3, 23.7],
        [13.8, 12.4, 16.4, 16, 14.1, 0, 21.1, 15.7, 17, 18.4, 24.6, 3.8, 7.1, 29.1, 20.1, 14.7, 30.5],
        [13.7, 12.8, 8.1, 25.3, 11.3, 21.1, 0, 15.6, 16.9, 4.7, 16.3, 21.3, 14.8, 56.2, 20, 14.6, 31.2],
        [3.4, 7.5, 9.8, 11, 9.2, 15.7, 15.6, 0, 4.8, 12.9, 18.2, 12.2, 20, 42, 5.8, 1, 15.8],
        [4.2, 8.8, 11.2, 11.2, 10.5, 17, 16.9, 4.8, 0, 14.2, 19.6, 13.5, 21.2, 42.3, 6.6, 4.3, 17.7],
        [11, 10.1, 5.4, 22.6, 8.6, 18.4, 4.7, 12.9, 14.2, 0, 13.6, 18.4, 12.1, 53.5, 17.3, 12, 28.2],
        [16.6, 16.4, 13.9, 27.9, 17.1, 24.6, 16.3, 18.2, 19.6, 13.6, 0, 21.2, 20.6, 58.9, 22.7, 17.3, 30.5],
        [10.4, 9, 12.9, 21.9, 10.7, 3.8, 21.3, 12.2, 13.5, 18.4, 21.2, 0, 6.2, 32.9, 16.7, 11.3, 26.3],
        [18.1, 15.9, 11.2, 27.8, 14.4, 7.1, 14.8, 20, 21.2, 12.1, 20.6, 6.2, 0, 36.2, 22.6, 19, 34.8],
        [44, 37.4, 41.4, 31.3, 39.1, 29.1, 56.2, 42, 42.3, 53.5, 58.9, 32.9, 36.2, 0, 39.8, 41.8, 36.8],
        [7.4, 11.9, 14.3, 8.9, 13.7, 20.1, 20, 5.8, 6.6, 17.3, 22.7, 16.7, 22.6, 39.8, 0, 5.7, 13.6],
        [2.4, 6.5, 8.9, 10.9, 8.3, 14.7, 14.6, 1, 4.3, 12, 17.3, 11.3, 19, 41.8, 5.7, 0, 15.7],
        [17.8, 22, 25.9, 18.6, 23.7, 30.5, 31.2, 15.8, 17.7, 28.2, 30.5, 26.3, 34.8, 36.8, 13.6, 15.7, 0]
    ]
    # Earliest/latest service times per node; node 16 (the depot) is
    # effectively always open.
    data['time_windows'] = [
        (7, 15),  # 0
        (9, 17),  # 1
        (9, 15),  # 2
        (6, 18),  # 3
        (9, 15),  # 4
        (9, 15),  # 5
        (9, 16),  # 6
        (9, 16),  # 7
        (9, 17),  # 8
        (13, 17),  # 9
        (13, 17),  # 10
        (9, 17),  # 11
        (8, 18),  # 12
        (10, 16),  # 13
        (9, 17),  # 14
        (10, 18),  # 15
        (0, 23)  # 16
    ]
    data['num_vehicles'] = 10
    data['depot'] = 16
    return data
e29847ba9566819374baece4a01af2227f0a2b22
29,053
def ngramname(n):
    """Return the name of the nth n-gram (0 -> 'bigrams', 1 -> 'trigrams')."""
    names = ('bigrams', 'trigrams')
    return names[n]
b8ecbb832319824ef85f49528f59791fa8ecec40
29,054
def inverse_lagrange(x, y, ya):
    """Given two lists x and y, find the value of x = xa when y = ya,
    i.e., f(xa) = ya, by Lagrange interpolation of x as a function of y."""
    assert len(x) == len(y)
    total = 0
    for i, xi in enumerate(x):
        # Basis polynomial for point i evaluated at ya.
        numer = 1
        denom = 1
        for j, yj in enumerate(y):
            if j != i:
                numer *= ya - yj
                denom *= y[i] - yj
        total += numer * xi / denom
    return total
12a70d7fe78438de17f5430562e66b0f4a40ce63
29,055
def pt_txt_replace(firstname, surname, pt_txt,
                   redact_message_fname='XXXfirstnameXXX',
                   redact_message_sname='XXXsurnameXXX'):
    """FACTORISING FUNCTION

    Replace ``firstname`` and ``surname`` in ``pt_txt`` with the default
    redact messages.

    Returns the redacted text together with the list of replaced names.
    """
    redacted = pt_txt.replace(firstname, redact_message_fname)
    redacted = redacted.replace(surname, redact_message_sname)
    return redacted, [firstname, surname]
f8786a19789a6ef987d1db1190a0dde2aaf7b93f
29,057
import os


def get_size(file):
    """Return the size of ``file`` in megabytes (decimal: bytes / 1e6).

    :param file: path to the file
    :return: size of file in megabytes
    """
    return os.path.getsize(file) / 1000000
1dd21941cf6ff3bd4b2a36b2b64bddaddb5a2dd8
29,060
def primary_key(data_base, key, key_value):
    """Check whether the primary key value is a duplicate.

    Make sure the primary key of the database is unique: check if the
    primary key value already exists in the database.

    Args:
        data_base: iterable of records to check.
            NOTE(review): the original docstring said ``dict``, but the
            loop below indexes each element with ``record[key]``, so an
            iterable of mappings (e.g. a list of dicts) is what actually
            works — confirm with callers.
        key (str): The primary key
        key_value: the primary key value to compare; can be any
            comparable datatype

    Returns:
        True when the value is unused, otherwise an explanatory str.
    """
    for record in data_base:
        if record[key] == key_value:
            return "{} is the primary key, should be unique!".format(key)
    return True
3858b1ea24d86bf9fc63bfdc768686ab0453189e
29,061
def letter_swap_rules():
    """
    Define a subcategory of rules consisting of long-s replacement and
    other common mismatches. These rules must be absolute, meaning we
    should never add a mistake where there was none when we apply them.

    NOTE(review): the literal key "ci-deflus" appears twice in
    ``f_to_s_dict`` below (both mapping to "ci-dessus"); Python silently
    keeps the last occurrence, so this is harmless but redundant.
    """
    letter_swap_dict = dict()
    # Long-s ('f'-printed) to modern 's' replacements.
    f_to_s_dict = {"Préfident":"Président","Confeil":"Conseil","Juftice":"Justice","l'impreffion":"l'impression",
                   "réfolu":"résolu","meffage":"message","meflage":"message","indivifible":"indivisible","ci-deffus":
                   "ci-dessus","ci-deflus":"ci-dessus","fceau":"sceau","préfent":"présent","réfolution":"résolution",
                   "fource":"source","préfente":"présente","claffe":"classe","Laufanne":"Lausanne","Miniftre":"Ministre",
                   "befoin":"besoin","Comiffion":"Commission","ci-deflus":"ci-dessus","auffi":"aussi","prefcrite":"prescrite",
                   "boiffon":"boisson","boiffons":"boissons","néceffaire":"nécessaire","Affemblée":"Assemblée",
                   "meffages":"messages","Commiffion":"Commission","fignatures":"signatures","figné":"signé","fignés":"signés",
                   "fel":"sel","fignée":"signée","fcellé":"scellé","prifonnier":"prisonnier","prifonniers":"prisonniers",
                   "néceffité":"nécessité","Meffieurs":"Messieurs","préfervatif":"préservatif", "Confeils":"Conseils"}
    letter_swap_dict.update(f_to_s_dict)
    # Other one-off common OCR mismatches.
    singleton_rules = {"€":"&","mililaires":"militaires","mililaire":"militaire"}
    letter_swap_dict.update(singleton_rules)
    return letter_swap_dict
80d56869bbfc2b032120a58eec3b19b952a6b84a
29,063
def cm2nm(E_cm):
    """Converts photon energy from absolute cm-1 to wavelength.

    Parameters
    ----------
    E_cm: float
        photon energy in cm-1

    Returns
    -------
    float
        Photon energy in nm

    Examples
    --------
    >>> round(cm2nm(1e5))
    100
    """
    # 1 cm = 1e7 nm, so lambda[nm] = 1e7 / E[cm-1].  The original doctest
    # expected the bare integer 100, which fails because the function
    # returns a float with round-off.
    return 1 / (E_cm * 1e-7)
ff7034356a42f01e1c876d0983e581b8c6860dd3
29,065
def render_chart(word_list):
    """
    Build the per-letter frequency data for a console bar chart.

    Each entry holds one row of the chart: the frequency of that letter
    across all words in the word list.

    Returns:
        A dictionary whose keys are the upper-case letters 'A'..'Z' and
        whose values encode the frequency (N) of the letter as the
        upper-cased letter repeated N times.  For example, for the word
        'apple' the result would include:
        {"A": "A"}, {"E": "E"}, {"L": "L"}, {"P": "PP"}
        (the original docstring showed lower-case values, but the code
        appends ``letter.upper()``).  Although not shown above, all 26
        keys are returned even if the frequency is zero.
    """
    chart = {chr(n): "" for n in range(ord('A'), ord('Z') + 1)}
    for word in word_list:
        for letter in word:
            try:
                chart[letter.upper()] += letter.upper()
            except KeyError:
                # Non A-Z characters (digits, punctuation) are skipped.
                continue
    return chart
7d004d7a3ca61151e9b58b68aa769fdb8c3ab98e
29,066
def clean_popest(table):
    """Cleans the population estimate data: keeps the Code and All ages
    columns and renames them to geo_code / pop_2020."""
    renamed = {"Code": "geo_code", "All ages": "pop_2020"}
    return table[list(renamed)].rename(columns=renamed)
ef0b7bb8d9a61709f03889833baba6e3b0ef7a00
29,067
def has_chr(s):
    """Return whether the ``str`` starts with the prefix ``"chr"``."""
    return s[:3] == "chr"
43b6e00a3deefe9d88cb7c74413eeb94a3ec6856
29,068
def event_type(play_description):
    """
    Returns the event type (ex: a SHOT or a GOAL...etc) given the event
    description.

    :param play_description: description of play
    :return: event code string, or None when no known phrase matches
    """
    events = {'GOAL SCORED': 'GOAL', 'SHOT ON GOAL': 'SHOT', 'SHOT MISSED': 'MISS',
              'SHOT BLOCKED': 'BLOCK', 'PENALTY': 'PENL', 'FACEOFF': 'FAC',
              'HIT': 'HIT', 'TAKEAWAY': 'TAKE', 'GIVEAWAY': 'GIVE'}
    # First phrase found (in dict insertion order) wins, as before.
    for phrase, code in events.items():
        if phrase in play_description:
            return code
    return None
cefd67ae82a3e22a8f8598218887eb9c6a5ea06c
29,069
def cmr_filter_json(search_results, request_type="application/x-hdfeos"):
    """
    Filter the CMR json response for desired data files.

    Arguments
    ---------
    search_results: json response from CMR query

    Keyword arguments
    -----------------
    request_type: data type for reducing CMR query

    Returns
    -------
    producer_granule_ids: list of ICESat-2 granules
    granule_urls: list of ICESat-2 granule urls from NSIDC
    """
    producer_granule_ids = []
    granule_urls = []
    # Bail out early when the response carries no entries.
    if 'feed' not in search_results or 'entry' not in search_results['feed']:
        return (producer_granule_ids, granule_urls)
    for entry in search_results['feed']['entry']:
        producer_granule_ids.append(entry['producer_granule_id'])
        # Keep only the first link of the requested type.
        for link in entry['links']:
            if link['type'] == request_type:
                granule_urls.append(link['href'])
                break
    return (producer_granule_ids, granule_urls)
757953aabe2a83040f8e2e206396b80076288242
29,070
import re


def version_as_list(version):
    """
    Returns a list of the integer components of the supplied version
    string. Components are separated by , or . characters; empty
    components become 0.
    """
    return [int(part) if part else 0 for part in re.split("[.,]", version)]
1cd4073fd0a127b9f833953a0a8d7b8555e3b38c
29,071
import importlib.util


def check_modules_installed(modules: list):
    """Check whether the given modules are installed.

    Uses ``importlib.util.find_spec``; the ``imp`` module this function
    previously relied on was deprecated since Python 3.4 and removed in
    Python 3.12.

    Args:
        modules: module names to check.

    Returns:
        List of module names that are not installed.
    """
    not_installed_modules = []
    for module_name in modules:
        try:
            spec = importlib.util.find_spec(module_name)
        except (ImportError, ValueError):
            spec = None
        if spec is not None:
            continue
        # Rare case: the module may live inside an egg/zip importer that
        # find_spec cannot resolve — fall back to a real import.
        try:
            __import__(module_name)
        except ImportError:
            not_installed_modules.append(module_name)
    return not_installed_modules
8b6df1124467132dea9a1548adadd7a6d52606fc
29,072
def expandValues(inputs, count, name):
    """Return ``inputs`` expanded to length ``count``.

    A single-element list is repeated ``count`` times; a list already of
    length ``count`` is returned unchanged.  Normally called from
    `expandParameters()` where `name` is the symbolic name of the input.

    Raises:
        ValueError: if ``inputs`` has neither length 1 nor ``count``.
    """
    n = len(inputs)
    if n == count:
        return inputs
    if n == 1:
        return inputs * count
    raise ValueError('Incompatible number of values for ' + name)
6e4fb34cccaf627c7d8d2fedadf35f3ce131d522
29,073
def rectAt(cen, size):
    """Returns a rectangle [x0, y0, x1, y1] of the given `size` centered
    at the given location. The coordinates are inclusive of borders."""
    cx, cy = cen[:2]
    w, h = size[:2]
    left = cx - w // 2
    top = cy - h // 2
    return [left, top, left + w - 1, top + h - 1]
98fff599be25853eeb488dac58a22ec2f7caaa66
29,074
import signal


def RelaySignal(handler, signum, frame):
    """Notify a listener returned from getsignal of receipt of a signal.

    Returns:
      True if it was relayed to the target, False otherwise. False in
      particular occurs if the target isn't relayable.
    """
    # Nothing to do for unset or explicitly-ignored handlers.
    if handler in (None, signal.SIG_IGN):
        return True
    # Restoring the default disposition is fairly painful to handle
    # fully, thus we just state we couldn't handle it and leave it to
    # client code.
    if handler == signal.SIG_DFL:
        return False
    handler(signum, frame)
    return True
4c4b8c2cd5af7ce4981321571747cab20a5bc01c
29,075
import re


def parse_show_snmp_system(raw_result):
    """
    Parse the 'show snmp system' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: dict of the form
        {
            'system_description': 'OpenSwitchsystem',
            'system_location': 'Bangalore',
            'system_contact': 'xyz@id.com'
        }
        or None when the output does not match.
    """
    pattern = (
        r'\s*SNMP\ssystem\sinformation\s*'
        r'\s*-*\s*'
        r'\s*System\sdescription\s\:\s*(?P<system_description>.+)'
        r'\s*System\slocation\s\:\s*(?P<system_location>.+)'
        r'\s*System\scontact\s\:\s*(?P<system_contact>.+)'
    )
    match = re.match(pattern, raw_result)
    if match is None:
        return None
    return match.groupdict()
3132ab88965bc04198a7d9c6deb290d4ab035280
29,076
def _setup_app_blueprints(app):
    """
    Setup application blueprints.

    Delegates to the application's own ``register_blueprints`` hook and
    hands the (mutated) application back to the caller.

    :param mydojo.base.MyDojoApp app: MyDojo application to be modified.
    :return: Modified MyDojo application
    :rtype: mydojo.base.MyDojoApp
    """
    app.register_blueprints()
    return app
bd6b5bf9229bfb09b83899baebb9a4f378b70087
29,078
import requests


def login_session(email, password):
    """
    Return a requests session carrying FPL login authentication.

    :param string email: account email
    :param string password: account password
    :raises AssertionError: when the login response does not look
        authenticated.
    """
    session = requests.Session()
    # Initial request only serves to retrieve the csrftoken cookie.
    session.get('https://fantasy.premierleague.com/')
    csrftoken = session.cookies['csrftoken']
    # Perform the actual login POST.
    login_url = 'https://users.premierleague.com/accounts/login/'
    payload = {
        'csrfmiddlewaretoken': csrftoken,
        'login': email,
        'password': password,
        'app': 'plfpl-web',
        'redirect_uri': 'https://fantasy.premierleague.com/a/login'
    }
    response = session.post(login_url, data=payload)
    assert "Sign Out" in response.text, "Login unsuccessful, check credentials"
    return session
ebc50f75c9cbc14b6bd9c0a554c855e968c169c9
29,079
def required_columns(row_to_add, wanted_keys):
    """
    :param row_to_add: Contains the rows from the input file.
    :param wanted_keys: List of required column header names to be
        displayed at the end.
    :return: Dict of keys/values restricted to the wanted keys (keys
        absent from the row are skipped).
    """
    return {k: row_to_add[k] for k in wanted_keys if k in row_to_add}
8643a592662939cf8b00f009c4dc3f87d1df4e6c
29,080
def _parse_line(s):
    """Parses a line of a requirements.txt file: drops any trailing
    comment and surrounding whitespace."""
    requirement, _, _comment = s.partition("#")
    return requirement.strip()
5c0c96898c288a7c358bf978a4415c17c7fb19c4
29,081
import requests


def download_content():
    """Downloads contributor data from github.

    Returns:
        bytes: raw response body from the GitHub contributor-stats API.

    Raises:
        requests.HTTPError: if the request returns a non-2xx status.
    """
    resp = requests.get('https://api.github.com/repos/kubernetes/kubernetes/stats/contributors')
    # Fail loudly on HTTP errors instead of returning an error payload.
    resp.raise_for_status()
    data = resp.content
    return data
043830e12e2ecf3ad4e5f585f6411079a36c52dc
29,083
def get_cpgs_orig_methylated(df, cells_to_use):
    """
    Get a list of cpg which where methylated to begin with using a
    specific set of cells.

    :param df: The df to work on
    :type df: pd.DataFrame
    :param cells_to_use: A list of cells to use, should match the rows
        names of the df
    :return: The mean methylation value per CpG (column) across the
        selected cells.
        NOTE(review): the original docstring promised "the indexes of
        CpG to use", but the code returns ``df.mean()`` — a Series of
        per-column means.  Confirm which contract callers expect.
    """
    # Keep only the requested cells (rows), then average per column.
    df_orig = df.filter(items=cells_to_use, axis=0)
    mean_values = df_orig.mean()
    return mean_values
569b0dcf2befd521a5328108ec725f35fdb60c7a
29,084
import torch
from typing import Tuple


def calculate_gae(
    values: torch.Tensor,
    rewards: torch.Tensor,
    dones: torch.Tensor,
    next_values: torch.Tensor,
    gamma: float,
    lambd: float
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Calculate generalized advantage estimator (GAE).

    Parameters
    ----------
    values: torch.Tensor
        values of the states
    rewards: torch.Tensor
        rewards given by the reward function
    dones: torch.Tensor
        if this state is the end of the episode
    next_values: torch.Tensor
        values of the next states
    gamma: float
        discount factor
    lambd: float
        lambd factor

    Returns
    -------
    advantages: torch.Tensor
        gaes + values (the lambda-return targets)
    gaes: torch.Tensor
        normalized gae (zero mean, unit std)
    """
    # calculate TD errors; (1 - dones) zeroes the bootstrap term at
    # episode boundaries
    deltas = rewards + gamma * next_values * (1 - dones) - values
    # initialize gae
    gaes = torch.empty_like(rewards)
    # calculate gae recursively from behind; the last step has no
    # successor, so it is just its TD error
    gaes[-1] = deltas[-1]
    for t in reversed(range(rewards.shape[0] - 1)):
        gaes[t] = deltas[t] + gamma * lambd * (1 - dones[t]) * gaes[t + 1]
    # 1e-8 guards against division by zero when the std is degenerate
    return gaes + values, (gaes - gaes.mean()) / (gaes.std() + 1e-8)
be8deb444fdc8c708deab9e638ac9a223f85aba6
29,085
import platform
import os
from pathlib import Path


def detect_store_path(bl_check_path_exist=True, srt_sub_path=None, st_local_path=None):
    """Choose a results directory depending on AWS vs. local execution.

    Code running on an AWS Linux instance (inside conda containers in our
    setup) saves under ``/data``; anywhere else it saves under
    ``~/Downloads/data`` unless an explicit local path is supplied.

    Parameters
    ----------
    bl_check_path_exist : `bool`
        When True, create the chosen directory if it does not exist yet.
    srt_sub_path : `string`, optional
        Sub-folder appended to the platform-specific base directory.
    st_local_path : `string`, optional
        Overrides the default local (non-AWS) directory entirely;
        ``srt_sub_path`` is NOT appended in that case.

    Returns
    -------
    tuple[bool, string]
        (running-on-AWS-Linux flag, directory chosen for saving files)
    """
    # AWS Linux kernels carry 'amzn' in their release string.
    release_name = platform.release()
    on_amazon_linux = 'amzn' in release_name

    if on_amazon_linux:
        save_dir = '/data/'
        if srt_sub_path is not None:
            save_dir = os.path.join(save_dir, srt_sub_path)
    elif st_local_path is None:
        save_dir = os.path.join(str(Path.home()), 'Downloads', 'data')
        if srt_sub_path is not None:
            save_dir = os.path.join(save_dir, srt_sub_path)
    else:
        # Explicit override replaces the local path wholesale.
        save_dir = st_local_path

    if bl_check_path_exist:
        Path(save_dir).mkdir(parents=True, exist_ok=True)

    return on_amazon_linux, save_dir
ed13a1380b5f50ab6a364461852ce7c08fc492a6
29,086
def get_ema_vars(ema, model):
    """Return a mapping of EMA shadow-variable names to shadow variables.

    Parameters
    ----------
    ema :
        Exponential-moving-average tracker exposing ``average`` and
        ``apply``; a falsy value means "no EMA in use".
    model :
        Model object exposing ``trainable_variables``.

    Returns
    -------
    dict
        ``{shadow_name: shadow_variable}`` for every trainable variable,
        or an empty dict when no EMA tracker is supplied.
    """
    if not ema:
        return {}
    try:
        return {
            ema.average(v).name: ema.average(v)
            for v in model.trainable_variables
        }
    # Narrowed from a bare except; if the averages were not created yet,
    # register the variables and retry.  (The original also had an
    # unreachable try/else branch, removed here: both try and except
    # paths return.)
    except Exception:
        ema.apply(model.trainable_variables)
        return {
            ema.average(v).name: ema.average(v)
            for v in model.trainable_variables
        }
4fb63ebc85e627e8a246fbdaa0c35002bcd808bb
29,087
def any_feature(tidy, replace={}, invert=False): """This method... .. warning: Might have an inconsistent behaviour when a replace value is included. Ideally keep just as a boolean outcome. Parameters ---------- tidy: replace: invert: Returns ------- """ # Keep idxs with all na idxs = tidy.isna().all(1) # Copy and replace aux = tidy.copy(deep=True) aux = aux.replace(replace) aux = aux.convert_dtypes() # Create feature feature = aux.any(axis=1).astype('boolean') # Revert if replace and invert: inv = {v: k for k, v in replace.items()} feature = feature.replace(inv) # Issue (None, None) keep as none feature[idxs] = None # Return return feature
882d6e3d5980d1b5b5ada32334a40f2bd044e6a7
29,088
def enclose_string(text):
    """Wrap text in double quotes, or triple double quotes when multiline.

    Parameters
    ----------
    text (str): a text

    Returns
    -------
    str: the text with inner double quotes escaped, enclosed in quotes
    """
    text = str(text)
    escaped = text.replace('"', r'\"')
    if len(text.splitlines()) > 1:
        return '"""{}"""'.format(escaped)
    return '"{}"'.format(escaped)
c0ce87633da75b019f2b59fa561172e851283fa8
29,089
def _set_PhiScalMARJ_grid_placement(fig, gs):
    """Create the three-panel axes layout for phiScal/M-AR/J-M plots.

    Layout produced (gs is a 2-row grid spec)::

        +-------------------------------+
        |    M vs AR    |    J vs M     |
        |   all_ax[1]   |   all_ax[2]   |
        |-------------------------------|
        |       phiScal_c vs p_c        |
        |           all_ax[0]           |
        +-------------------------------+

    Parameters
    ----------
    fig: Figure
        the figure whose geometry will be specified
    gs: class
        grid spec defining the row/column structure

    Returns
    -------
    all_ax: list
        [bottom full-width axis, top-left axis, top-right axis]
    """
    bottom_full = fig.add_subplot(gs[1:, :])
    top_left = fig.add_subplot(gs[0, 0])
    top_right = fig.add_subplot(gs[0, 1])
    return [bottom_full, top_left, top_right]
38be5b8ecb14861c29cf565f991a7661a10cc808
29,090
def summarise_app(app):
    """Return a one-line summary of an app's state, pin and version status.

    Non-installed states are upper-cased to stand out, as are the
    "not pinned" and "unknown version" conditions.
    """
    state = app["state"]
    if state != "Installed":
        state = state.upper()
    if app["is_pinned"]:
        pin_status = "Pinned"
    else:
        pin_status = "NOT PINNED"
    if app["application_version"]["is_unknown"]:
        known_status = "UNKNOWN"
    else:
        known_status = "Known"
    return f"{state}, {pin_status}, {known_status}"
547cf213843e1aa635def247c23e6d353d1ceaaf
29,091
def collapse_redundant_nodes(tree):
    """Collapse redundant internal clades of a Bio.Phylo tree.

    Counts the number of tips per clade; when a clade is nested inside
    another clade with the same tip count, the inner clade is redundant
    and gets collapsed.  Redundant clades can arise from user error or
    from trimming functions such as ``reduce_tree()`` that strip all the
    tips out of part of a clade.

    Fixes over the original: removed a leftover debug ``print`` and the
    unused ``counter`` / ``prev_count`` locals.

    :param tree: Bio.Phylo Tree object (modified in place)
    :return: the same tree with redundant clades collapsed
    """
    remove_list = []
    clade_dict = {}
    # Group internal clades by their terminal (tip) count.
    for clade in tree.get_nonterminals():
        count = clade.count_terminals()
        clade_dict.setdefault(count, []).append(clade)
    # Within each group, a clade contained in another clade of the same
    # tip count spans the same tips and is therefore redundant.
    for count_key in clade_dict.keys():
        if len(clade_dict[count_key]):
            for subclade in clade_dict[count_key]:
                for clade in clade_dict[count_key]:
                    if subclade in clade and subclade not in remove_list:
                        remove_list.append(subclade)
    for sub_clade in remove_list:
        tree.collapse(sub_clade)
    return tree
947006892381d44d740938d7a38e45b6fb24d7ac
29,092
def has_key(attr, key_name):
    """Return True if ``key_name`` is a key of the mapping ``attr``.

    :param attr: mapping (dict-like) to inspect
    :param key_name: key to look for
    :return: bool
    """
    # Dict membership is an O(1) hash lookup; the original scanned every
    # key with == in O(n).
    return key_name in attr
fc959d47d0bb582ed7c891090d48637ef33f279a
29,093
import math


def getExpectationValue(bitScore: float, searchSequenceLength: int, foundSequenceLength: int, numberOfSequencesInDatabase: int) -> float:
    """Return the E-value of a single query in a sequence database.

    Parameters
    ----------
    bitScore : float
        Comparison score, normalised to base 2 (bits).
    searchSequenceLength : int
        Length of the sequence that was the input of the query.
    foundSequenceLength : int
        Length of the sequence that was the result of the query.  For
        several results, call this function for each one individually.
    numberOfSequencesInDatabase : int
        Count of all sequences that could potentially have been found,
        e.g. all sequenced genes of the organism (or of all of KEGG for a
        cross-organism search).

    Returns
    -------
    float
        Statistical E-value (expectation value) for a match of the same
        confidence occurring with a totally unrelated (random) sequence.
    """
    # E = N * m * n * 2^(-S): the search-space size scaled down
    # exponentially by the bit score.
    search_space = numberOfSequencesInDatabase * searchSequenceLength * foundSequenceLength
    return search_space * math.pow(2, -bitScore)
01c8440c4be67ada93daf4ebbb519e9a550ae34a
29,095
from typing import Any


def get_type_name(t: Any) -> str:
    """Find the name of a type passed.

    Looks in sequence for ``__name__`` and ``_name``; if both are absent
    it falls back to ``str(t)``.

    Args:
        t: Any object

    Returns:
        String: the name of the type
    """
    # Sentinel keeps the original semantics: an attribute that exists but
    # holds None (e.g. some typing generics' _name) is still returned.
    missing = object()
    for attr in ('__name__', '_name'):
        value = getattr(t, attr, missing)
        if value is not missing:
            return value
    return str(t)
bfd81cc4cb93dc636e93c44f7cd94706724d64c0
29,096
def mi2km(mi):
    """Convert miles to kilometers.

    :param mi: distance in miles, or None
    :return: distance in kilometers, or None when the input is None
    """
    # `is None` instead of `== None`: identity test is the correct idiom
    # and is not fooled by objects with a custom __eq__.
    if mi is None:
        return None
    return mi * 1.60934
2000ad884b375c525da5d11a9b948354b170d59d
29,097
def construct_path(relation, start, end):
    """Construct a path between two actors from child->parent links.

    Follows ``relation`` from ``start`` until ``end`` is reached and
    returns the chain of actor IDs ordered from ``end`` to ``start``.
    """
    chain = [start]
    current = start
    while current != end:
        current = relation[current]
        chain.append(current)
    chain.reverse()
    return chain
f1cfb7cd1544406a32c2934956020a4ab768a7e0
29,099
def strfdelta(time_delta, fmt):
    """Format a timedelta with a str.format template using d/h/m/s fields.

    :param time_delta: timedelta object to convert
    :param fmt: template such as ``"{d}d {h}:{m}:{s}"``
    :return: formatted string
    """
    hours, remainder = divmod(time_delta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return fmt.format(d=time_delta.days, h=hours, m=minutes, s=seconds)
e9b2711dc09e4f0b6087938e9790d11adb5908cb
29,100
import shutil


def find_installed_player():
    """Return the command list for the first available CLI media player.

    Checks ffplay, cvlc and mplayer in that order; returns None when none
    of them is on PATH.
    """
    candidates = [
        ("ffplay", ["ffplay", "ffplay", "-nodisp", "-loglevel", "panic"]),
        ("cvlc", ["cvlc", "cvlc"]),
        ("mplayer", ["mplayer", "mplayer"]),
    ]
    for binary, command in candidates:
        if shutil.which(binary):
            return command
    return None
36b15abac37c3fbbd7da5ecb45d58c8c352c62eb
29,101
def _sleep_time(iter):
    """Return the time-to-sleep for the n'th iteration of a retry loop.

    Grows quadratically: 1 second for the first attempt, n**2 after.

    :param iter: iteration number
    :returns: number of seconds to sleep
    """
    return 1 if iter <= 1 else iter ** 2
6abd614bbabc872758049ea35d9ee0ebafd0f2ba
29,102
from typing import OrderedDict


def tag_pairs(tags, index=False):
    """Build an OrderedDict of unordered tag pairs keyed "tag1:tag2".

    Values are tuples of the two tags used to build each key, or their
    indices in ``tags`` when ``index`` is True.  When ``index`` is a list
    (same length as ``tags``), values are populated by indexing into it.

    Arguments
    ---------
    tags : list of strings
        Map tags from which to construct cross-spectrum keys.
    index : bool or list
        See above.

    Returns
    -------
    pairs : OrderedDict

    Example
    -------
    >>> tags = ['a', 'b']
    >>> tag_pairs(tags)
    OrderedDict([('a:a', ('a', 'a')), ('a:b', ('a', 'b')), ('b:b', ('b', 'b'))])
    >>> tag_pairs(tags, index=True)
    OrderedDict([('a:a', (0, 0)), ('a:b', (0, 1)), ('b:b', (1, 1))])
    >>> tag_pairs(tags, index=['c', 'd'])
    OrderedDict([('a:a', ('c', 'c')), ('a:b', ('c', 'd')), ('b:b', ('d', 'd'))])
    """
    pairs = OrderedDict()
    for it0, t0 in enumerate(tags):
        # Only pair with tags at or after t0 to avoid duplicate keys.
        for it1 in range(it0, len(tags)):
            t1 = tags[it1]
            key = "{}:{}".format(t0, t1)
            if isinstance(index, list):
                pairs[key] = (index[it0], index[it1])
            elif index is True:
                pairs[key] = (it0, it1)
            else:
                pairs[key] = (t0, t1)
    return pairs
97ce0a2815b5542275eb5b15c6b3ff434c456a6e
29,103
def add_saved_artists(auths, artists):
    """Follow every non-None artist ID on every destination account.

    :param auths: dict() being the 'destinations'-tree of the auth object
        as returned from authorize(), mapping username -> client
    :param artists: list() of artist IDs to follow on each account
    :return: True
    """
    for client in auths.values():
        for artist in artists:
            if artist is not None:
                client.user_follow_artists([artist])
    return True
aeaca1469ab7b74a22a838f2b193d40f75e6461f
29,104
def label_id_map(images_info, num_class):
    """Group image ids by class label.

    :param images_info: list of dicts with keys 'img_path', 'label', 'img_id'
    :param num_class: number of classes
    :return: dict mapping label (int) -> list of img_ids in input order
    """
    # Pre-seed every class so labels with no images still map to [].
    grouped = {label: [] for label in range(num_class)}
    for info in images_info:
        grouped[int(info['label'])].append(info['img_id'])
    return grouped
3f910818990c9a9f054e9cc94c014868ff657a8f
29,105
import sys
import platform


def is_windows(min_release=None, max_release=None):
    """True if the OS is Windows (optionally within a release range).

    :param min_release: minimum Windows release number (inclusive), or None
    :param max_release: maximum Windows release number (inclusive), or None
    :return: bool
    """
    global _RELEASE
    if sys.platform != 'win32':
        return False
    if _RELEASE is None:
        # BUG FIX: the release string comes from the ``platform`` module;
        # ``sys.platform`` is a plain string and has no ``release()``
        # method, so the original raised AttributeError on Windows.
        _RELEASE = platform.release()
    if min_release is not None and int(_RELEASE) < min_release:
        return False
    if max_release is not None and int(_RELEASE) > max_release:
        return False
    return True
5339f0a8a489f1f56df0dea67a50415da8a08c1b
29,108
import io


def parseChart(chartFN):
    """Parse a kana chart file into a dict mapping kana -> romaji.

    The file's first line holds the column headings (vowel romaji); each
    following line starts with the row romaji (consonant) followed by the
    kana cells.  A cell equal to 'X' is a placeholder and is skipped; an
    'X' inside a row/column romaji marks a silent component and is
    dropped.  Works for both Katakana and Hiragana charts.

    Based on @setrofim:
    http://www.python-forum.org/pythonforum/viewtopic.php?f=3&t=31935
    """
    with io.open(chartFN, "r", encoding="utf-8") as fd:
        lines = fd.read().split('\n')

    col_headings = lines.pop(0).split()

    # Map every kana cell to its (row romaji, column romaji) pair.
    cell_map = {}
    for line in lines:
        cells = line.split()
        for col, kana in enumerate(cells[1:]):
            cell_map[kana] = cells[0], col_headings[col]

    chartDict = {}
    for kana in sorted(cell_map.keys()):
        if kana == 'X':
            continue
        row_part, col_part = cell_map[kana]
        chartDict[kana] = row_part.replace('X', '') + col_part.replace('X', '')
    return chartDict
61fbd7b2342a553fb0e28d9dfc9eb037eafd2ba9
29,109
from typing import Dict


def dict_is_subequal(data: Dict, full_data: Dict) -> bool:
    """Check whether ``data`` is a "sub-dict" of ``full_data``.

    Every key/value pair in ``data`` must be present and equal in
    ``full_data``; keys that exist only in ``full_data`` are ignored.
    """
    return all(
        key in full_data and full_data[key] == value
        for key, value in data.items()
    )
59c19b538554eaa4b7e6e1bf78f812723fff7b67
29,112
def lily(the_note):
    """Return the Lilypond spelling of a note with its accidental suffix.

    Unrecognised accidentals (including None) add no suffix.
    """
    suffixes = {
        '+': 'qs',    # quarter sharp
        '#': 's',     # sharp
        '++': 'tqs',  # three-quarter sharp
        '-': 'qf',    # quarter flat
        'b': 'b',     # flat
        '--': 'tqf',  # three-quarter flat
    }
    return the_note.note.lower() + suffixes.get(the_note.accidental, '')
e04c14bb13d91ccec5a83de36436aec7d55a7e30
29,113
import ast def get_test_functions(filename): """ Returns a list of test functions. ie. [{'id': 'test_empty_array', 'line': 1}, ...] """ with open(filename) as f: read_data = f.read() module_ast = ast.parse(read_data) funcs = [] for stmt in module_ast.body: if isinstance(stmt, ast.ClassDef): for base in stmt.bases: if isinstance(base, ast.Attribute) and base.attr == 'TestCase' and isinstance(base.value, ast.Name) and (base.value.id == 'unittest' or base.value.id == 'asynctest'): for inner_stmt in stmt.body: if (isinstance(inner_stmt, ast.FunctionDef) or isinstance(inner_stmt, ast.AsyncFunctionDef)) and inner_stmt.name.startswith('test'): funcs.append({ 'id': inner_stmt.name, 'line': inner_stmt.lineno, }) return funcs
2936e3a1e6759837a3a543f62727fb26303cb758
29,114
def pretty_ti_txt(line):
    """Colorize a TI TXT line and append a short description of its role.

    Segment addresses ('@...') are yellow, the end-of-file marker ('q')
    is magenta, anything else is labelled as data.
    """
    if line.startswith('@'):
        return '\033[0;33m' + line + '\033[0m (segment address)'
    if line == 'q':
        return '\033[0;35m' + line + '\033[0m (end of file)'
    return line + ' (data)'
91fc1cfb5dba9467ca58da3154e7343d9845f44a
29,116
import networkx


def draw_graph(g: networkx.Graph, fname: str):
    """Draw a graph using pygraphviz and return the AGraph object.

    Parameters
    ----------
    g :
        A graph to draw.
    fname :
        The name of the file to write the graph to.

    Returns
    -------
    :
        A graphviz graph object.
    """
    # Work on a copy so the caller's graph labels are not mutated.
    g = g.copy()
    # Graphviz treats backslashes in labels as escapes; rewrite them so
    # they render literally.
    for node in g.nodes:
        if "\\" in g.nodes[node]["label"]:
            g.nodes[node]["label"] = g.nodes[node]["label"].replace("\\", "[backslash]")
    ag = networkx.nx_agraph.to_agraph(g)
    # Add some visual styles to the graph
    ag.node_attr["shape"] = "plaintext"
    ag.graph_attr["splines"] = True
    ag.graph_attr["rankdir"] = "TD"
    # Render to file with the hierarchical "dot" layout engine.
    ag.draw(fname, prog="dot")
    return ag
761eeb2d44e41ecf704f4aa09eb5e30fbe665030
29,117
def sfc_sw(swup_sfc, swdn_sfc):
    """All-sky surface upward shortwave radiative flux.

    Computed as the upward flux minus the downward flux at the surface.
    """
    net_up = swup_sfc - swdn_sfc
    return net_up
de51cf2b3ad410788e041117df01e19e959e3abe
29,119
def read_runlist(filepath):
    """Read a list of runs from a txt file.

    Parameters
    ----------
    filepath : `str`
        The input file with the list of runs.  Each line should contain
        raft and run number, e.g., RTM-004-Dev 6106D

    Returns
    -------
    outlist : `list`
        A list of [raft, run] token pairs.
    """
    outlist = []
    # Context manager guarantees the handle is closed (the original left
    # the file open).  Lines without exactly two tokens are skipped.
    with open(filepath) as fin:
        for line in fin:
            tokens = line.split()
            if len(tokens) == 2:
                outlist.append(tokens)
    return outlist
de106013ef1cb1de32ed7120b2c5aeab2bf1aafb
29,120
import pathlib


def is_pardir(pardir, subdir):
    """Return True when ``pardir`` is an ancestor directory of ``subdir``.

    Purely lexical: paths are compared as written, without resolving
    symlinks or relative components.
    """
    ancestors = pathlib.Path(subdir).parents
    return pathlib.Path(pardir) in ancestors
c7b14ef578f24f2565ae2e88aff248eea3abf963
29,121
def parse_tags(src_mappings: list) -> tuple:
    """Split source mappings into instructions and tag destinations.

    A line whose first token starts with ``:`` declares a tag pointing at
    the next emitted instruction.  You can stack several tags onto one
    destination line.  If the final line is a tag, a no-op ``end``
    instruction is appended so the tag has something to reference.

    :param src_mappings: list of (source line number, line text) pairs
    :return: (instruction mappings, {tag name: destination index})
    """
    dst_tagged = {}
    dst_mappings = []
    last_tagged_line = -1
    last_source_line = -1

    for src_cursor, src_line in src_mappings:
        tokens = src_line.split()
        # The source line is tracked even for blank lines (matches the
        # original, which updated it before hitting the IndexError path).
        last_source_line = src_cursor
        if not tokens:
            continue
        first = tokens[0]
        if first.startswith(":"):
            # Tag declaration: it points at the next instruction slot.
            dst_tagged[first[1:]] = len(dst_mappings)
            last_tagged_line = len(dst_mappings)
        else:
            dst_mappings.append((src_cursor, src_line.strip()))

    # A trailing tag points past the last instruction; give it a no-op.
    if last_tagged_line == len(dst_mappings):
        dst_mappings.append((last_source_line + 1, "end"))

    return (dst_mappings, dst_tagged)
c871bc4511f1f37c4ffec4768f4ced320183e676
29,123
def merge_top(a_list, accessor):
    """Merge the weights of all entries tied with the top entry.

    Multiple weights can produce the same top score; this collects them.
    Sorts ``a_list`` in place by ascending 'score', takes the first entry
    as the winner and attaches a 'weights' list with the truthy 'weight'
    of every leading entry whose ``accessor`` value matches the winner's.

    :param a_list: list of dicts, each with at least a 'score' key
    :param accessor: key whose equality defines "the same top result"
    :return: the winning entry (mutated: gains a 'weights' list)
    """
    a_list.sort(key=lambda entry: entry['score'], reverse=False)
    top = a_list[0]
    collected = []
    top['weights'] = collected
    for entry in a_list:
        if entry[accessor] != top[accessor]:
            break
        weight = entry.get('weight')
        if weight:
            collected.append(weight)
    return top
1f96bd3c9fd688badbcd4948c9e71b3728934198
29,124
def energy(layer, data):
    """Total energy deposited in a given layer, per event.

    Sums over the two spatial axes (1, 2), leaving the event axis.
    """
    layer_hits = data[layer]
    return layer_hits.sum(axis=(1, 2))
d8f73ac1b9fb9a03fdb89420f434a03306872d8a
29,127
from os.path import dirname
import unittest


def suite():
    """Discover and return the unittest.TestSuite of this package's tests."""
    # Discover from the package directory (two levels above this module),
    # anchoring top_level_dir one level higher for correct imports.
    package_dir = dirname(dirname(__file__))
    return unittest.defaultTestLoader.discover(
        package_dir, top_level_dir=dirname(package_dir)
    )
6e2a13e474e142c181456a419c3bfefa44e35e44
29,128
def logPPDNA(misfits):
    """Convert misfit values to a log posterior probability density.

    For standard least squares (sum of squared residuals weighted by a
    priori data covariances) the log-PPD is simply -0.5 times the misfit.
    Other likelihood forms can be rescaled here if needed.
    """
    scale = -0.5
    return scale * misfits
49db6cb6148ab094ea848a7e1f2ee4f07df451b6
29,129
def _get_query_dict(**kwargs):
    """Returns query dict by kwargs.

    Skip None-values, but keeps False etc.

    >>> res = _get_query_dict(url=None, test='a', page=True, full=False)
    >>> res == {'test': 'a', 'page': True, 'full': False}
    True
    """
    # Native dict comprehension replaces the six.iteritems() shim (and
    # its import, whose only use in this snippet was here); the py2/py3
    # compatibility layer is unnecessary on Python 3.
    return {k: v for k, v in kwargs.items() if v is not None}
6d4ba22bb82853356765498f45bd7fda82000921
29,130
def _extract_time_from_disk(parent_ref, name, is_matrix=False):
    """Pull a stored time array out of an on-disk (HDF5-style) group.

    Handles the Matlab-save vs Python-save layout difference: vectors may
    be stored as either a column or a row, so the long axis is selected
    unless ``is_matrix`` is set, in which case the 2-D array is returned
    as-is.  (Vector assumption needs rework for eigenvector matrices.)
    """
    raw = parent_ref[name].value
    if is_matrix:
        return raw
    # Assume a vector stored as 2-D; pick the long axis.
    if raw.shape[0] > raw.shape[1]:
        return raw[:, 0]
    return raw[0, :]
8a398a6853c482f0ffe6ff85230e0745518c46b3
29,131
import csv


def read_data_from_csv(csv_file_path=None):
    """Read CSV test data and return it as a list of rows.

    :param csv_file_path: path to the CSV file; defaults to the historical
        hard-coded exercise file for backward compatibility.
    :return: list of rows, each a list of string fields
    """
    # Generalized: the path used to be hard-coded; the default preserves
    # the old behaviour.  Also removed an unused accumulator variable.
    if csv_file_path is None:
        csv_file_path = ('/Users/ahamouda/study_projects/API_testing_CI_pytest/'
                         'request_basics/exercises/ex02_csv_test_date.csv')
    with open(csv_file_path, newline='') as csv_file:
        return [row for row in csv.reader(csv_file, delimiter=',')]
cd5024fb0d93431de0d9c23e583266f74a54bece
29,132
import subprocess
import re


def find_hostname(SETTINGS, file_name):
    """Search a file's full git diff for a hostname-like line.

    Runs ``git diff HEAD^ HEAD`` with enough context lines to cover the
    whole file, then tries every regex in ``SETTINGS["HOSTNAME_PREFIXES"]``
    against each output line; the first capture group of the first match
    wins (with quotes/semicolons stripped).

    :param SETTINGS: config dict with a "HOSTNAME_PREFIXES" regex list
    :param file_name: path of the file to diff
    :return: matched hostname, or 'HOSTNAME-IS-UNKNOW' when nothing matches
    """
    command = ('git --no-pager diff HEAD^ HEAD -U$(wc -l '
               '{file}) {file}').format(file=file_name)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE)
    if output.returncode != 0:
        return 'HOSTNAME-IS-UNKNOW'
    for line in output.stdout.decode('utf-8').split('\n'):
        for regex in SETTINGS["HOSTNAME_PREFIXES"]:
            match = re.search(r'{}'.format(regex), line)
            if match:
                return match.groups()[0].strip('";')
    return 'HOSTNAME-IS-UNKNOW'
421d63b3228b0fce0dc6d1db0d24725ad6516208
29,134
import time


def time_strptime(*args, **kwargs):
    """Parse a time string with time.strptime, always in the C locale.

    Date strings are used internally in the database and must not be
    localized; this thin wrapper documents and preserves that intent.
    """
    return time.strptime(*args, **kwargs)
5410a4b4154471834f70818bf2a0a2356bdc25dd
29,135
import torch


def beta_regularizer(mean, std, lmbda, param, function=None):
    """Build a Beta-distribution penalty for parameters living in [0, 1].

    The (mean, std) pair is converted via moment matching into the Beta's
    concentration parameters; the returned closure evaluates the negative
    log-probability of ``param`` (optionally transformed) scaled by
    ``lmbda``.

    Args:
        mean (float): Mean of the Beta distribution.
        std (float): Standard deviation of the Beta distribution.
        lmbda (float): Regularizer pre-factor.
        param (object): Parameter(s) to apply the Beta prior to.
        function (callable): Optional function (e.g. softplus) applied to
            ``param`` before evaluating the log-probability.

    Returns:
        beta_reg: Function to be called in the training loop as
        ``loss = vimco(data) + beta_reg()``

    Example:
        # Initialize callable regularizer:
        >>> beta_reg = beta_regularizer(0.1, 0.01, 0.001, gen_model.some_param, function=softplus)
        # Later in training loop
        >>> loss = some_loss(data)
        >>> loss = loss + beta_reg()
        >>> loss.backward()
    """
    # Moment matching: solve for (alpha, beta) given mean and std.
    alpha = mean * (mean * (1 - mean) / std ** 2 - 1)
    beta = alpha / mean * (1 - mean)

    transform = function if function is not None else (lambda x: x)

    def beta_reg():
        prior = torch.distributions.beta.Beta(alpha, beta)
        return -lmbda * torch.sum(prior.log_prob(transform(param)))

    return beta_reg
ae806508368022c4f45028a6f45c54352526b884
29,137
import socket
import contextlib


def get_available_port():
    """Ask the OS for a currently-free TCP port number.

    Binds to port 0 so the kernel assigns a free port; relies on the
    kernel not immediately reusing previously assigned ports.

    :rtype: int
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with contextlib.closing(sock):
        sock.bind(('', 0))
        _, port = sock.getsockname()
    return port
992e32b903801cb6e3498c781df7578ba58434e9
29,138
def prepare_text(text):
    """Normalize a string: trim surrounding whitespace and lowercase it."""
    trimmed = text.strip()
    return trimmed.lower()
337859656670fa9bcd7b4296318ea9a3c4b730f1
29,139
import os


def __get_encoded_string(path):
    """Read and return the stored authorization string.

    ``path`` may contain a ``{}`` placeholder, which is filled with the
    current working directory before the file is opened.
    """
    with open(path.format(os.getcwd())) as log:
        return log.read()
9db9a01981ccc1fbac6c0a28ba92fa1028452647
29,140
def encrypt(key, mensaje):
    """XOR-cipher the message with a repeating key; return it hex-encoded.

    Each message character is XORed with the key character at the same
    position (key repeated cyclically); the resulting string is UTF-8
    encoded and returned as a hex string (the ciphertext).
    """
    xored = ''.join(
        chr(ord(char) ^ ord(key[index % len(key)]))
        for index, char in enumerate(mensaje)
    )
    return xored.encode("utf-8").hex()
a7ded9223ca8291d2f5cc748bc0e26890bd1d42c
29,141
import torch


def pca(X, k, center=True):
    """Principal Components Analysis implemented with the SVD.

    :param X: (n, d) data matrix
    :param k: number of components to keep
    :param center: subtract the column means (via a centering matrix)
        before decomposing
    :return: dict with the input X, k, the components and the explained
        variance of the kept singular values
    """
    n = X.size()[0]
    # Centering operator H = I - (1/n) * 1 1^T (plain identity when
    # centering is disabled).
    ones = torch.ones(n).view([n, 1])
    if center:
        mean_op = (1 / n) * torch.mm(ones, ones.t())
    else:
        mean_op = torch.zeros(n * n).view([n, n])
    H = torch.eye(n) - mean_op
    X_centered = torch.mm(H.double(), X.double())
    u, s, v = torch.svd(X_centered)
    components = v[:k].t()
    explained_variance = torch.mul(s[:k], s[:k]) / (n - 1)
    return {'X': X, 'k': k, 'components': components,
            'explained_variance': explained_variance}
e2abebfeecb48543f645581820fd095c3718f2fb
29,143