content — string, lengths 35 to 416k
sha1 — string, length 40
id — int64, values 0 to 710k
def effective_stiffness_from_base_shear(v_base, disp):
    """
    Calculates the effective stiffness based on the base shear and displacement.

    Typically used in displacement-based assessment.

    :return: effective stiffness (base shear divided by displacement)
    """
    return v_base / disp
a6b48e4dc970c19d0cab3d3c798633512ca62d9a
17,028
def expand_box(box, img_shape, scale=None, padding=None):
    """Expand roi box

    Parameters
    ----------
    box : list
        [x, y, w, h] order.
    img_shape : list
        [width, height]
    scale : float, optional
        Expand roi by scale, by default None
    padding : int, optional
        Expand roi by padding, by default None

    Returns
    -------
    expanded roi: list
        [x, y, w, h] order.
    """
    x, y, w, h = box
    wmax, hmax = img_shape
    if scale is not None:
        xo = max([x - (scale - 1) * w / 2, 0])
        yo = max([y - (scale - 1) * h / 2, 0])
        wo = w * scale
        ho = h * scale
    elif padding is not None:
        xo = max(x - padding, 0)
        yo = max(y - padding, 0)
        wo = w + padding * 2
        ho = h + padding * 2
    else:
        xo, yo, wo, ho = x, y, w, h
    if xo + wo >= wmax:
        wo = wmax - xo - 1
    if yo + ho >= hmax:
        ho = hmax - yo - 1
    return [int(xo), int(yo), int(wo), int(ho)]
3fd9c97b8baa70a89b898d3e9d14e8c930d0045e
17,029
def parse_by_prefix(input_string, prefix, end_char=[" "]):
    """Searches through input_string until it finds the prefix.
    Returns everything in the string between prefix and the next
    instance of end_char."""
    start = input_string.find(prefix) + len(prefix)
    end = start
    # Stop at the end of the string if no end_char follows the prefix,
    # instead of raising IndexError.
    while end < len(input_string) and input_string[end] not in end_char:
        end += 1
    return input_string[start:end]
8cc80c9c359ae155ed4f8f197c1e9bd604cebf1d
17,030
import logging


def reformat(response_query):
    """
    Reformats elasticsearch query to remove extra information
    """
    data = list()
    for hit in response_query["hits"]["hits"]:
        data.append(
            {
                "id": hit["_id"],
                "source_url": hit["_source"]["post_url"],
                "title": hit["_source"]["title"],
                "company": hit["_source"]["company"],
                "description": hit["_source"]["description"],
                "date_published": hit["_source"]["publication_date"],
                "location_city": hit["_source"]["location_city"],
                "location_state": hit["_source"]["location_state"],
                "geo_locat": hit["_source"]["location_point"],
            }
        )
    logging.info(f"Reformatted {len(data)} returned responses")
    return {"jobs": data}
87ae38ce28953498bac9839e537a09ed07a106ab
17,033
import yaml


def load_config():
    """
    Load config data from config.yaml
    """
    with open("config.yaml", 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
48d67ddb0049ef06827ac2de042fc9e48c4c02f4
17,034
def starrating(key):
    """Convert the A|B|C|X|L reliability rating to an image"""
    ratings = {"A": u"★★★", "B": u"★★", "C": u"★", "X": "X", "L": "L"}
    return ratings[key]
4ef9f4fdafe46ca639e26267f478e0d775785453
17,035
import argparse


def _parse_args():
    """Parse Command Arguments."""
    desc = 'Download SmugMug galleries'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('download_url', help='SmugMug galleries URL')
    return parser.parse_args()
a8057f9f76b70d390fab0fb0cb802b71b2faea1e
17,036
def FindPosition(point, points):
    """Determines the position of point in the vector points"""
    if point < points[0]:
        return -1
    for i in range(len(points) - 1):
        if point < points[i + 1]:
            return i
    return len(points)
11ccabcade65053ccfa6751813d90a0eeaccc339
17,040
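A quick sketch of FindPosition's interval convention, for reference: values below points[0] map to -1, a value between points[i] and points[i+1] maps to i, and anything at or above the last point falls through the loop and returns len(points).

assert FindPosition(0.5, [1, 2, 3]) == -1   # below the first point
assert FindPosition(2.5, [1, 2, 3]) == 1    # between points[1] and points[2]
assert FindPosition(5, [1, 2, 3]) == 3      # at or above the last point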
def rm_url_parts(url, remove):
    """Dynamically get the url <remove> steps before the original url"""
    parts = url.split("/")
    for x in range(0, remove):
        del parts[-1]
    url = '/'.join(map(str, parts))
    return url
e569144b01770ddbb703062bfac9ff908a35c589
17,041
import requests


def package_info(package_url):
    """Return latest package version from PyPI (as JSON)."""
    return requests.get(package_url).json().get('info')
cba261a0649eed30329fd479e018fba11e96d022
17,043
def check_digit10(firstninedigits):
    """Check sum ISBN-10."""
    # minimum checks
    if len(firstninedigits) != 9:
        return None
    try:
        int(firstninedigits)
    except Exception:  # pragma: no cover
        return None
    # checksum
    val = sum(
        (i + 2) * int(x)
        for i, x in enumerate(reversed(firstninedigits)))
    remainder = int(val % 11)
    if remainder == 0:
        tenthdigit = 0
    else:
        tenthdigit = 11 - remainder
    if tenthdigit == 10:
        tenthdigit = 'X'
    return str(tenthdigit)
33d8da015a471e5e9f29eb4c9b2b0173979d8130
17,044
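A sanity check for check_digit10 above, using the nine leading digits of the well-known ISBN 0-306-40615-2 as a reference value:

# Weighted sum for "030640615" is 130; 130 % 11 = 9, so the check digit is 11 - 9 = 2.
assert check_digit10("030640615") == "2"
# Inputs that are not exactly nine digits are rejected.
assert check_digit10("12345") is None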
import os


def construct_makeblastdb_cmd(infile, outdir, blastdb_exe):
    """Returns a tuple of (cmd_line, filestem) where cmd_line is the BLAST
    database formatting command for the passed filename, placing the result
    in outdir, with the same filestem as the input filename.

    The formatting assumes that the executable is makeblastdb from BLAST+

    - infile       - input filename
    - outdir       - location to write the database
    - blastdb_exe  - path to BLAST database construction executable

    >>> construct_makeblastdb_cmd('../tests/seqdata/infile1.fasta', \
            '../tests/output/', 'makeblastdb')
    ('makeblastdb -dbtype prot -in ../tests/seqdata/infile1.fasta -title \
infile1 -out ../tests/output/infile1.fasta', 'infile1')
    """
    filename = os.path.split(infile)[-1]       # strip directory
    filestem = os.path.splitext(filename)[0]   # strip extension
    outfname = os.path.join(outdir, filename)  # location to write db
    cmd = "{0} -dbtype prot -in {1} -title {2} -out {3}".format(blastdb_exe,
                                                                infile,
                                                                filestem,
                                                                outfname)
    return (cmd, filestem)
79039720c175644a5332763a5e6f5979f2cb3610
17,045
def _load_captions(captions_file):
    """Loads flickr8k captions.

    Args:
        captions_file: txt file containing caption annotations in
            '<image file name>#<0-4> <caption>' format

    Returns:
        A dict of image filename to captions.
    """
    f_captions = open(captions_file, 'rb')
    captions = f_captions.read().decode().strip().split('\n')
    data = {}
    for row in captions:
        row = row.split("\t")
        row[0] = row[0][:len(row[0]) - 2]  # strip the '#<0-4>' suffix
        try:
            data[row[0]].append(row[1])
        except KeyError:
            data[row[0]] = [row[1]]
    f_captions.close()
    return data
89a00a5befe1162eda3918b7b6d63046fccd4c70
17,046
from typing import List


def text_to_bits(
    text: str,
    encoding: str = "utf-8",
    errors: str = "surrogatepass",
) -> List[int]:
    """
    Takes a string and returns its binary representation.

    Parameters
    ----------
    text: str
        Any string.

    Returns
    -------
    A list of 0s and 1s.
    """
    bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
    bits_list = []
    for bit in bits.zfill(8 * ((len(bits) + 7) // 8)):
        bits_list.append(int(bit))
    return bits_list
cc9ab6497ab3b797625016176b74a6660ed59a80
17,047
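For reference, a minimal check of text_to_bits: the single character "a" encodes to the byte 0x61, whose zero-padded bit pattern is 01100001.

assert text_to_bits("a") == [0, 1, 1, 0, 0, 0, 0, 1]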
def get_the_written_file_list(writefile_cursor):
    """Return the written files (W)."""
    written_files_query = '''
        SELECT process, name, mode
        FROM opened_files
        WHERE mode == 2
    '''
    writefile_cursor.execute(written_files_query)
    return writefile_cursor.fetchall()
8abe57fd88d569d5cf48b13dfbcfc142fa6c1504
17,049
import os


def files(path):
    """
    Gets the list of all the files

    Args:
        path (str): Path to the folder

    Returns:
        list: list of all the files
    """
    files_list = []
    for root, dirs, files in os.walk(path):
        for filename in files:
            files_list.append(filename)
    return files_list
5f9ed867df47156e9bcea194e49892cd43f677ca
17,050
def final_nonzero(L):
    """
    Return the index of the last non-zero value in the list.
    """
    for index, val in reversed(list(enumerate(L))):
        if val:
            return index
    return 0
1064987732146a9f6c12a2cab1dc84d2657fa321
17,052
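Worth noting about final_nonzero above: the function returns 0 both when index 0 holds the only non-zero value and when the list has no non-zero values at all, so those two cases are indistinguishable from the return value alone.

assert final_nonzero([0, 7, 0, 3, 0]) == 3
assert final_nonzero([5, 0]) == 0   # last non-zero is at index 0
assert final_nonzero([0, 0]) == 0   # no non-zero value; same result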
from typing import Optional
import os


def directory(root: Optional[str], name: Optional[str]) -> str:
    """Handle name of directory."""
    if name is None or not os.path.isdir(name):
        name = os.getcwd()
    result = os.path.abspath(name)
    if root is not None:
        if not result.startswith(root):
            result = root
    return result
f0f2ec97241084a7aeec999389f3fcc57a19f251
17,053
def points_to_string(points):
    """
    Returns legacy format supported by Insight
    """
    points = ["%s,%s" % (p[0], p[1]) for p in points]
    csv = ", ".join(points)
    return "points[%s] points1[%s] points2[%s]" % (csv, csv, csv)
4588393ee05ddc03cb24e9638abce756521177a2
17,054
def pattern_count(text: str, pattern: str) -> int:
    """Count the number of occurrences of a pattern within text

    Arguments:
        text {str} -- text to count pattern in
        pattern {str} -- pattern to be counted within text

    Returns:
        int -- The number of occurrences of pattern in the text

    Example:
        >>> pattern_count("ACAACTATGCATACTATCGGGAACTATCCT", "ACTAT")
        3
    """
    count = 0
    pattern_size = len(pattern)
    for i in range(len(text) - pattern_size + 1):
        if text[i:i + pattern_size] == pattern:
            count += 1
    return count
e6fcd2f0645141a3ddf211facb49058deb6dc1fd
17,055
import time


def timestamp():
    """
    The current epoch timestamp in milliseconds as a string.

    Returns:
        The timestamp
    """
    return str(int(time.time() * 1000))
2d1d6bc545f5b45d5a16a636837f63d85d681a1f
17,056
def expand_dates(df, columns=[]):
    """
    generate year, month, day features from specified date features
    """
    columns = df.columns.intersection(columns)
    df2 = df.reindex(columns=set(df.columns).difference(columns))
    for column in columns:
        df2[column + '_year'] = df[column].apply(lambda x: x.year)
        df2[column + '_month'] = df[column].apply(lambda x: x.month)
        df2[column + '_day'] = df[column].apply(lambda x: x.day)
    return df2
ea07a26a271f8d9be05858392e51ec28c851efdc
17,057
import os


def get_stat_result(input_stat):
    """
    Wrapper for os-specific stat_result instance.

    Args:
        input_stat (nt.stat_result or posix.stat_result): instance you'd
            like to clone.

    Returns:
        nt.stat_result or posix.stat_result, dependent on system platform.
    """
    if os.name == "posix":
        from posix import stat_result  # pylint: disable=E0401
    else:  # pragma: no cover
        from nt import stat_result  # pylint: disable=E0401
    return stat_result(input_stat)
90cfd2015ea8a3fce9280e4a42391be866a77794
17,059
def unquote(text):
    """Unquote the text from ' and "

    text: str - text to be unquoted

    return  text: str - unquoted text

    >>> unquote('dfhreh')
    'dfhreh'
    >>> unquote('"dfhreh"')
    'dfhreh'
    >>> unquote('"df \\'rtj\\'"') == "df 'rtj'"
    True
    >>> unquote('"df" x "a"')
    '"df" x "a"'
    >>> unquote("'df' 'rtj'") == "'df' 'rtj'"
    True
    >>> unquote('"dfhreh"\\'') == '"dfhreh"\\''
    True
    >>> unquote('"rtj\\'a "dfh" qw\\'sd"') == 'rtj\\'a "dfh" qw\\'sd'
    True
    >>> unquote('"\\'dfhreh\\'"')
    'dfhreh'
    """
    # Ensure that the text is quoted
    quotes = '"\''  # Kinds of the resolving quotes
    tlen = 0 if not text else len(text)  # Text length
    if tlen <= 1 or text[0] not in quotes or text[-1] != text[0]:
        return text
    q = []  # Current quotation with its position
    qnum = 0  # The number of removing quotations
    for i in range(tlen):
        c = text[i]  # Current character (symbol)
        if c not in quotes:
            continue
        # Count opening quotation
        if not q or q[-1][0] != c:
            q.append((c, i))
            continue
        # Closing quotation compensates the last opening one
        if len(q) == tlen - i and tlen - i - 1 == q[-1][1]:
            qnum += 1
        else:
            qnum = 0
        q.pop()
    return text[qnum:tlen - qnum]
68f3498b5224e76e961d3f5e7570fc09ca334034
17,062
import re


def omit_url(text):
    """
    Shorten URLs in the text.

    :param text: the original text
    :return: the text with URLs elided
    """
    pattern = r'https?://[\w/:%#\$&\?\(\)~\.=\+\-]+'
    return re.sub(pattern, 'URL省略', text)
2d1a9b4be2aead8fef2f59dba6847b886a7ae313
17,064
import inspect
import sys


def get_all():
    """
    Returns a list of all aggregator classes
    """
    temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    return [i[1] for i in temp if i[0] != "Aggregator"]
a25859ffe4871790d27e02c0c77af1244ba115d8
17,066
def _Divide(x, y):
    """Divides with float division, or returns infinity if denominator is 0."""
    if y == 0:
        return float('inf')
    return float(x) / y
dee5ef0c4160c45ee9c8ee6aee651d60c3e70252
17,067
def get_results(m):
    """
    Extract model results as dict

    Parameters
    ----------
    m : Pyomo model instance
        Model instance containing solution (post-solve)

    Returns
    -------
    results : dict
        Dictionary containing model results
    """
    results = {
        "x": m.x.value,
        "y": m.y.value
    }
    return results
1dcb35bac7fe2379b096bb2fd838ed53a7ebaca4
17,068
import string


def AsciiUpper(N):
    """
    ascii uppercase letters
    """
    return string.ascii_uppercase[:N]
aa62357ebe821dd4b6f359a22269916e64d29414
17,069
def check_do_touch(board_info):
    """Check whether the board needs a pre-upload 1200 bps 'touch'."""
    do_touch = False
    bootloader_file = board_info.get('bootloader.file', '')
    if 'caterina' in bootloader_file.lower():
        do_touch = True
    elif board_info.get('upload.use_1200bps_touch') == 'true':
        do_touch = True
    return do_touch
39683a63ed2aac3624176e340825180424aa5f46
17,072
def get_weight(op, return_name=True):
    """get the weight of operators with weight."""
    for inp in op.all_inputs():
        if inp._var.persistable:
            if return_name:
                return inp.name()
            else:
                return inp
01f5df2c93d1b8b84c1f51c9e2c6ff75b9f54c78
17,073
def get_number_coluna(janela, chuva_height):
    """Determine the number of rows of raindrops that fit on the screen."""
    availble_space_y = (janela[1] - (3 * chuva_height))
    number_coluna = int(availble_space_y / (2 * chuva_height))
    return number_coluna
a41c2ae23da33149c88a507cf900b9f8e2772622
17,074
import argparse
import os


def parse_arguments() -> argparse.Namespace:
    """
    Parse arguments for TNT-based implied weighting branch support

    Args:
        None

    Example:
        $ python ./pyiwe_runner.py

    :return: argparse.Namespace
    """
    parser = argparse.ArgumentParser(description='Argument parser for pyiwe_runner.py')
    parser.add_argument('feature_matrix', metavar='feat_matrix', type=str,
                        help='str, path to the feature matrix for TNT')
    parser.add_argument('-k_start', metavar='k_start', type=float, default=1e-2,
                        help='float, minimum value in a linear scale or a degree in a '
                             'logarithmic scale, default=1e-2')
    parser.add_argument('-k_stop', metavar='k_stop', type=float, default=1.5,
                        help='float, maximum value in a linear scale or a degree in a '
                             'logarithmic scale, default=1.5')
    parser.add_argument('-k_num', metavar='k_num', type=int, default=100,
                        help='int, number of samples to generate, default=100')
    parser.add_argument('-k_scale', metavar='k_scale', type=str, default='log',
                        choices=('log', 'lin'),
                        help='str, scale of concavity values, `log` or `linear`, '
                             'default=`log`')
    parser.add_argument('-n_runs', metavar='n_runs', type=int, default=3,
                        help='int, the number of repeated IW runs, default=3')
    parser.add_argument('-cutoff', metavar='cutoff', type=float, default=0.5,
                        help='float, cutoff value between 0.0 and 1.0 for a final '
                             'majority rule tree, default=0.5')
    parser.add_argument('-xmult_hits', metavar='xmult_hits', type=int, default=5,
                        help='int, produce N hits to the best length and stop, default=5')
    parser.add_argument('-xmult_level', metavar='xmult_level', type=int, default=3,
                        help='int, set level of search (0-10). Use 0-2 for easy data, '
                             'default=3')
    parser.add_argument('-xmult_drift', metavar='xmult_drift', type=int, default=5,
                        help='int, cycles of drifting, default=5')
    parser.add_argument('-hold', metavar='hold', type=int, default=500,
                        help='int, a tree buffer to keep up to specified number of trees, '
                             'default=500')
    parser.add_argument('-output_folder', metavar='output_folder', type=str,
                        default=os.path.join('.', 'output'),
                        help='str, path to store data, default=./output')
    parser.add_argument('-log_base', metavar='log_base', type=float, default=10.0,
                        help='float, base for calculating a log space for concavity '
                             'constants, default=10.0')
    parser.add_argument('-float_prec', metavar='float_prec', type=int, default=5,
                        help='int, floating point calculations precision, default=5')
    parser.add_argument('-tnt_seed', metavar='tnt_seed', type=str, default='1',
                        help='str, random seed properties for TNT, default=`1`')
    parser.add_argument('-seed', metavar='seed', type=int, default=42,
                        help='int, random seed for Python numpy, default=42')
    parser.add_argument('-tnt_echo', metavar='tnt_echo', type=str, choices=('-', '='),
                        default='-',
                        help='str, `=`, echo each command, `-`, don`t echo, default=`-`')
    parser.add_argument('-memory', metavar='memory', type=float, default=1024 * 10,
                        help=f'float, memory to be used by macro language, in KB, '
                             f'default={1024 * 10}')
    parser.add_argument('-c', action='store_true',
                        help='bool, clear temp *.tre files in output folder after processing')
    parser.add_argument('-v', action='store_true',
                        help='bool, add processing verbosity')
    return parser.parse_args()
722e4304b41890af00276e8869895f1a9d90248e
17,078
import torch


def sample_indices(length, proportion, generator=None, seed=None):
    """Vector indices to select ``proportion`` from a batch of size ``length``."""
    if generator is None:
        generator = torch.Generator()
    if seed is not None:
        generator = generator.manual_seed(seed)
    subset_size = int(length * proportion)
    return torch.randperm(length, generator=generator)[:subset_size]
2a622365d2a7eb36d00a8f19fcc7448905e599fa
17,079
def _readSatCatLine(line):
    """Returns the name, international designator (id), and NORAD catalog
    number (catNum) from a line in the satellite catalog.
    """
    name = line[23:47].strip()
    id = line[0:11].strip()
    catNum = line[13:18].strip()
    return name, id, catNum
7d30ab9836f30cb7c10285ad86ca70cad7965b9c
17,082
import base64


def load_image(img_path):
    """Load an image from a path"""
    with open(img_path, "rb") as f:
        img_bytes = f.read()
    return base64.b64encode(img_bytes).decode("utf8")
5eb56ac5e3a844481fe0975dc18500ee990f62be
17,083
def n_glass(wavelength_in_nm):
    """for better data see refractive_index.py .. but this is what I'm using
    in lumerical and lua, and I want to be consistent sometimes"""
    data = {450: 1.466, 500: 1.462, 525: 1.461, 550: 1.46, 575: 1.459,
            580: 1.459, 600: 1.458, 625: 1.457, 650: 1.457}
    if wavelength_in_nm not in data:
        raise ValueError('bad wavelength' + repr(wavelength_in_nm))
    return data[wavelength_in_nm]
92106735d74776402407539e99ea48d9744ca6f9
17,085
def test_accuracy(reference_src, reference_tar, aligned_src, aligned_tar,
                  penalty_points=None):
    """
    Tests aligned lists of strings against reference lists, typically
    hand aligned.

    Args:
        reference_src: list of reference source strings
        reference_tar: list of reference target strings
        aligned_src: list of auto-aligned source strings
        aligned_tar: list of auto-aligned target strings
        penalty_points: dict of error types and penalty points.
            Default is {'bad': 1, 'noise': 1, 'missed': 1}

    Returns:
        dict
    """
    if penalty_points is None:
        penalty_points = {'bad': 1, 'noise': 1, 'missed': 1}

    if not (isinstance(reference_src, list) and isinstance(reference_tar, list)
            and isinstance(aligned_src, list) and isinstance(aligned_tar, list)):
        raise Exception("Expecting reference_src, reference_tar, aligned_src, "
                        "and aligned_tar to be of type list.")
    if len(reference_src) != len(reference_tar):
        raise Exception(
            "Expecting reference_src and reference_tar to have the same length")
    if len(aligned_src) != len(aligned_tar):
        raise Exception(
            "Expecting aligned_src and aligned_tar to have the same length")

    reference_src = [item.lower().strip() for item in reference_src]
    reference_tar = [item.lower().strip() for item in reference_tar]
    aligned_src = [item.lower().strip() for item in aligned_src]
    aligned_tar = [item.lower().strip() for item in aligned_tar]

    # find mismatches. Penalize by 1 point per mismatch
    bad = []
    missed = []
    missed_points = 0
    bad_points = 0
    correct_count = 0
    for src_index, src in enumerate(reference_src):
        tar = reference_tar[src_index]
        if src not in aligned_src:
            # no match here between src lists
            missed_points += 1
            missed.append(src)
            continue
        tar_index = aligned_src.index(src)
        if aligned_tar[tar_index] != tar:
            bad_points += 1
            bad.append(src)
        else:
            correct_count += 1

    # find noise. Penalize by 1 point per noisy item
    noise = []
    noise_points = 0
    for src_index, src in enumerate(aligned_src):
        if src not in reference_src:
            noise_points += 1
            noise.append(src)

    # apply weights to penalty factors
    bad_points = bad_points * penalty_points['bad']
    noise_points = noise_points * penalty_points['noise']
    missed_points = missed_points * penalty_points['missed']

    # find score
    # score = (len(reference_src) - bad_points - noise_points - missed_points) / len(reference_src)
    error_rate = (bad_points + noise_points) / len(reference_src)

    return {'correct_count': "{}/{}".format(correct_count, len(reference_src)),
            'error_rate': error_rate,
            'correct_rate': correct_count / len(reference_src),
            'bad_points': bad_points,
            'noise_points': noise_points,
            'missed_points': missed_points,
            'bad': bad,
            'noise': noise,
            'missed': missed}
c220f9d9aa04adfdb6fa07231c9913505d54ef8d
17,086
def apply_color_reduction(bgr_img):
    """Apply color reduction (posterization).

    Arguments:
        bgr_img {numpy.ndarray} -- BGR image (3ch)

    Returns:
        numpy.ndarray -- processed BGR image (3ch)

    Notes:
        The function also works correctly on RGB input.
    """
    out_img = bgr_img.copy()
    out_img[(0 <= out_img) & (out_img < 63)] = 32
    out_img[(63 <= out_img) & (out_img < 127)] = 96
    out_img[(127 <= out_img) & (out_img < 191)] = 160
    out_img[(191 <= out_img) & (out_img < 256)] = 224
    return out_img
55cb5cfb207e9e2f9c7f7a94f9ef8d690a8d247f
17,087
def ensure_value(namespace, dest, default):
    """
    Thanks to https://stackoverflow.com/a/29335524/6592473
    """
    stored = getattr(namespace, dest, None)
    if stored is None:
        return default
    return stored
5f9d43131366592c0ec71913c814da41ff5c56ea
17,088
def http(server):
    """Test client.

    Usage:
        with http as c:
            response = c.get(uri)
            print response.parsed_data
    """
    client = server.test_client()
    return client
11ebc0ad1c4541f76270166cf82a110483d4a4d6
17,089
def _argsort(it, **kwargs):
    """
    Return a sorted version of the iterable `it`, together with the indices
    corresponding to the sort.

    Parameters:
    ------------
    - it : iterable
    - kwargs
        Keywords and values usable with the built-in function `sorted`.

    Results:
    -----------
    - indexes : iterable of indices
        Indices of the elements of `it`, in the order in which the elements
        appear in `sortedit`.
    - sortedit : iterable
        Sorted version of `it`.

    Examples:
    ----------
    >>> it = [2, 1, 3]
    >>> indexes, sortedit = _argsort(it)
    >>> indexes
    (1, 0, 2)
    >>> sortedit
    (1, 2, 3)
    >>> [it[x] for x in indexes]
    [1, 2, 3]
    >>> indexes, sortedit = _argsort(it, reverse=True)
    >>> indexes
    (2, 0, 1)
    >>> sortedit
    (3, 2, 1)
    """
    indexes, sortedit = zip(*sorted(enumerate(it), key=lambda x: x[1], **kwargs))
    return indexes, sortedit
f36e0ac863c3861ba7f1e222ac3712c977364d98
17,090
from typing import List
import re


def solution(raw: str, markers: List[str]) -> str:
    """Remove all comments from raw string as indicated by markers."""
    return '\n'.join(
        [re.split(r'|'.join(map(re.escape, markers)), line)[0].strip()
         for line in raw.split('\n')]) if markers else raw
1b8a5b38e57d5700958dbaa9340c0c4ec93e6062
17,091
import torch


def retrieval_eval_collate(data):
    """Creates mini-batch tensors from the list of tuples (src_seq, trg_seq)."""
    # separate source and target sequences
    text, text_length, segmentt_ids, img, img_loc, _label = list(zip(*data))
    _inputs = [torch.stack(text, dim=0),
               torch.stack(text_length, dim=0),
               torch.stack(segmentt_ids, dim=0),
               torch.stack(img, dim=0).view(-1, 100, img[0].size()[-1]),
               torch.stack(img_loc, dim=0).view(-1, 100, img_loc[0].size()[-1]),
               torch.stack(_label, dim=0)
               ]
    return _inputs
f3ebd19ab458b48f4c2339d2a4c7d179193f9a6a
17,092
def extractdistinct(df_wide):
    """Extract and reformat to standard form for distinct point set"""
    df1_distincttemp = df_wide[df_wide['DistinctRoute?'] == True]
    # df1_distinct = df1_distinct.drop([x for x in df1_distinct if x.endswith('_base')], 1)
    df1_distinct = df1_distincttemp.reset_index(drop=True)
    return df1_distinct, df1_distincttemp
5fe537d1e4f3f30716c58eab147ea85be01c3f33
17,093
def check_tag_legality(tags):
    """
    Check the legality of the tag list: tag id range, duplicates, and
    parent-child relations.

    :param tags: list of tag configurations
    :return: boolean, whether the list is legal
    """
    max_id = 0
    tag_code_set = set()
    tag_relation_dict = dict()
    for tag_item in tags:
        tag_id = str(tag_item["id"])
        tag_code = str(tag_item["code"])
        parent_id = str(tag_item["parent_id"])
        if int(tag_id) > max_id:
            max_id = int(tag_id)
        tag_code_set.add(tag_code)
        tag_relation_dict[tag_id] = parent_id
    if max_id > 10000:
        print("[ERROR] tag_id bigger than 10000: {}".format(max_id))
        return False
    if len(tag_code_set) != len(tags):
        print("[ERROR] duplicate tag codes exist, has {} ids but only {} codes".format(
            len(tags), len(tag_code_set)))
        return False
    for parent_id in tag_relation_dict.values():
        if int(parent_id) != 0 and parent_id not in tag_relation_dict:
            print("[ERROR] parent code id {} does not exist".format(parent_id))
            return False
    return True
0094d772c5bc0967e8b7a5f3db5a28bd7d7f64b1
17,095
def _create_postgres_url(db_user, db_password, db_name, db_host,
                         db_port=5432, db_ssl_mode=None, db_root_cert=None):
    """Helper function to construct the URL connection string

    Args:
        db_user: (string): the username to connect to the Postgres DB as
        db_password: (string): the password associated with the username
            being used to connect to the Postgres DB
        db_name: (string): the name of the Postgres DB to connect to
        db_host: (string): the host where the Postgres DB is running
        db_port: (number, optional): the port to connect to the Postgres DB at
        db_ssl_mode: (string, optional): the SSL mode to use when connecting
            to the Postgres DB
        db_root_cert: (string, optional): the root cert to use when connecting
            to the Postgres DB

    Returns:
        [string]: Postgres connection string
    """
    ssl_mode = ''
    if db_ssl_mode:
        # see
        # https://www.postgresql.org/docs/11/libpq-connect.html#
        # LIBPQ-CONNECT-SSLMODE
        ssl_mode = '?sslmode=%s' % (db_ssl_mode)
        if db_root_cert:
            ssl_mode += '&sslrootcert=%s' % (db_root_cert)
    return ('postgresql://%(user)s:%(password)s@%(host)s:%(port)s/'
            '%(db)s%(ssl)s' % {
                'user': db_user,
                'password': db_password,
                'db': db_name,
                'host': db_host,
                'port': db_port,
                'ssl': ssl_mode})
f617f7f85545fcf2a1f60db8c9c43e0209c32c4f
17,096
import json


def json_format(obj):
    """Formatter that formats as JSON"""
    return json.dumps(obj, ensure_ascii=False)
1cfc2d46499dfe3a8bbd5db60200fadf9ccf0551
17,097
def to_case_fold(word: str):
    """
    The casefold() method is an aggressive lower() method which converts
    strings to casefolded strings for caseless matching.

    The casefold() method removes all case distinctions present in a string.
    It is used for caseless matching (i.e. ignores cases when comparing).

    For example, the German lowercase letter ß is equivalent to ss. However,
    since ß is already lowercase, the lower() method does nothing to it.
    But casefold() converts it to ss.

    :param word: the string to be casefolded
    :return: case-folded string
    """
    return word.casefold()
c917ab8661859ae29d8abecd9a7663b0b5112a63
17,099
import torch


def predict_raw(loader, model):
    """Compute the raw output of the neural network model for the given data.

    Arguments
    ----------
    loader : pyTorch DataLoader instance
        An instance of DataLoader class that supplies the data.
    model: subclass instance of pyTorch nn.Module
        The class containing the network model to evaluate, as per convention.

    Returns
    ----------
    The network output tensor.
    """
    model.eval()
    out = []
    for i, (input, target) in enumerate(loader):
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        with torch.no_grad():
            input_var = torch.autograd.Variable(input)
            # compute output
            output = model(input_var)
        out.append(output.data)
    return out
cb812c0792629c46d5774d9f1f4090369e047b78
17,100
import json


def _load_probably_json_substring(x):
    """
    Weak method of extracting JSON object from a string which includes JSON
    and non-JSON data. Just gets the largest substring from { to }

    Works well for these cases, where click.CliRunner gives us output
    containing both stderr and stdout.
    """
    return json.loads(x[x.index("{"): x.rindex("}") + 1])
fd701cb47d1ca40422de8c3424b8c6dce93ae540
17,101
def funql_template_fn(target):
    """Simply returns target since entities are already anonymized in targets."""
    return target
a5f95bd6b7feabb4826fff826e6638cd242e04d6
17,102
import torch


def to_tensor(im, dims=3):
    """Converts a given ndarray image to torch tensor image.

    Args:
        im: ndarray image (height x width x channel x [sample]).
        dims: dimension number of the given image. If dims = 3, the image
            should be in (height x width x channel) format; while if dims = 4,
            the image should be in (height x width x channel x sample) format;
            default is 3.

    Returns:
        torch tensor in the format (channel x height x width) or
        (sample x channel x height x width).
    """
    assert (dims == 3 or dims == 4)
    if dims == 3:
        im = im.transpose((2, 0, 1))
    elif dims == 4:
        im = im.transpose((3, 2, 0, 1))
    else:
        raise NotImplementedError
    return torch.from_numpy(im)
d19a0c0104f4dc9401f70235cadb7266ffd01332
17,103
def _get_size_verifier(min_x, min_y, mode):
    """
    Depending on what the user wants, we need to filter image sizes
    differently. This function generates the filter according to the
    user's wishes.

    :param min_x: Minimal x-coordinate length of image.
    :param min_y: Minimal y-coordinate length of image.
    :param mode: If equal to 'area': Only filter images whose area is below
        min_x*min_y. If equal to 'individual' or anything else: Both sides
        of the image must be bigger than the given x and y coordinates.
        Automatically compares long to long and short to short side.
    :returns: function that decides whether an image should be kept or
        discarded according to the size constraints.
    """
    def by_area(width, height):
        return width * height >= min_x * min_y

    def by_both(width, height):
        long_side = max(width, height)
        short_side = min(width, height)
        long_given = max(min_x, min_y)
        short_given = min(min_x, min_y)
        return long_given <= long_side and short_given <= short_side

    def anything_goes(width, height):
        return True

    if mode == "area":
        return by_area
    elif mode == "individual":
        return by_both
    else:
        return anything_goes
86919399a94caa60ff780ccf5959fe2d43d6d2eb
17,104
import json


def check_engine_op(op):
    """Check Engine API transaction."""
    if op is not None and "logs" in op:
        logs = json.loads(op["logs"])
        if isinstance(logs, str):
            logs = json.loads(logs)
        if "errors" not in logs:
            return True
        elif logs["errors"] == ["contract doesn't exist"]:
            # Ignore witness contract not existing, happens for ENG / BEE staking
            return True
        else:
            print(op["logs"])
            print("Op has errors.")
            return False
    print("Op has no logs.")
    return True
e33960687dd5015bc0dcaf345aab36204e7e53af
17,105
import itertools


def pdist_list(rings, node_sim):
    """
    Defines the block creation using a list of rings at the graph level
    (should also ultimately include trees). Creates a SIMILARITY matrix.

    :param rings: a list of rings, dictionaries {node: (nodelist, edgelist)}
    :param node_sim: the pairwise node comparison function
    :return:
    """
    rings_values = [list(ring.values()) for ring in rings]
    nodes = list(itertools.chain.from_iterable(rings_values))
    assert node_sim.compare(nodes[0][1], nodes[0][1]) == 1, \
        "Identical rings giving non 1 similarity."
    sims = [node_sim.compare(n1[1], n2[1])
            for i, (n1, n2) in enumerate(itertools.combinations(nodes, 2))]
    return sims
7ee0e9817c048ad5f1cf7e08ae9d980408c788c9
17,107
def format_values(data):
    """
    Convert the data elements to their values
    """
    for key, value in data.items():
        for i in range(len(value)):
            data[key][i] = float(value[i].strip().split()[0])
    return data
caed50b7b6a86be19d358405260c45eb9cf02105
17,108
from typing import Tuple
from typing import Optional
from typing import List
import argparse


def parse_args(args) -> Tuple[Optional[str], Optional[int], Optional[str],
                              bool, List[str], Optional[str]]:
    """
    Parse command line arguments:
    param: args: in form of --arg=value
        --path, optional, is the path of posix socket
        --port, optional the tcp port number
        --start-streams, optional True if to auto start all events of STREAM type
        --config-files, is a comma-separated list of hopeit apps config files
            relative or full paths
        --api-file, optional path to openapi.json file with at least openapi
            and info sections

    Example::

        python web.py --port=8020 --path=/tmp/hopeit.01 --config-files=test.json

    Notes:
        --config-files argument is mandatory
        if --port and --path are not supplied the engine start on 8020 port
        by default
    """
    parser = argparse.ArgumentParser(description="hopeit.py engine")
    parser.add_argument('--host')
    parser.add_argument('--path')
    parser.add_argument('--port')
    parser.add_argument('--start-streams', action='store_true')
    parser.add_argument('--config-files')
    parser.add_argument('--api-file')

    parsed_args = parser.parse_args(args=args)
    port = int(parsed_args.port) if parsed_args.port \
        else 8020 if parsed_args.path is None else None
    config_files = parsed_args.config_files.split(',')

    return parsed_args.host, port, parsed_args.path, \
        bool(parsed_args.start_streams), config_files, parsed_args.api_file
9efc626ecc3c8ad0a94bfa3b3fb3ac69b31d8b5e
17,109
def campbell_1d_az(Fs, z_, zlu, theta_s, psi_s, b, sd):
    """Soil moisture profile from Campbell function and microtopography

    See equations 4 and 5 in Dettmann & Bechtold 2015, Hydrological Processes
    """
    # PEATCLSM microtopographic distribution
    if ((zlu - z_) * 100) >= (psi_s * 100):
        theta = theta_s
    else:
        theta = theta_s * (((zlu - z_) * 100) / (psi_s * 100)) ** (-1 / b)
    theta_Fs = (1 - Fs) * theta
    return theta_Fs
5a22998c3277d69e0bdfdaa676f66c803b93386b
17,111
import random


def weighted(objs, key='weight', generator=random.randint):
    """Perform a weighted select given a list of objects.

    :param objs: a list of objects containing at least the field `key`
    :type objs: [dict]
    :param key: the field in each obj that corresponds to weight
    :type key: str
    :param generator: a number generator taking two ints
    :type generator: function(int, int) -> int
    :return: an object
    :rtype: dict
    """
    acc = 0
    lookup = []

    # construct weighted spectrum
    for o in objs:
        # NOTE(cpp-cabrera): skip objs with 0 weight
        if o[key] <= 0:
            continue
        acc += o[key]
        lookup.append((o, acc))

    # no objects were found
    if not lookup:
        return None

    # NOTE(cpp-cabrera): select an object from the lookup table. If
    # the selector lands in the interval [lower, upper), then choose
    # it.
    gen = generator
    selector = gen(0, acc - 1)
    lower = 0
    for obj, upper in lookup:
        if lower <= selector < upper:
            return obj
        lower = upper
ea8b0ada198ae26a7ac54092c10a11daba3d18e0
17,112
def get_parameter_list_from_parameter_dict(pd):
    """Takes a dictionary which contains key value pairs for model parameters
    and converts it into a list of parameters that can be used as an input
    to an optimizer.

    :param pd: parameter dictionary
    :return: list of parameters
    """
    pl = []
    for key in pd:
        pl.append(pd[key])
    return pl
38ab987fd2959c789f69a804f27e30bc86c7279b
17,113
def all_accept_criteria(candidate_paraphrases, **kargs):
    """Always accept proposed words."""
    return candidate_paraphrases, None
745459e4fc432f666b2c763baafb69ea19a6181c
17,114
import torch


def gpu_check(self):
    """Check if GPU is available.

    Returns:
        [obj]: [torch.cuda.is_available()]
    """
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU ...')
    else:
        print('CUDA is available! Training on GPU ...')
    return train_on_gpu
42b6b1b256c7c30239bf2ee9b40431a710827ad2
17,116
import string


def camel_to_underscore(name):
    """
    convert a camel case string to snake case
    """
    for char in string.ascii_uppercase:
        name = name.replace(char, '_{0}'.format(char))
    return name.lower()
db88bd3938073ec65e58344ba7228c75fef646a5
17,118
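One behavior of camel_to_underscore worth flagging: a leading uppercase letter also gets an underscore prepended, so PascalCase input comes back with a leading '_'. A quick sketch:

assert camel_to_underscore('camelCase') == 'camel_case'
assert camel_to_underscore('CamelCase') == '_camel_case'  # note the leading '_'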
import inspect


def get_signature(obj):
    """
    Get signature of module/class/routine

    Returns:
        A string signature
    """
    name = obj.__name__
    if inspect.isclass(obj):
        if hasattr(obj, "__init__"):
            signature = str(inspect.signature(obj.__init__))
            return "class %s%s" % (name, signature)
        else:
            signature = "%s()" % name
    elif inspect.ismodule(obj):
        signature = name
    else:
        signature = str(inspect.signature(obj))
        return name + signature
    return signature
9da9d7e431783b89a5e65b4940b118cd5538799c
17,119
def abs(x):
    """
    Computes the absolute value of a complex-valued input tensor (x).
    """
    assert x.size(-1) == 2
    return (x ** 2).sum(dim=-1).sqrt()
3b3a23873923597767c35eb4b5f6da1bb054705b
17,121
def gcd_looping_with_divrem(m, n):
    """
    Computes the greatest common divisor of two numbers by getting
    remainder from division in a loop.

    :param int m: First number.
    :param int n: Second number.
    :returns: GCD as a number.
    """
    while n != 0:
        m, n = n, m % n
    return m
5b50692baa396d0e311b10f2858a1278a9366d09
17,122
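A short trace of the loop above for gcd_looping_with_divrem(48, 18): the (m, n) pairs go (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), at which point n is 0 and 6 is returned.

assert gcd_looping_with_divrem(48, 18) == 6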
def _round_bits(n: int, radix_bits: int) -> int:
    """Get the number of `radix_bits`-sized digits required to store a
    `n`-bit value."""
    return (n + radix_bits - 1) // radix_bits
3e03385ee69f28b11e63885a80af48faa337697a
17,123
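The expression in _round_bits is the standard ceiling-division idiom, ceil(n / radix_bits) done in integer arithmetic. For instance:

assert _round_bits(10, 8) == 2   # a 10-bit value needs two 8-bit digits
assert _round_bits(16, 8) == 2   # an exact multiple does not round up
assert _round_bits(17, 8) == 3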
import subprocess
import json


def execute_command(*cmd: str, parse_json=False):
    """Execute a command.

    Args:
        *cmd (str): Parts of the command.
        parse_json (bool, optional): Parse the output as JSON.
            Defaults to `False`.

    Returns:
        str or dict: Output of command.
    """
    res = subprocess.check_output(cmd)
    if parse_json:
        return json.loads(res)
    else:
        return res.decode()
8a8882982de323093b6e62bac8d4f74ccb39dddc
17,125
def format_scrub_warning(warning):
    """This function takes an internal representation of a warning and
    converts it into a SCRUB-formatted string.

    Inputs:
        - warning: Dictionary of finding data [dict]

    Outputs:
        - scrub_warning: SCRUB-formatted warning that can be written to the
          output file [string]
    """
    # Format the warning
    scrub_warning = (warning.get('id') + ' <' + warning.get('priority') +
                     '> :' + warning.get('file') + ':' +
                     str(warning.get('line')) + ': ' +
                     warning.get('query') + '\n')

    # Add the description
    description = ''
    for line in warning.get('description'):
        description = description + ' ' + line + '\n'
    scrub_warning = scrub_warning + description + '\n'

    return scrub_warning
3ec720da1f3a1aba8ecf605dc848636479ec5415
17,126
import functools


def ignores(exc_type, returns, when=None):
    """Ignores exception thrown by decorated function.

    When the specified exception is raised by the decorated function,
    the value 'returns' is returned instead. The exceptions to catch can
    further be limited by providing a predicate which should return 'True'
    for exceptions that should be ignored.

    Parameters
    ----------
    exc_type : type
        The exception type that should be ignored.
    returns : T
        The value that is returned when an exception is ignored.
    when : callable, optional
        A predicate that can be used to further refine the exceptions to
        be ignored.

    Examples
    --------
    Ignore all `ValueError`s:

    >>> @ignores(ValueError, returns=1)
    ... def foo(e):
    ...     raise e
    >>> foo(ValueError)
    1
    >>> foo(TypeError)
    Traceback (most recent call last):
        ...
    TypeError

    Ignore `ValueError`s with a specific message:

    >>> @ignores(ValueError, returns=1, when=lambda e: str(e) == "Not so bad.")
    ... def bar(e):
    ...     raise e
    >>> bar(ValueError("Bad!"))
    Traceback (most recent call last):
        ...
    ValueError: Bad!
    >>> bar(ValueError("Not so bad."))
    1
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exc_type as e:
                if when is None or when(e):
                    pass
                else:
                    raise e
            return returns
        return wrapper
    return decorator
0c839c73218124fb988cea95fb5ee73abe7d5833
17,127
def build_hsts_header(config):
    """Returns HSTS Header value."""
    value = 'max-age={0}'.format(config.max_age)
    if config.include_subdomains:
        value += '; includeSubDomains'
    if config.preload:
        value += '; preload'
    return value
9f94d87b1949f5c9e2f898466a8f5191f2327357
17,128
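A hedged usage sketch for build_hsts_header: any object exposing max_age, include_subdomains, and preload attributes works as config; the HSTSConfig namedtuple here is a hypothetical stand-in, not part of the original code.

from collections import namedtuple

HSTSConfig = namedtuple('HSTSConfig', 'max_age include_subdomains preload')  # hypothetical
assert build_hsts_header(HSTSConfig(31536000, True, True)) == \
    'max-age=31536000; includeSubDomains; preload'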
def validates(*names):
    """Decorate a method as a 'validator' for one or more named properties.

    Designates a method as a validator, a method which receives the name of
    the attribute as well as a value to be assigned, or in the case of a
    collection to be added to the collection. The function can then raise
    validation exceptions to halt the process from continuing, or can modify
    or replace the value before proceeding. The function should otherwise
    return the given value.
    """
    def wrap(fn):
        fn.__sa_validators__ = names
        return fn
    return wrap
2ff6856aba142383c53f52b057f5ccbd1b682ebb
17,130
def argv_to_module_arg_lists(args):
    """Converts module ldflags from argv format to per-module lists.

    Flags are passed to us in the following format:
        ['global flag', '--module', 'flag1', 'flag2', '--module', 'flag 3']

    These should be returned as a list for the global flags and a list of
    per-module lists, i.e.:
        ['global flag'], [['flag1', 'flag2'], ['flag 3']]
    """
    modules = [[]]
    for arg in args:
        if arg == '--module':
            modules.append([])
        else:
            modules[-1].append(arg)
    return modules[0], modules[1:]
847597d09e56af4221792a9a176bddfea334e622
17,132
import os
import yaml


def get_species_categories(benchmark_type="FullChemBenchmark"):
    """
    Returns the list of benchmark categories that each species belongs to.
    This determines which PDF files will contain the plots for the various
    species.

    Args:
        benchmark_type: str
            Specifies the type of the benchmark (either FullChemBenchmark
            (default) or TransportTracersBenchmark).

    Returns:
        spc_cat_dict: dict
            A nested dictionary of categories (and sub-categories) and the
            species belonging to each.

    NOTE: The benchmark categories are specified in YAML file
    benchmark_categories.yml.
    """
    spc_categories = "benchmark_categories.yml"
    yamlfile = os.path.join(os.path.dirname(__file__), spc_categories)
    with open(yamlfile, "r") as f:
        spc_cat_dict = yaml.load(f.read(), Loader=yaml.FullLoader)
    return spc_cat_dict[benchmark_type]
8e8111f9d232abaef8e2fbda70ccfaad241ffc5e
17,134
import hashlib


def sha256(s: str) -> str:
    """
    >>> sha256('abc')
    'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
    """
    return hashlib.sha256(s.encode("utf8")).hexdigest()
9bf815261c785e2061ae067b3dfa6cd3368d8f9a
17,135
def cubic_equation(b):
    """Algebraic Cubic Equations"""
    return lambda x: x**3 - x + b
6248d2b8fee59d860268d7d49a4e30c49f658214
17,136
def undo(rovar):
    """Translates rovarspraket into English"""
    for low, upper in zip("bcdfghjklmnpqrstvwxyz", "BCDFGHJKLMNPQRSTVWXYZ"):
        rovar = f"{upper}".join(
            f"{low}".join(rovar.split(f"{low}o{low}")).split(f"{upper}o{low}"))
    return rovar
451560539f7e98bc1fc89c712fa9026b48ecac4a
17,137
def tet_clean(s):
    """
    Original code from Leighton Pritchard, leighton.pritchard@hutton.ac.uk
    redistributed and modified it under the terms of the GNU General Public
    License as published by the Free Software Foundation, either version 3
    of the License, or (at your option) any later version.

    Checks that a passed string contains only unambiguous IUPAC nucleotide
    symbols. We are assuming that a low frequency of IUPAC ambiguity symbols
    doesn't affect our calculation.
    """
    if not len(set(s) - set('ACGT')):
        return True
    return False
9ee39039459815da2619dbee19a0514b107a74a4
17,139
import time


def get_template_s3_resource_path(prefix, template_name, include_timestamp=True):
    """
    Constructs s3 resource path for provided template name

    :param prefix: S3 base path (starts after url port and hostname)
    :param template_name: File name minus '.template' suffix and any
        timestamp portion
    :param include_timestamp: Indicates whether to include the current time
        in the file name
    :return string: Url of S3 file
    """
    if include_timestamp:
        key_serial = str(int(time.time()))
        template_name += "." + key_serial
    return "%s/%s.template" % (prefix, template_name)
61f1ef829cbe83e1032dd5995cedf33a4809f787
17,140
def med_all_odd(a):
    """Takes in a non-empty list of integers and returns True if all values
    are odd in the list."""
    return all(x % 2 == 1 for x in a)
a7210b097798f2b4303a16ce02a79811e8c8ceef
17,142
def index():
    """
    get home page, you can define your own templates
    :return:
    """
    return '<h2>Welcome to Proxy Pool System</h2>'
a8d54f565198f8c12fc67658bb3e9223f9922447
17,144
import torch


def adj_to_seq(adj, device='cpu'):
    """
    Convert a dense adjacency matrix into a sequence.

    Parameters
    ----------
    adj : torch.Tensor
        The dense adjacency tensor.
    device : str, optional
        The device onto which to put the data. The default is 'cpu'.

    Returns
    -------
    adj_seq : torch.Tensor
        The sequence representing the input adjacency tensor.
    """
    B, N = adj.shape[0], adj.shape[1]
    adj_seq = torch.zeros(B, int(((N - 1) * N) / 2)).to(device)
    for b in range(B):
        for i in range(1, N):
            for j in range(i):
                # Flatten the strictly lower triangle: entry (i, j) maps to
                # position i*(i-1)//2 + j. (The original index `i + j`
                # collides for N > 3, e.g. (2, 1) and (3, 0) both map to 3.)
                adj_seq[b, i * (i - 1) // 2 + j] = adj[b, i, j]
    return adj_seq
6b967962d5ba61a0ad45d5197ca23a7278fccca9
17,145
def combinations(c, d):
    """
    Compute all combinations possible between c and d and their derived values.
    """
    c_list = [c - 0.1, c, c + 0.1]
    d_list = [d - 0.1, d, d + 0.1]
    possibilities = []
    for cl in c_list:
        for dl in d_list:
            possibilities.append([cl, dl])
    return possibilities
28d1c0f8d7ef9ad59cadbbf8141b73decd2d9f94
17,147
import argparse
import ast


def parse_args():
    """PARAMETERS"""
    parser = argparse.ArgumentParser('MindSpore PointNet++ Eval Configurations.')
    parser.add_argument('--batch_size', type=int, default=24,
                        help='batch size in training')
    parser.add_argument('--data_path', type=str,
                        default='../data/modelnet40_normal_resampled/',
                        help='data path')
    parser.add_argument('--pretrained_ckpt', type=str, default='')
    parser.add_argument('--num_category', default=40, type=int, choices=[10, 40],
                        help='training on ModelNet10/40')
    parser.add_argument('--num_point', type=int, default=1024, help='Point Number')
    parser.add_argument('--use_normals', type=ast.literal_eval, default=False,
                        help='use normals')
    parser.add_argument('--process_data', action='store_true', default=False,
                        help='save data offline')
    parser.add_argument('--use_uniform_sample', action='store_true', default=False,
                        help='use uniform sampling')
    parser.add_argument('--platform', type=str, default='Ascend', help='run platform')
    parser.add_argument('--enable_modelarts', type=ast.literal_eval, default=False)
    parser.add_argument('--data_url', type=str)
    parser.add_argument('--train_url', type=str)
    return parser.parse_known_args()[0]
83cdcf29b09b7846d6ebf0277896a37b7b73a4f4
17,148
import os


def get_stem(path: str) -> str:
    """Get the stem from the given path."""
    basename = os.path.basename(path)
    stem, _ = os.path.splitext(basename)
    return stem
7c520c491c5061508758b5c32b10277d1d1858ff
17,149
import os

# Note: the body below also relies on boto3 and botocore, which the original
# snippet did not import.
import boto3
from botocore.exceptions import ClientError


def upload_to_do(files, args, logger):
    """
    :param files: a list of local file paths
    :param args: original command line arguments passed to pygest executable
    :param logger: a python logger object
    :return: 0 on success
    """
    # The --upload cmdline argument is --upload do bucket-name
    # args.upload[0] == "do" and args.upload[1] provides the bucket name,
    # if it exists.
    logger.warn("Uploading to Digital Ocean (do) is not yet implemented.")
    return 0

    # NOTE: everything below is unreachable while the early return above
    # is in place.
    bucket_name = "ms-mfs-cdn" if len(args.upload) < 2 else args.upload[1]
    session = boto3.session.Session()
    client = session.client('s3',
                            region_name='sfo2',
                            endpoint_url='https://sfo2.digitaloceanspaces.com',
                            aws_access_key_id='LGVP4ATNUAYVORPC5GNC',
                            aws_secret_access_key='xjJnk1nkwW2Tdee+QtzhCSc25Oj+1PC/kkjHuwNy5Bw')

    def bucket_holds_key(key_name):
        """ Return true if key is found, false if it's not, and raise an
        exception if we can't find out. """
        try:
            client.select_bucket(bucket_name).Object(key_name).load()
        except ClientError as ee:
            if ee.response['Error']['Code'] == '404':
                return False
            else:
                raise
        else:
            return True

    for f in files:
        print("Trying to upload {} to s3://{}/{}".format(
            os.path.join(args.data, f), bucket_name, f))
        f_local = os.path.join(args.data, f)
        try:
            if bucket_holds_key(f):
                logger.info("{} already exists in {}. Leaving it alone.".format(
                    f, bucket_name))
            else:
                client.Bucket(bucket_name).upload_file(f_local, f)
                # There is no json returned from this call. But an error
                # raises an exception, so no news is good news.
        except ClientError as e:
            if e.response['Error']['Code'] == '403':
                # Permissions don't allow getting an object's HEAD
                logger.warn("You are not allowed to even check if a file "
                            "exists in bucket ({}).".format(bucket_name))
                logger.warn("Check your [default] key in ~/.aws/credentials, "
                            "and verify AWS IAM permissions.")
                break
            else:
                raise
    return 0
9604a9d1ef0fdf75aab3a6ad3a0088d5363fdab4
17,150
def makeUnique(list):
    """
    Removes duplicates from a list.
    """
    u = []
    for l in list:
        if l not in u:
            u.append(l)
    return u
02834bf5633c82f5f7428c03519ca68bee8916d4
17,151
def sample_labels(model, wkrs, imgs):
    """Generate a full labeling by workers given worker and image parameters.

    Input:
        - `model`: model instance to use for sampling parameters and labels.
        - `wkrs`: list of worker parameters.
        - `imgs`: list of image parameters.

    Output:
        1. list [img id, wkr id, label] as provided by `model.sample_label`.
    """
    labels = [[ii, wi, model.sample_label(wkrs[wi], imgs[ii])]
              for ii in range(len(imgs)) for wi in range(len(wkrs))]
    return labels
1abd2d0d087f7ce452db7c899f753366b148e9e6
17,152
from typing import List


def get_balanced_grouping_allocation(
    w: int, min_value: int, max_value: int
) -> List[int]:
    """
    This algorithm is much less costly in terms of time complexity, but will
    result in less randomness. It will always prefer the balanced or
    nearly-balanced group configuration, while still accommodating for prime
    numbers of channel sizes, as well as achieving this goal in the least
    number of groups possible (just in case there is a limit on the total
    number of groups). Keep in mind that we would like as much entropy as
    possible, within relatively reasonable limits and boundaries.

    Note that while this algorithm will attempt to select the most balanced
    configuration, it will not always choose the most balanced due to the
    nature of the algorithm itself. Instead, it sacrifices the choice of
    being the most balanced in order to allow itself to self-adjust and
    select near-optimal configurations for prime numbers of channel sizes
    as well. Sometimes, this might lead to the configuration being nearly
    balanced, but not exactly.

    While it is true that the most balanced configuration would have the most
    entropy (proportional to number of ways of possible arrangements) due to
    the pattern seen in Pascal's triangle, an algorithm that prefers the most
    balanced configuration might not be able to configure itself for prime
    numbers of channel sizes to fit within the restrictions, which would be
    disastrous (assuming that such an algorithm would execute trial-and-error
    divisions for all positive integers >= 2 until either sqrt(w) or w as
    part of the usual factorization procedure).

    If there exists an algorithm out there that we find that would both
    accommodate for prime numbers of channel sizes and always prefer the most
    balanced configuration, we would modify this function to adopt such an
    algorithm instead. If such an algorithm exists, it should not rely on
    constant minute/minor readjustments and trial-and-errors of shaving off
    and topping up certain "buckets"/groups using a try-catch method or a
    conditional while loop procedure/mechanism which might take a much longer
    time to finish, since that would completely defeat the point of this
    algorithm being fast/quick enough as compared to the other one.

    While it is true that multiples of 3, 4, and 5 are more numerous/denser
    than multiples of prime numbers (at least for positive integers between
    3 and 1000 inclusive), it is only about a 10% difference or a 1.5x
    multiplier (~60:40). Hence, even if this difference offsets the amount of
    entropy lost by the "shaving-off and topping-up" sequence, sacrificing
    this possible time save in favor of more balanced configurations at the
    cost of slower performance for such a marginal gain/benefit does not seem
    to be acceptable enough, at least according to me (@jamestiotio). Hence,
    I deem that this approach would be the most appropriate, at least for
    this very specific case and for the time being.

    Ideally, such an algorithm should also not skimp/slack off on keeping the
    number of groups as low as possible in the name of balance. As long as
    the guarantee conditions of min_value = N and max_value >= 2N - 1 are
    satisfied, no groups will violate either bound. Keep in mind that
    w > max_value most of the time. Might want to consider introducing more
    randomness in some form by using some kind of method here.
    """
    x = -(w // -max_value)  # ceil(w / max_value)
    q, mod = divmod(w, x)
    a = [q for _ in range(x)]
    for i in range(mod):
        a[i] += 1
    return a
9171b5c1c6759a08fc799ca9073cd3a2ee22b6b4
17,153
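A quick walk-through of the allocation above with small illustrative numbers: for w = 10 channels and max_value = 4, the group count is x = ceil(10/4) = 3, divmod(10, 3) gives (3, 1), so the base allocation [3, 3, 3] receives the single remainder unit and becomes [4, 3, 3]. (min_value only enters through the guarantee conditions discussed in the docstring, not the arithmetic.)

assert get_balanced_grouping_allocation(10, 2, 4) == [4, 3, 3]
assert sum(get_balanced_grouping_allocation(10, 2, 4)) == 10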
def all_equal(lst):
    """
    Returns True if all elements of lst are the same, False otherwise

    ex. all_equal(['S','S','S']) returns True
    """
    return len(set(lst)) == 1
221f90cb763f35ddde3fa0a3e33cb03c9dfae0bc
17,154
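One edge case of all_equal worth knowing: an empty list yields an empty set, so the length test fails and the function returns False rather than the vacuous True.

assert all_equal(['S', 'S', 'S']) is True
assert all_equal([1, 2]) is False
assert all_equal([]) is False   # empty input is treated as not all-equal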
import zipfile
import re
import os


def zipfile_to_dictionary(filename):
    """
    Takes in a zip file and returns two dictionaries keyed by base filename
    (without extension): one mapping to the file contents, the other to the
    original file names.

    Inputs: ::

        file: the concerned zip file

    Outputs: ::

        result: the returned dictionaries
    """
    zf = zipfile.ZipFile(filename, 'r')
    files_list = zf.namelist()
    dictionary = {}
    names_dictionary = {}
    for i in files_list:
        if re.search(r"\.(jpe?g|png|gif|bmp|mp4|mp3)", i, re.IGNORECASE) \
                and not i.startswith('__MACOSX'):
            f = zf.read(i)
            name = os.path.basename(i).split('.')[0]
            dictionary[name] = f
            names_dictionary[name] = os.path.basename(i)
    return dictionary, names_dictionary
04a105618e272e77c62221a4a009fe14a013ad05
17,155
def default_state_progress_report(n_steps, found_states, all_states,
                                  timestep=None):
    """
    Default progress reporter for VisitAllStatesEnsemble.

    Note that it is assumed that all states have been named.

    Parameters
    ----------
    n_steps : int
        number of MD frames generated so far
    found_states : iterable
        the set of states that have been found
    all_states : iterable
        the set of all states of interest
    timestep : float or quantity
        the timestep (optional). If given, the amount of time simulated will
        be reported along with the number of MD frames.

    Returns
    -------
    str :
        formatted string with information about progress so far
    """
    report_str = "Ran {n_steps} frames"
    if timestep is not None:
        report_str += " [{}]".format(str(n_steps * timestep))
    report_str += (". Found states [{found_states}]. "
                   "Looking for [{missing_states}].\n")
    found_states_str = ",".join([s.name for s in found_states])
    # list comprehension instead of sets (to preserve order)
    missing_states = [s for s in all_states if s not in found_states]
    missing_states_str = ",".join([s.name for s in missing_states])
    return report_str.format(n_steps=n_steps,
                             found_states=found_states_str,
                             missing_states=missing_states_str)
b0f740d18218dd9542704d03edcd4b6575a2c14e
17,159
import pandas


def integrate(s1, s2):
    """Integrate two records of feature subsets.

    Parameters
    ----------
    s1 : pandas.Series
        First records.
    s2 : pandas.Series
        Second records.

    Returns
    -------
    pandas.Series
        Integrated records.

    Examples
    --------
    >>> s1 = from_arrays([[True, False]], [0.2], features=['A', 'B'])
    >>> s2 = from_arrays([[False, True]], [0.4], features=['A', 'B'])
    >>> s1
    A     B
    True  False    0.2
    dtype: float64
    >>> s2
    A      B
    False  True    0.4
    dtype: float64
    >>> integrate(s1, s2)
    A      B
    True   False    0.2
    False  True     0.4
    dtype: float64
    >>> s3 = from_arrays([[True, True]], [0.8], features=['A', 'C'])
    >>> s3
    A     C
    True  True    0.8
    dtype: float64
    >>> integrate(s1, s3)
    A     B      C
    True  False  False    0.2
          True   False    0.8
    dtype: float64
    """
    s1 = s1.copy()
    s2 = s2.copy()

    and_features = [feature for feature in s1.index.names
                    if feature in s2.index.names]
    xor1_features = [feature for feature in s1.index.names
                     if feature not in and_features]
    xor2_features = [feature for feature in s2.index.names
                     if feature not in and_features]
    all_features = and_features + xor1_features + xor2_features

    flags = s1.index.to_frame()
    flags = flags.assign(**{feature: False for feature in xor2_features})
    flags = flags[all_features]
    s1.index = pandas.MultiIndex.from_frame(flags)

    flags = s2.index.to_frame()
    flags = flags.assign(**{feature: False for feature in xor1_features})
    flags = flags[all_features]
    s2.index = pandas.MultiIndex.from_frame(flags)

    return pandas.concat([s1, s2], verify_integrity=True)
34fd3bfdea613b09a4fdc6054de35a755656c01e
17,160
import collections


def mut_type(WT, A, T, G, C):
    """
    returns number of each type of mutation as columns

    Used on one sequence position at a time so only one of the four wtNT
    will not be 0 for an individual function call, but combining all outputs
    for all sequence positions gives the total number of each type
    """
    wtNT = WT[0]
    # nested dict to be used for tracking all 12 types of substitutions
    mutsDict = {'A': {}, 'T': {}, 'G': {}, 'C': {}}
    nts = 'ATGC'
    for wt in nts:
        for mut in nts:
            mutsDict[wt][mut] = 0
    mutsDict[wtNT]['A'] = A
    mutsDict[wtNT]['T'] = T
    mutsDict[wtNT]['G'] = G
    mutsDict[wtNT]['C'] = C
    outDict = collections.OrderedDict()
    for nt in nts:
        for mut in nts:
            if mut != nt:
                outDict[f'{nt}->{mut}'] = mutsDict[nt][mut]
    return outDict
de3e5e99c11d8f86047c667a6f51eaee0522c5ff
17,161
def childrenList(cursor, cd_tax):
    """
    Retrieve all the children of a taxon in the database

    Parameters:
    ----------
    cursor: Psycopg2 cursor
        cursor for the database connection
    cd_tax: Int
        identifier of the taxon for which we search the children taxa

    Returns:
    -------
    all_children: List(Int)
        list of identifiers of the children taxa
    """
    foundMore = True
    all_children = [cd_tax]
    new_children = [cd_tax]
    SQL = "SELECT cd_tax FROM taxon WHERE cd_sup IN (SELECT UNNEST( %s ))"
    while foundMore:
        cursor.execute(SQL, [new_children])
        res = cursor.fetchall()
        new_children = [r[0] for r in res]
        all_children = all_children + new_children
        if len(new_children) == 0:
            foundMore = False
    all_children.sort()
    return all_children
ca50ac590674d19321144f77b54ec57d8dd49bb4
17,162
def path_sum(root, target_sum):
    """
    Given a binary tree and a sum, determine if the tree has a root-to-leaf
    path such that adding up all the values along the path equals the
    given sum.
    """
    def is_leaf(node):
        return node.left is None and node.right is None

    def leaf_nodes(node, parent_path_sum):
        if node is None:
            return
        new_sum = parent_path_sum + node.val
        if is_leaf(node):
            yield (node, new_sum)
        for n in leaf_nodes(node.left, new_sum):
            yield n
        for n in leaf_nodes(node.right, new_sum):
            yield n

    for node, path_sum in leaf_nodes(root, 0):
        if path_sum == target_sum:
            return True
    return False
0971227d42abb3a0cde1c9050dcca39731858679
17,164
def IsAioNode(tag):
    """Returns True iff tag represents an AIO node."""
    return tag.startswith('aio_nodes.')
6603f4bca75a463ca651b44615a11c3dd29ca487
17,168
import re


def regex_from_rule(rule):
    """Generate the regular expression corresponding to a rule

    >>> regex_from_rule('/<provider>/songs')
    re.compile('^/(?P<provider>[^\\\/]+)/songs$')
    """
    kwargs_regex = re.compile(r'(<.*?>)')
    pattern = re.sub(
        kwargs_regex,
        lambda m: '(?P<{}>[^\/]+)'.format(m.group(0)[1:-1]),
        rule
    )
    regex = re.compile(r'^{}$'.format(pattern))
    return regex
dcc90a75875f80271da7333c1f378ff81b9aaf0b
17,169
def _parse_name(wot_identifier):
    """
    Parse identifier of the forms: nick
                                   nick@key
                                   @key

    :Return: nick, key. If a part is not given return an empty string for it.

    >>> _parse_name("BabcomTest@123")
    ('BabcomTest', '123')
    """
    split = wot_identifier.split('@', 1)
    nickname_prefix = split[0]
    key_prefix = (split[1] if split[1:] else '')
    return nickname_prefix, key_prefix
7a33f5247e345175bad92fc8bf040eddc8b65804
17,171