content: string (35–416k chars) · sha1: string (40 chars) · id: int64 (0–710k)
def clean_euler_path(eulerian_path: list) -> list:
    """Cleans an Eulerian path so that each (undirected) edge appears only
    once in the list. If an edge appears more than once, only the first
    occurrence is kept.

    Arguments:
        eulerian_path {list} -- Eulerian path

    Returns:
        list -- cleaned Eulerian path
    """
    path = []
    for edge in eulerian_path:
        if edge not in path and edge[::-1] not in path:
            path.append(edge)
    return path
207d5d5ef6747b5d537c6c8502dd536638ccee9d
22,644
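A quick usage sketch of clean_euler_path with hypothetical edge tuples; the reversed duplicate ('b', 'a') is recognized as the already-seen undirected edge ('a', 'b'):

path = [("a", "b"), ("b", "c"), ("b", "a")]
print(clean_euler_path(path))  # [('a', 'b'), ('b', 'c')]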
def frame_idx(fname):
    """Get frame index from filename: `name0001.asc` returns 1"""
    return int(fname[-8:-4])
af3e6a4c693fa77b7516e5560c20f50d3a5f925a
22,645
import pickle


def load_calib(filename):
    """
    Loads calibration parameters from '.pkl' file.

    Parameters
    ----------
    filename : str
        Path to load file, must be '.pkl' extension

    Returns
    -------
    calib_params : dict
        Parameters for undistorting images.
    """
    # Read the Python dict back from the file; the context manager ensures
    # the file is closed even if unpickling fails.
    with open(filename, 'rb') as pkl_file:
        try:
            calib_params = pickle.load(pkl_file)
        except Exception:  # explicit, rather than a bare except
            raise IOError("File must be '.pkl' extension")
    return calib_params
93700abe123df3ebcd17bddf16a6acd1a42ea1a7
22,647
import re


def get_molecular_barcode(record, molecular_barcode_pattern):
    """Return the molecular barcode in the record name.

    Parameters
    ----------
    record : screed record
        screed record containing the molecular barcode
    molecular_barcode_pattern : regex pattern
        molecular barcode pattern to detect in the record name

    Returns
    -------
    barcode : str
        Molecular barcode from the name; if it doesn't exist, returns None.
    """
    found_molecular_barcode = re.findall(molecular_barcode_pattern,
                                         record['name'])
    if found_molecular_barcode:
        return found_molecular_barcode[0][1]
1507cf7ad3c39c02b6dfdfdd12c6800155346253
22,648
import shlex


def read_from_file(path):
    """
    Reads a plot file and returns its data sets.

    data_set:  [(f, val)]  e.g. "1 2\n2 5\n3 8"
    data_sets: [data_set]  blocks separated by blank lines: "data1\n\n\ndata2"
    """
    # Mode "rU" (universal newlines) was removed in Python 3.11; plain "r"
    # is equivalent on Python 3.
    with open(path, "r") as opened_file:
        joined = opened_file.read()
    datasets = [[shlex.split(line) for line in dataset.split("\n")]
                for dataset in joined.split("\n\n\n")]
    return [[(tokens[0], float(tokens[1])) for tokens in dataset]
            for dataset in datasets]
3049e6080a259bccf83a914c4cd4c19310744b16
22,650
import string
import random


def generate_random_password() -> str:
    """Generates a pseudo-random password of 5-14 ASCII letters.

    Note: `random` is not cryptographically secure; use the `secrets`
    module for passwords that must resist attack.
    """
    letters = string.ascii_letters
    password = ''.join(random.choice(letters)
                       for _ in range(random.randint(5, 14)))
    return password
6bbfead5832b48cf9bd4e4d9af5a2a0463e776ec
22,652
def int_to_bytes(x):
    """Changes an unsigned integer into bytes."""
    return x.to_bytes((x.bit_length() + 7) // 8, 'big')
5f441a6a5767d8cd1292e8976a24b0c9c4ca157e
22,653
def extend_xmax_range(xmax_bins, templates):
    """
    Copy templates into empty xmax bins; helps to prevent problems from
    reaching the edge of the interpolation space.

    :param xmax_bins: list
        Possible xmax bin values
    :param templates: dict
        Image templates
    :return: dict
        Extended image templates
    """
    # Create dictionary for our new templates
    extended_templates = dict()

    # Loop over image templates
    for key in templates:
        min_key_bin = list()
        key_copy = 0
        # For each entry loop forward over possible xmax entries to check
        # if they exist
        for xb in xmax_bins:
            key_test = (key[0], key[1], key[2], key[3], xb, key[5])
            # keep looping until we have found the largest xmax value
            if (key_test not in extended_templates.keys()) and \
                    (key_test not in templates.keys()):
                min_key_bin.append(key_test)
            else:
                key_copy = key_test
                break

        # Then copy the highest valid xmax template into these entries
        for k in min_key_bin:
            if key_copy != 0:
                extended_templates[k] = templates[key_copy]

        min_key_bin = list()
        key_copy = 0
        # Now we just do the same in reverse
        for xb in reversed(xmax_bins):
            key_test = (key[0], key[1], key[2], key[3], xb, key[5])
            if (key_test not in extended_templates.keys()) and \
                    (key_test not in templates.keys()):
                min_key_bin.append(key_test)
            else:
                key_copy = key_test
                break

        for k in min_key_bin:
            if key_copy != 0:
                extended_templates[k] = templates[key_copy]

    # Copy new template entries into the original
    templates.update(extended_templates)
    return templates
ac622fde3a0bd7c8919b8fd63f459596ea13d88c
22,654
import uuid


def get_uuid(data):
    """Compute a UUID for data.

    :param data: name to hash (a ``str``; ``uuid.uuid3`` hashes it together
        with the namespace)
    """
    return str(uuid.uuid3(uuid.NAMESPACE_URL, data))
f91e6e76c14736c1678bc000b7246ac6b518171f
22,656
from collections import OrderedDict


def aggregate_instruction_forms(instruction_forms):
    """Hierarchically chains instruction forms.

    Combines operand types that differ only by a single operand together.
    """
    nested_operand_types = {
        ("1", "imm8"),
        ("3", "imm8"),
        ("rel8", "rel32"),
        ("imm8", "imm16"),
        ("imm8", "imm32"),
        ("imm16", "imm32"),
        ("imm32", "imm64"),
        ("al", "r8"),
        ("ax", "r16"),
        ("eax", "r32"),
        ("rax", "r64"),
    }

    def find_single_operand_difference(form, other_form):
        """Checks if two forms differ only by a single operand type and
        returns the operand number"""
        if form == other_form:
            return None
        if len(form.operands) != len(other_form.operands):
            return None
        different_operand_numbers = \
            list(filter(lambda n: form.operands[n].type != other_form.operands[n].type,
                        range(len(form.operands))))
        if len(different_operand_numbers) == 1:
            return different_operand_numbers[0]
        else:
            return None

    new_instruction_forms = OrderedDict()
    for form in instruction_forms:
        for other_form in instruction_forms:
            n = find_single_operand_difference(form, other_form)
            if n is not None and (form.operands[n].type, other_form.operands[n].type) in nested_operand_types:
                break
        else:
            new_instruction_forms[form] = []
    for form in instruction_forms:
        for other_form in instruction_forms:
            n = find_single_operand_difference(form, other_form)
            if n is not None and (form.operands[n].type, other_form.operands[n].type) in nested_operand_types:
                assert other_form.isa_extensions == form.isa_extensions
                assert other_form.mmx_mode == form.mmx_mode
                assert other_form.xmm_mode == form.xmm_mode
                if other_form in new_instruction_forms:
                    new_instruction_forms[other_form].insert(0, (n, form))
                break
    return new_instruction_forms
95f449fed39d74f7d9ca0b8b17f844760d734379
22,657
def ssqrange(charge, sz, nsingle):
    """
    Make a list giving all possible :math:`S^{2}` values for given charge
    and :math:`S_{z}`.

    Parameters
    ----------
    charge : int
        Value of the charge.
    sz : int
        Value of sz.
    nsingle : int
        Number of single particle states.

    Returns
    -------
    list
        List of all possible :math:`S^{2}` values for given charge and
        :math:`S_{z}`.
    """
    szmax = min(charge, nsingle - charge)
    return list(range(abs(sz), szmax + 1, +2))
b05451081b0b13dd8f43a14e88eaf591ba6324ec
22,658
def split(iterable, **split_options):
    """Perform a split on iterable.

    This method is highly inspired by the `iter` global method (in
    conjunction with its __iter__ counterpart method) for iterable classes.

    :param iterable: An iterable, which will typically be a Storage<Collection>
    :param split_options: The optional additional arguments to the split
        method. May be ignored.
    :return: A collection of Split, or something similar. If iterable is not
        a Storage<Collection>, returns a tuple with a single element, the
        iterable argument itself
    """
    try:
        # Default behaviour is to use the data model `split` method
        return iterable.split(**split_options)
    except AttributeError:
        # Otherwise, simply return an iterable which yields a single item
        return iterable,
ac3e8743393d66ab9553db0e4735e2425d97518c
22,659
def scale_vector(vector, scale):
    """Scale a 3D vector's components by scale."""
    return vector[0] * scale, vector[1] * scale, vector[2] * scale
292269d2e54db362b823c547b0415f53d93e3e4c
22,661
import requests
import json


def query_clone_infos_airlab(ids):
    """
    Use Raul's API to query clone info from clone ids.

    :param ids: iterable of clone ids
    :return: dict with the response
    """
    resp_dict = dict()
    for id in ids:
        resp = requests.get(
            'http://airlaboratory.ch/apiLabPad/api/getInfoForClone/' + str(id))
        resp_entry = json.loads(resp.text)
        # Boolean `and` rather than bitwise `&` for the combined check
        if (resp_entry != '0') and (len(resp_entry) > 0):
            resp_dict[id] = resp_entry[0]
    return resp_dict
251f0c86ddc341ed9dc7cc235837abb1bc23e437
22,663
import math
import torch


def log_mv_gamma(p, a):
    """Simple implementation of the log multivariate gamma function Gamma_p(a)

    Args:
        p (int): dimension
        a (float): argument

    Will be made obsolete by https://github.com/pytorch/pytorch/issues/9378
    """
    C = p * (p - 1) / 4 * math.log(math.pi)
    return float(C + torch.lgamma(a - 0.5 * torch.arange(p, dtype=torch.float)).sum())
70fb7cb7e55387d286c1d7bb5a0bad6fdaa1f376
22,664
def num_round(x) -> str:
    """Comma separated, with only the largest 2 (or, below 100, 3) digits
    as non-zero."""
    # Digit count of the integer part, sign excluded; the original branches
    # (x > 100 / x < 100 / unreachable abs(x) > 10) are folded into one
    # abs() test so negative inputs round correctly too.
    digits = len(str(abs(int(x))))
    if abs(x) >= 100:
        x = round(x, 2 - digits)  # keep two significant digits
    else:
        x = round(x, 3 - digits)  # keep three for small magnitudes
    x = int(round(x))
    return f'{x:,}'
d1b6268ee8bc419e649f8fd0b399e8fa5413be5c
22,665
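A usage sketch against the cleaned-up num_round above:

print(num_round(18347))  # '18,000'
print(num_round(1234))   # '1,200'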
def getYearDigits(a_year):
    """Return the last two digits of the given year."""
    return abs(a_year % 100)
f0ad0c65a77ade0514ca97d6b60165d433421e9e
22,666
def lr_lambda(epoch, base=0.99, exponent=0.05):
    """Multiplier used for learning rate scheduling.

    Parameters
    ----------
    epoch: int
    base: float
    exponent: float

    Returns
    -------
    multiplier: float
    """
    return base ** (exponent * epoch)
24ac54a8e54fa64e44ab941125a72ac4bbb269fd
22,667
def partition(pred, iterable):
    """
    Returns tuple of allocated and unallocated systems

    :param pred: status predicate
    :type pred: function
    :param iterable: machine data
    :type iterable: list
    :returns: ([allocated], [unallocated])
    :rtype: tuple

    .. code::

        def is_allocated(d):
            allocated_states = ['started', 'pending', 'down']
            return 'charms' in d or d['agent_state'] in allocated_states

        allocated, unallocated = utils.partition(
            is_allocated, [{'agent_state': 'pending'}])
    """
    yes, no = [], []
    for i in iterable:
        (yes if pred(i) else no).append(i)
    return (yes, no)
997466771c67995f0bd7f5d7bbbecf73c444ccdd
22,668
import string


def isValidMapKey(key):
    """Returns ``True`` if the given string is a valid key for use as a
    colour map or lookup table identifier, ``False`` otherwise. A valid key
    comprises lower case letters, numbers, underscores and hyphens.
    """
    valid = string.ascii_lowercase + string.digits + '_-'
    return all(c in valid for c in key)
2e9167c3351b6c80bcc12c129279c4048f511e24
22,670
def factory_class_name(model_class_name):
    """Return factory class name from model class"""
    return model_class_name + 'Factory'
acfde8e129fb44f2db108a778b15938efbcc237b
22,671
import pathlib
import json
import re


def check_markers(test_mode=False):
    """Validate markers in PinNames.h files"""
    mbed_os_root = pathlib.Path(__file__).absolute().parents[3]
    errors = []
    with (
        mbed_os_root.joinpath("targets", "targets.json")
    ).open() as targets_json_file:
        targets_json = json.load(targets_json_file)

    if test_mode:
        search_dir = pathlib.Path(__file__).parent.joinpath('test_files').absolute()
    else:
        search_dir = mbed_os_root.joinpath('targets')

    for f in search_dir.rglob("PinNames.h"):
        with open(f) as pin_names_file:
            pin_names_file_content = pin_names_file.read()

        # Raw string with the '*' escaped, so the pattern matches the
        # literal "/* MBED TARGET LIST: ... */" comment block.
        target_list_match = re.search(
            r"/\* MBED TARGET LIST: ([0-9A-Z_,* \n]+)\*/",
            pin_names_file_content
        )
        marker_target_list = []
        if target_list_match:
            marker_target_list = list(
                re.findall(
                    r"([0-9A-Z_]{3,})",
                    target_list_match.group(1),
                    re.MULTILINE,
                )
            )

        if not marker_target_list:
            print("WARNING: MBED TARGET LIST marker invalid or not found in file "
                  + str(f))
            errors.append({"file": str(f), "error": "marker invalid or not found"})
            continue

        for target in marker_target_list:
            target_is_valid = False
            if target in targets_json:
                target_is_valid = True
                if "public" in targets_json[target]:
                    if targets_json[target]["public"] is False:
                        target_is_valid = False
            if not target_is_valid:
                print("WARNING: MBED TARGET LIST in file " + str(f)
                      + " includes target '" + target
                      + "' which doesn't exist in targets.json or is not public")
                errors.append({"file": str(f), "error": "target not found"})
    return errors
27f303542d83f99c75df5d1804ec103f1377959d
22,672
import os


def remove(base, target):
    """
    Remove file from given directory if it exists.

    base: base directory of target file
    target: path to target file relative to base directory
    """
    # Use a name that doesn't shadow the enclosing `remove` function
    target_path = os.path.join(base, target)
    if not os.path.isfile(target_path):
        return False
    os.remove(target_path)
    return True
7a6edaa13edb7ef51520a56f7a97fa8c5d388091
22,673
import argparse


def get_arguments():
    """
    Obtains command-line arguments.

    :rtype argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model-input', required=True, metavar='MODEL-INPUT',
        help='read the base model from Pickle file %(metavar)s')
    parser.add_argument(
        # FileType('rU') is no longer valid on Python 3.11+; 'r' is equivalent
        '--data-input', type=argparse.FileType('r'), required=True,
        metavar='DATA-INPUT',
        help='read input data from CSV file %(metavar)s')
    parser.add_argument(
        '--bootstrapped-sample-input', required=True,
        metavar='BOOTSTRAPPED-SAMPLE-INPUT',
        help=('read bootstrapped basis matrix samples from Feather file '
              '%(metavar)s'))
    parser.add_argument(
        # metavar added so the %(metavar)s reference in the help resolves
        '--iterations', type=int, default=2000, metavar='ITERATIONS',
        help=('run the analysis with %(metavar)s bootstraps (default: '
              '%(default)s)'))
    parser.add_argument(
        '--model-output', required=True, metavar='MODEL-OUTPUT',
        help='output the model to Pickle file %(metavar)s')
    parser.add_argument(
        '--basis-output', required=True, metavar='BASIS-OUTPUT',
        help='output the basis matrix to CSV file %(metavar)s')
    parser.add_argument(
        '--score-output', required=True, metavar='SCORE-OUTPUT',
        help='output the scores to CSV file %(metavar)s')
    parser.add_argument(
        '--lower-quantile-threshold', type=float, metavar='LOWER-THRESHOLD',
        default=0.25,
        help=('use %(metavar)s as the lower quantile threshold; the '
              'calculated threshold per factor will be the %(metavar)sth '
              'percentile of the highest median basis matrix entry (default '
              '%(default)s)'))
    parser.add_argument(
        '--upper-quantile', type=float, metavar='UPPER', default=0.75,
        help=('use %(metavar)s as the quantile threshold to determine whether '
              'a variable on a factor has crossed the lower quantile '
              'threshold (default %(default)s)'))
    parser.add_argument(
        '--log', metavar='LOG',
        help='write logging information to %(metavar)s')
    return parser.parse_args()
35da3f18e6a2b13ca92079a8360cfd6b4240fadb
22,674
import numpy


def first_order_rotation(rotvec):
    """First order approximation of a rotation: I + skew(rotvec)

    cfo, 2015/08/13
    """
    R = numpy.zeros((3, 3), dtype=numpy.float64)
    R[0, 0] = 1.0
    R[1, 0] = rotvec[2]
    R[2, 0] = -rotvec[1]
    R[0, 1] = -rotvec[2]
    R[1, 1] = 1.0
    R[2, 1] = rotvec[0]
    R[0, 2] = rotvec[1]
    R[1, 2] = -rotvec[0]
    R[2, 2] = 1.0
    return R
0b7543574a97b8bc902694eedf1c1ed871d1ed84
22,675
def greeting_service(create_service_meta):
    """ Greeting service test instance with `log` dependency mocked """
    return create_service_meta("log")
a7d732b610f2d8a311e8299eba4a1c99b93f7351
22,676
def enum_rocket():
    """ Return the rocket (both jokers) """
    return [('BJ-CJ', 0)]
c8e303c54d77a8b2ee71a97cbb98f2dd743391b7
22,677
def _group_counts_to_group_sizes(params, group_counts):
    """Convert numbers of groups to sizes of groups."""
    _, out_channels, _ = params
    group_sizes = [out_channels // count for count in group_counts]
    return group_sizes
fdcf6c86dd4c90507d1cc2f9b5968d5110df264b
22,678
def __read_sequence_ids(data):
    """
    Reads SequenceIDs.txt (file included in OrthoFinder output) and parses
    it to a dict.

    :param data: list of lines in SequenceIDs.txt
    :return: dict with key: OrthoFinder ID and value: the proper name
    """
    output = {}
    for l in data:
        if l.strip() != '':
            k, v = l.split(': ')
            output[k] = v.strip()  # drop any trailing newline from the name
    return output
7855cf70398e22c45516b822ceff3ec9702ce5e5
22,679
def snap_value(input, snap_value):
    """
    Returns snap value given an input and a base snap value

    :param input: float
    :param snap_value: float
    :return: float
    """
    return round(float(input) / snap_value) * snap_value
1b2f967ecca2a151c5229cbb9eb57d6c67853925
22,680
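A usage sketch: snap_value rounds the input to the nearest multiple of the base value.

print(snap_value(7.3, 2.5))  # 7.5
print(snap_value(11, 5))     # 10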
from typing import Union
from typing import List
from typing import Dict
import json


def convert_argstr(s: str) -> Union[None, bool, int, str, List, Dict]:
    """Convert string to suitable argument type"""
    s = s.strip()
    if s in ('None', 'null'):
        return None
    elif s in ('True', 'true'):
        return True
    elif s in ('False', 'false'):
        return False
    try:
        return int(s)
    except ValueError:
        if s[:1] in ('"', "'"):
            return s.strip('" \'')
        else:
            try:
                return json.loads(s)
            except json.JSONDecodeError:
                return s
416f6fe12aa58a8d7162a3ca0a70b01e5d0c90c2
22,681
from pathlib import Path


def fixture(name: str):
    """ Construct an absolute path to the fixture directory """
    return str(Path(Path(__file__).parent.absolute(), 'fixtures', name))
0055b7076615893531977afaba49e421c3440224
22,682
def _read_node(data, pos, md_total, val_total):
    """
    2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2

    The quantity of child nodes. The quantity of metadata entries. Zero or
    more child nodes (as specified in the header).

    If a node has no child nodes, its value is the sum of its metadata
    entries. So, the value of node B is 10+11+12=33, and the value of node D
    is 99. However, if a node does have child nodes, the metadata entries
    become indexes which refer to those child nodes. A metadata entry of 1
    refers to the first child node, 2 to the second, 3 to the third, and so
    on. The value of this node is the sum of the values of the child nodes
    referenced by the metadata entries. If a referenced child node does not
    exist, that reference is skipped. A child node can be referenced
    multiple times and counts each time it is referenced. A metadata entry
    of 0 does not refer to any child node.

    2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2
    A----------------------------------
        B----------- C-----------
                         D-----

    Node C has one metadata entry, 2. Because node C has only one child
    node, 2 references a child node which does not exist, and so the value
    of node C is 0. Node A has three metadata entries: 1, 1, and 2. The 1
    references node A's first child node, B, and the 2 references node A's
    second child node, C. Because node B has a value of 33 and node C has a
    value of 0, the value of node A is 33+33+0=66. So, in this example, the
    value of the root node is 66.
    """
    node_value = 0
    child_count = data[pos]
    pos += 1
    md_count = data[pos]
    pos += 1
    child_vals = []
    for i in range(child_count):
        pos, md_total, child_val, val_total = _read_node(data, pos, md_total,
                                                         val_total)
        child_vals.append(child_val)
    for m in range(md_count):
        md_val = data[pos]
        md_total += md_val
        if child_count == 0:
            node_value += md_val
        else:
            # A metadata entry of 0 refers to no child; the guard prevents
            # index -1 from wrapping around to the last child.
            if md_val > 0:
                try:
                    node_value += child_vals[md_val - 1]
                except IndexError:
                    pass
        pos += 1
    print('nv =', node_value)  # debug trace of each node's value
    return (pos, md_total, node_value, val_total)
e2fd6e53bb7a9078486794ff4f9e6b98639aa67b
22,683
import json


def make_policy(bucket_name, prefix, allow_get_location=False):
    """Produces an S3 IAM text for selective access of data.

    Only a prefix can be listed, gotten, or written to when a credential is
    subject to this policy text.
    """
    bucket_arn = "arn:aws:s3:::" + bucket_name
    prefix_arn = "arn:aws:s3:::{0}/{1}/*".format(bucket_name, prefix)
    structure = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": ["s3:ListBucket"],
                "Effect": "Allow",
                "Resource": [bucket_arn],
                "Condition": {"StringLike": {"s3:prefix": [prefix + '/*']}},
            },
            {
                "Effect": "Allow",
                "Action": ["s3:PutObject", "s3:GetObject"],
                "Resource": [prefix_arn]
            }]}
    if allow_get_location:
        structure["Statement"].append(
            {"Action": ["s3:GetBucketLocation"],
             "Effect": "Allow",
             "Resource": [bucket_arn]})
    return json.dumps(structure, indent=2)
8c76f0be774b7b0de2169552cf4ba5a961e83023
22,684
from datetime import datetime


def parseDate(timestamp):
    """Parses an EXIF date."""
    return datetime.strptime(timestamp, '%Y:%m:%d %H:%M:%S')
d59d2c0b1c1370035a93ed8e4a93834db6915646
22,685
import torch


def one_hot_vector(length, index, device=torch.device('cpu')):
    """
    Create a one-hot-vector of a specific length with a 1 in the given index.

    Args:
        length: Total length of the vector.
        index: Index of the 1 in the vector.
        device: Torch device (GPU or CPU) to load the vector to.

    Returns:
        Torch vector of size [1 x length] filled with zeros, and a 1 only at
        index, loaded to device.
    """
    vector = torch.zeros([1, length]).to(device)
    vector[0, index] = 1.
    return vector
88dca8d63792b5a8eb58abbf12eb2800e28b3264
22,686
def partial(func, *args, **kwargs):
    """
    Create a partial function during processing of a filter pipeline, like
    in x:fun(a,b) - the returned function represents application of (a,b)
    to a `fun`. Whatever argument is supplied later will be PREPENDED to
    the argument list, unlike in functools.partial() which APPENDS new
    arguments at the end.
    """
    def newfunc(*newargs):
        return func(*(newargs + args), **kwargs)
    return newfunc
d5075c21a922524370baa3bd025bb7bb01291902
22,687
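A sketch of the prepend behaviour with a hypothetical two-argument function:

def subtract(a, b):
    return a - b

sub3 = partial(subtract, 3)
print(sub3(10))  # 7: the later argument 10 is prepended -> subtract(10, 3)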
def XORs(alen, array):
    # incorrect, and too slow
    """xors=0
    donedex=[]
    for i in xrange(alen):
        for j in xrange(1,alen):
            if (i!=j) and ((i,j) not in donedex):
                if (i^j)%2==1:
                    donedex.append((i,j))
                    donedex.append((j,i))
                    xors+=1"""
    # correct, but too slow
    # return sum([1 if (array[i]^array[j])%2==1 else 0
    #             for i in xrange(alen) for j in xrange(i+1, alen)])

    # Fast: a XOR is odd exactly when one operand is even and the other odd,
    # so count the even elements and multiply by the number of odd ones.
    evens = sum([1 if i % 2 == 0 else 0 for i in array])
    return evens * (alen - evens)
aec2c5d96d5f61fb3bf22c5f28428481ff813c6a
22,690
def get_minutes(seconds):
    """Convert seconds to minutes."""
    return seconds // 60
7ca290c4e581a786af32f333d1c1f771de6b91c9
22,691
def compat_middleware_factory(klass):
    """
    Class wrapper that only executes `process_response` if `streaming` is
    not set on the `HttpResponse` object. Django has a bad habit of looking
    at the content, which will prematurely exhaust the data source if we're
    using generators or buffers.
    """
    class compatwrapper(klass):
        def process_response(self, req, resp):
            if not hasattr(resp, 'streaming'):
                return klass.process_response(self, req, resp)
            return resp
    return compatwrapper
b6a9aefee305b808481e3611033b7660e6113904
22,692
def make_topic(*levels):
    """Create a valid topic.

    >>> make_topic('foo', 'bar')
    'foo/bar'
    >>> make_topic(('foo', 'bar'))
    'foo/bar'
    """
    if len(levels) == 1 and isinstance(levels[0], tuple):
        return make_topic(*levels[0])
    return "/".join(levels)
6ab9218864d9b6a2eaefcc994414a8e089bbbd0e
22,693
def parse_type(s):
    """
    Checks the value of 's' and wraps it in quotes, except for NULL and
    NOT NULL.

    :param s: value to be checked.
    :return: Parsed value
    """
    # Guard implements the documented exception for NULL / NOT NULL,
    # which the original body omitted.
    if s.upper() in ('NULL', 'NOT NULL'):
        return s.upper()
    string = "'%s'" % s
    return string.upper()
83ff8a90acbb21345ae68f7be897f31fea206ece
22,694
import math


def sumlog(v1, v2):
    """Returns the sum of two logspaced values in logspace."""
    if v1 < v2:
        v1, v2 = v2, v1
    return math.log(1 + math.exp(v2 - v1)) + v1
3942f5a05da47065161c29164d57b0f22bda478a
22,695
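A quick check that sumlog computes log(exp(v1) + exp(v2)) without leaving log space:

import math

a, b = math.log(0.3), math.log(0.2)
print(math.exp(sumlog(a, b)))  # ~0.5, i.e. log(0.3 + 0.2) recovered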
import argparse


def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Create amplicon variant table.")
    parser.add_argument('master_file', help="Amplicon master file.")
    parser.add_argument('amplicon_coverage',
                        help='Per-amplicon mean coverage.')
    parser.add_argument('-v', '--variants', nargs='+', required=True,
                        help='snpEff output file')
    parser.add_argument('-a', '--list_all', action='store_true',
                        help='Output all amplicons.')
    return parser.parse_args()
929dc57397b7490924c30a1f088f688c87dfb54c
22,697
import re


def split_names(voters):
    """Representative(s) Barbuto, Berger, Blake, Blikre, Bonner, Botten,
    Buchanan, Burkhart, Byrd, Campbell, Cannady, Childers, Connolly, Craft,
    Eklund, Esquibel, K., Freeman, Gingery, Greear, Greene, Harshman,
    Illoway, Jaggi, Kasperik, Krone, Lockhart, Loucks, Lubnau, Madden,
    McOmie, Moniz, Nicholas, B., Patton, Pederson, Petersen, Petroff,
    Roscoe, Semlek, Steward, Stubson, Teeters, Throne, Vranish, Wallis,
    Zwonitzer, Dn. and Zwonitzer, Dv."""
    voters = voters.split(':', 1)[-1]
    voters = re.sub(r'(Senator|Representative)(\(s\))?', "", voters)
    voters = re.sub(r'\s+', " ", voters)
    # Split on a comma or "and" except when there's a following initial
    voters = [
        x.strip()
        for x in re.split(r'(?:,\s(?![A-Z]\.))|(?:\sand\s)', voters)
    ]
    return voters
77e0abbf441b45757a638017199b773cb73d9976
22,698
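A usage sketch on a shortened input; the lookahead keeps a single-letter initial ("K.") attached to its surname:

line = "Representative(s) Barbuto, Berger, Esquibel, K., Freeman"
print(split_names(line))
# ['Barbuto', 'Berger', 'Esquibel, K.', 'Freeman']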
def run_nlp(tweets):
    """Processes the tweet text through the model."""
    # Placeholder: currently just concatenates the tweet texts.
    results = ''
    for tweet in tweets:
        results += tweet['text'] + '; '
    return results
024ce527767fedae8294a83da2c3cd8d81befcc1
22,699
import requests


def _get_bin_count(fpath, delimiter=',', encoding='ISO-8859-1'):
    """
    Gets the number of bins in the file.

    :param fpath: A path or url for the file (if a url it must include
        `http`, and if a file path it must not contain `http`).
    :type fpath: string
    :param delimiter: The delimiter between items in the file, defaults to ','.
    :type delimiter: string
    :param encoding: The encoding for the file, defaults to 'ISO-8859-1'.
    :type encoding: string
    :return: The number of bins in the file.
    :rtype: int
    """
    bins = 0
    if 'http' in fpath:
        req = requests.get(fpath)
        for line in req.iter_lines():
            try:
                if float(line.decode(encoding).split(delimiter)[0]):
                    bins += 1
            except ValueError:  # non-numeric first field, skip the line
                pass
    else:
        with open(fpath, 'r', encoding=encoding) as f:
            for line in f:
                try:
                    if float(line.split(delimiter)[0]):
                        bins += 1
                except ValueError:
                    pass
    return bins
5bc02857ba87beb736b2a74baca665924863c68f
22,701
import random


def rSel(iterable):
    """rSel(iterable)
    Randomly select an element."""
    if type(iterable) == dict:
        return random.choice(list(iterable.items()))
    else:
        idx = random.randint(0, len(iterable) - 1)
        return iterable[idx]
d7c2a146bbd1088c182e51f1d535b53c6f00d550
22,703
from pathlib import Path


def get_garbage_frames():
    """Init list with garbage objects."""
    garbage_objects = [
        garbage_object.read_text()
        for garbage_object in Path(__file__).resolve().parent.glob('*.txt')
    ]
    return garbage_objects
210420f7d5fa3b1b36021afe3f2316a1ccb88575
22,705
import time


def current_datetimestring():
    """Return a string with the date and time"""
    return " ".join([time.strftime('%x'), time.strftime('%X')])
e2e50bf2d2a9132e429dce28e092805575c509a4
22,706
def is_auxiliary_relation(token1, token2):
    """Return True if `token1` is an auxiliary dependent of `token2`."""
    return (
        (token1.upos == "AUX")
        and (token1.deprel in ["aux", "aux:pass"])
        and (token2.upos == "VERB")
        and (token1.head == token2.id)
    )
1d0e6a50523e8c61e8cb77e68064db1a2750118f
22,707
from datetime import datetime


def convert_string_date_to_datetime(now_str: str) -> datetime:
    """
    Converts a string of the format yyyy-mm-dd-hh-mm-ss to datetime format.

    :param now_str: str. String of the format yyyy-mm-dd-hh-mm-ss.
    """
    numbers = [int(string_number) for string_number in now_str.split("-")]
    date = datetime(numbers[0], numbers[1], numbers[2], numbers[3],
                    numbers[4], numbers[5])
    return date
f1eb59bb618758ffe82fa09db820fb141b5e7781
22,708
def toCategorical(df):
    """
    This function changes the object datatypes in a pandas.DataFrame into
    category datatypes.

    Parameters
    ----------
    df : pandas.DataFrame with train or test DataFrame.

    Returns
    -------
    df : pandas.DataFrame with new datatypes.
    """
    columns = ['availability', 'group', 'content', 'unit', 'pharmForm',
               'campaignIndex', 'salesIndex', 'category', 'manufacturer']
    for col in columns:
        if col in df.columns:
            df[col] = df[col].astype('category')
    return df
18ec1010be4404d340d2a3f55353c11fae64b844
22,709
def seperate_list(all_list, pred):
    """Given a predicate, separate a list into a true and false list.

    Arguments:
        all_list {list} -- all items
        pred {function} -- predicate function
    """
    true_list = []
    false_list = []
    for item in all_list:
        if pred(item):
            true_list.append(item)
        else:
            false_list.append(item)
    return true_list, false_list
9ef7f2f3a16eb83478c75f7c8bb8ec95af5e5eba
22,710
def count_parameters(model):
    """
    Compute the number of trainable parameters of the model.

    :param model: type nn.Module
    :return: number of parameters, type int
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
2d57e514e5480538d5a9586229af15b7a32b3791
22,711
from typing import Any
from typing import Dict
from typing import OrderedDict


def convert_dict_to_ordered_dict(qconfig_dict: Any) -> Dict[str, Dict[Any, Any]]:
    """ Convert dict in qconfig_dict to ordered dict """
    # convert a qconfig list for a type to OrderedDict
    def _convert_to_ordered_dict(key, qconfig_dict):
        qconfig_dict[key] = OrderedDict(qconfig_dict.get(key, []))

    _convert_to_ordered_dict('object_type', qconfig_dict)
    _convert_to_ordered_dict('module_name_regex', qconfig_dict)
    _convert_to_ordered_dict('module_name', qconfig_dict)
    return qconfig_dict
bf5a8543a2b306d79824902a7b60200ee1994a4c
22,712
import sys
import platform


def multiarch_args():
    """Returns args requesting multi-architecture support, if applicable."""
    # On MacOS we build "universal2" packages, for both x86_64 and arm64/M1
    if sys.platform == 'darwin':
        args = ['-arch', 'x86_64']
        # ARM support was added in XCode 12, which requires MacOS 10.15.4.
        # clang_version() is assumed to be defined elsewhere in this module.
        if clang_version() >= 12:  # XCode 12
            if [int(n) for n in platform.mac_ver()[0].split('.')] >= [10, 15, 4]:
                args += ['-arch', 'arm64', '-arch', 'arm64e']
        return args
    return []
ba63f02534d364630fc5e4676d49fc56d4a61b56
22,713
def _extract_table_arrays(table):
    """Get buffer info from arrays in table; outputs are padded so dim sizes
    are rectangular.

    Args:
        table: A pyarrow.Table

    Return:
        tuple of:
        array_buffer_addrs: 3-dim list of buffer addresses where dims are
            columns, chunks, buffer addresses
        array_buffer_sizes: 3-dim list of buffer sizes, follows addrs layout
        array_lengths: 3-dim list of array lengths where dims are columns,
            chunks, length of array followed by child array lengths
    """
    array_buffer_addrs = []
    array_buffer_sizes = []
    array_lengths = []

    max_num_bufs = 0
    max_num_chunks = 0
    max_num_lengths = 0

    # Iterate over each column in the Table
    for chunked_array in table:
        array_chunk_buffer_addrs = []
        array_chunk_buffer_sizes = []
        array_chunk_lengths = []

        # Iterate over each data chunk in the column
        for arr in chunked_array.iterchunks():
            bufs = arr.buffers()
            array_chunk_buffer_addrs.append(
                [b.address if b is not None else 0 for b in bufs]
            )
            array_chunk_buffer_sizes.append(
                [b.size if b is not None else 0 for b in bufs]
            )

            # Get the total length of the array followed by lengths of children
            array_and_child_lengths = [len(arr)]

            # Check if has child array, e.g. list type
            if arr.type.num_children > 0:
                if hasattr(arr, "values"):
                    array_and_child_lengths.append(len(arr.values))
                else:
                    raise ValueError(
                        "Only nested type currently supported is ListType")
            array_chunk_lengths.append(array_and_child_lengths)

            if len(bufs) > max_num_bufs:
                max_num_bufs = len(bufs)
            if len(array_and_child_lengths) > max_num_lengths:
                max_num_lengths = len(array_and_child_lengths)

        array_buffer_addrs.append(array_chunk_buffer_addrs)
        array_buffer_sizes.append(array_chunk_buffer_sizes)
        array_lengths.append(array_chunk_lengths)

        if len(array_chunk_lengths) > max_num_chunks:
            max_num_chunks = len(array_chunk_lengths)

    # Pad buffer addrs, sizes and array lengths so inputs are rectangular
    num_columns = len(array_buffer_sizes)
    for i in range(num_columns):
        # Pad the chunk lists with fresh empty lists that are filled with
        # null bufs below. Note: `[[]] * n` would alias one shared list, and
        # the addrs list must be chunk-padded in step with the sizes list.
        if len(array_buffer_sizes[i]) < max_num_chunks:
            missing = max_num_chunks - len(array_buffer_sizes[i])
            array_buffer_sizes[i].extend([[] for _ in range(missing)])
            array_buffer_addrs[i].extend([[] for _ in range(missing)])
        if len(array_lengths[i]) < max_num_chunks:
            missing = max_num_chunks - len(array_lengths[i])
            array_lengths[i].extend([[] for _ in range(missing)])
        num_chunks = len(array_buffer_sizes[i])
        for j in range(num_chunks):
            # pad buffer addr, size, and array length lists
            if len(array_buffer_sizes[i][j]) < max_num_bufs:
                array_buffer_sizes[i][j].extend(
                    [-1] * (max_num_bufs - len(array_buffer_sizes[i][j]))
                )
                array_buffer_addrs[i][j].extend(
                    [0] * (max_num_bufs - len(array_buffer_addrs[i][j]))
                )
            if len(array_lengths[i][j]) < max_num_lengths:
                array_lengths[i][j].extend(
                    [-1] * (max_num_lengths - len(array_lengths[i][j]))
                )
    return array_buffer_addrs, array_buffer_sizes, array_lengths
81ca91d95305ab1b6541e603d90472ecd254f4df
22,714
import math


def euclidean_distance(point1, point2):
    """
    Returns the euclidean distance between 2 points.

    :param point1: The first point (an array of integers)
    :param point2: The second point (an array of integers)
    """
    # The elementwise subtraction requires array-like inputs (e.g. numpy
    # arrays); plain Python lists do not support `point1 - point2`.
    return math.sqrt(sum((point1 - point2) ** 2))
78888479fa3cfd3e53e235fa1d525b7dbe3314e1
22,715
def _is_pt_file(path: str) -> bool:
    """Returns true if the path is a ``.pt`` file and false otherwise."""
    return path.endswith('.pt')
5f1dc9a9130f5cb25d44ac7b72b63af831538eb5
22,716
def checkEmblFile(filin):
    """Check EMBL annotation file given by user"""
    line = filin.readline()
    # TEST 'ID'
    if line[0:2] != 'ID':
        return 1
    else:
        return 0
8087a78a35193545070f76dbd969b617c7c92b0c
22,717
import os


def output(input_file, input_path, output_path, change=True, extra=None,
           begin=False, output_extension=None):
    """
    Output filename from input filename.

    Parameters
    ----------
    input_file : str
        Input filename.
    input_path : str
        Path from input file.
    output_path : str
        Path to output file.
    change : bool
        Change the directory structure.
        False: the same directory structure as input filename.
        True: change the directory structure, output path as root path
        (default).
    extra : str
        Extra name.
    begin : bool
        Add the extra name in the beginning of the output filename.
        False: extra name in the ending (default).
        True: extra name in the beginning.
    output_extension : str
        Output file extension.
    """
    name, input_extension = os.path.splitext(input_file)
    dirname, basename = os.path.split(name)
    extension = output_extension if output_extension else input_extension
    if extra is None:
        filename = f'{basename}{extension}'
    else:
        if begin:
            filename = f'{extra}{basename}{extension}'
        else:
            filename = f'{basename}{extra}{extension}'
    if change:
        output_file = os.sep.join([output_path, filename])
    else:
        root = dirname.replace(input_path, output_path)
        output_file = os.sep.join([root, filename])
    return output_file
132a836f28f0ca151db857005ff15e4a923aa765
22,718
import math


def calc_distance(asteroid1, asteroid2):
    """Calculates the distance between two asteroids"""
    x1, y1 = asteroid1
    x2, y2 = asteroid2
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx * dx + dy * dy)
1c424a94c7638c0a913675c42f4e4226f0c7c97b
22,719
def get_value(item: str, json_package: dict) -> dict:
    """Return dict item."""
    return_dict = {}
    if item in json_package:
        return_dict[item] = json_package[item]
    return return_dict
980325c5f8dfb51fa0551726081b3fea76ed3238
22,720
def gzipOA(directory=None, cmdverbose: int = 0, add_check: bool = True,
           unzip_text: bool = False):
    """ Create script to compress all oa files in a directory with gzip """
    if directory is not None:
        cmd = 'cd %s\n' % directory
    else:
        cmd = ''
    if cmdverbose >= 2:
        cmd += 'echo "Running script generated by gzipOA"\n'
    if unzip_text:
        cmd += '# check for accidentally zipped text files\n'
        cmd += 'gzoafiles=$(ls *.oa.gz 2> /dev/null)\n'
        cmd += 'for f in $gzoafiles\ndo\n'
        cmd += 'format=$(oainfo -v 0 -f $f)\n'
        if cmdverbose >= 3:
            cmd += 'echo "unzip? $f, format is $format"\n'
        cmd += 'if [ "$format" == 5 ]; then\n'
        if cmdverbose:
            cmd += 'echo "unzip $f, format is $format"\n'
        cmd += '  gzip -d -q -f $f; \n'
        cmd += 'fi\n'
        cmd += 'done\n'
        cmd += '\n'
    cmd += 'oafiles=$(ls *.oa 2> /dev/null)\n'
    cmd += 'noafiles=$(ls *.oa 2> /dev/null | wc -l)\n'
    cmd += '# echo "oafiles $oafiles"\n'
    cmd += 'if [ "$noafiles" != "0" ]; then\n'
    if cmdverbose:
        cmd += 'echo "doing zip of $noafiles file(s)..."\n'
    if add_check:
        # bash only...
        cmd += 'for f in $oafiles\ndo\n'
        cmd += 'format=$(oainfo -v 0 -f $f)\n'
        cmd += 'if [ "$format" != 0 ]; then\n'
        if cmdverbose:
            cmd += 'echo "zip $f, format is $format"\n'
        cmd += '  gzip -q -f $f; \n'
        cmd += 'fi\n'
        cmd += 'done\n'
    else:
        cmd += '  gzip -q -f *.oa; \n'
    cmd += 'fi\n'
    return cmd
6f8dfa12369a654c7bc8dd24a3ac7f1faa650d71
22,721
def parse_response(data):
    """
    Parse response and return json array of sites + associated data.

    :param data: json data returned by Alexa API, such as that returned in
        fn(query_api_url)
    :return: json
    """
    return data['Ats']['Results']['Result']['Alexa']['TopSites']['Country']['Sites']['Site']
077c1ee5e34538738218fb66c1eed04dfe8c18b8
22,722
def api_error(request):
    """Fallback error handler for frontend API requests."""
    request.response.status_int = 500
    # Exception details are not reported here to avoid leaking internal
    # information.
    return {
        "message": "A problem occurred while handling this request. "
                   "Hypothesis has been notified."
    }
3281c15f044a6c5b29ef0686e10cdf878fc445d1
22,723
def get_record_count(hook, database, table):
    """ gets the row count for a specific table """
    query = '''SELECT t.row_count as record_count
               FROM "{}".information_schema.tables t
               WHERE t.table_name = '{}';'''.format(database, table)
    return hook.get_records(query).pop()[0]
24d5ae81093bd4ae411d13571ee9ee0d8e36da4e
22,724
import fnmatch


def fpath_has_ext(fname, exts, case_sensitive=False):
    """returns true if the filename has any of the given extensions"""
    fname_ = fname.lower() if not case_sensitive else fname
    if case_sensitive:
        ext_pats = ['*' + ext for ext in exts]
    else:
        ext_pats = ['*' + ext.lower() for ext in exts]
    return any([fnmatch.fnmatch(fname_, pat) for pat in ext_pats])
6283429081ceed9a825d7d1541a660e1d12f9a2d
22,725
def area_triangle(base: float, height: float) -> float:
    """
    Calculate the area of a triangle given the base and height.

    >>> area_triangle(10, 10)
    50.0
    >>> area_triangle(-1, -2)
    Traceback (most recent call last):
        ...
    ValueError: area_triangle() only accepts non-negative values
    >>> area_triangle(1, -2)
    Traceback (most recent call last):
        ...
    ValueError: area_triangle() only accepts non-negative values
    >>> area_triangle(-1, 2)
    Traceback (most recent call last):
        ...
    ValueError: area_triangle() only accepts non-negative values
    """
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
33520e4457a027157886ab14f1822b1675b2fa92
22,726
def map_category(category):
    """
    Monarch's categories don't perfectly map onto the biolink model
    https://github.com/biolink/biolink-model/issues/62
    """
    return {
        'variant': 'sequence variant',
        'phenotype': 'phenotypic feature',
        'sequence variant': 'variant',
        'phenotypic feature': 'phenotype',
        # 'model': 'model to disease mixin'
    }.get(category.lower(), category)
7481a12360e2f5e5fb33fb1b351633aae8eb1e64
22,727
import math


def get_3d_pos_from_x_orientation(x_orientation, norm=1):
    """
    Get a 3d position x, y, z for a specific x orientation in degrees.

    Args:
    - (float) orientation around x axis
    - (float) norm to rescale output vector

    Return:
    - (float) x position [0; 1] * norm
    - (float) y position [0; 1] * norm
    - (float) z position [0; 1] * norm
    """
    x_orientation_rad = math.radians(x_orientation)
    x_pos = 0
    y_pos = -math.sin(x_orientation_rad)
    z_pos = math.cos(x_orientation_rad)
    return x_pos * norm, y_pos * norm, z_pos * norm
9cbecf86db3d3de5e203bde3381d6c3f05115577
22,728
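A usage sketch; the x component is always 0, and the y/z components trace the unit circle (the second z value is ~0 up to float rounding):

print(get_3d_pos_from_x_orientation(0))   # (0, -0.0, 1.0)
print(get_3d_pos_from_x_orientation(90))  # (0, -1.0, ~0.0)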
def get_field_value(field, with_defaults) -> str:
    """ This helper extracts a value from a field. """
    if field.is_not_none:
        return field.failure_safe_value
    elif field.default and with_defaults:
        return field.default
    return ''
ad2529ac6a2bada2e235c4ad79891e8407108392
22,729
def make_system_simple(support, sampling_locations):
    """
    Make measurement system from the samples and output.

    support: signal support as binary array
    sampling_locations: sampling locations
    """
    M = sampling_locations.dot(support.T)
    M = M % 2
    M = (-1) ** M
    return M
6c12dc126777aa3b7b3223663503cac2f1ea32bd
22,730
import math


def invertedCantorParingFunction(z):
    """
    @see http://en.wikipedia.org/wiki/Pairing_function

    >>> invertedCantorParingFunction(0)
    (0, 0)
    >>> invertedCantorParingFunction(1)
    (1, 0)
    >>> invertedCantorParingFunction(2)
    (0, 1)
    >>> invertedCantorParingFunction(3)
    (2, 0)
    >>> invertedCantorParingFunction(4)
    (1, 1)
    """
    w = int(math.floor((math.sqrt(8 * z + 1) - 1) / 2))
    t = (w ** 2 + w) // 2  # integer division keeps x and y as ints on Python 3
    y = z - t
    x = w - y
    return (x, y)
963b663949485cdbf3f1b738d49d684ce18cd28b
22,731
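A round-trip check against the standard forward Cantor pairing formula, which matches the convention of the doctests above:

def cantor_pair(x, y):
    # pi(x, y) = (x + y)(x + y + 1)/2 + y
    return (x + y) * (x + y + 1) // 2 + y

for z in range(10):
    x, y = invertedCantorParingFunction(z)
    assert cantor_pair(x, y) == z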
def get_filters(filters):
    """The conditions to be used to filter data to calculate the total sale."""
    query_filters = []
    if filters.get("company"):
        query_filters.append(["company", '=', filters['company']])
    if filters.get("from_date"):
        query_filters.append(["posting_date", '>=', filters['from_date']])
    # The original checked "from_date" here too, a copy-paste slip
    if filters.get("to_date"):
        query_filters.append(["posting_date", '<=', filters['to_date']])
    return query_filters
29d0da9d4d19702d032d33e04292e7a8d288e2ab
22,734
def IDX(n) -> bool:
    """Generate an index into strings from the tree legends.

    These are always a choice between two, so bool works fine.
    """
    return bool(n)
ebeed641df5dabf046a25ffc66d28aa4d1a29ab4
22,735
import os


def abspath(path):
    """Convert path to absolute path, with uppercase drive letter on win32."""
    path = os.path.abspath(path)
    if path[0] != '/':
        # normalise Windows drive letter
        path = path[0].upper() + path[1:]
    return path
b26761a90a3d9e87761b4b4df77512daf44e11b9
22,736
import logging


def dismiss_cancelled_notifications(notifications: list[dict],
                                    cancelled_notifications: list[dict]):
    """Remove the cancelled notifications from the notifications data
    structure.

    Parameters:
        notifications(list[dict]): A list of notifications, which are in
            dictionary format.
        cancelled_notifications(list[dict]): A list of notifications that
            have been cancelled by the user, also in dictionary format.

    Returns:
        If the length of the cancelled notifications list is 0, the function
        will just return the list of notifications as there is nothing to
        cancel. Otherwise it will parse the list of notifications, checking
        if the notification is in the list of cancelled notifications. If it
        is, it will remove the notification from the list of the
        notifications. Once every notification has been checked, it will
        return the new list of notifications.

    Exceptions:
        No stated exceptions.

    Relation to main:
        This function will be called every time the page refreshes to remove
        the notifications that the user has cancelled from the notifications
        data structure.
    """
    if len(cancelled_notifications) == 0:
        logging.info("FUNCTION:dismiss_cancelled_notifications:"
                     "No notifications to cancel")
        return notifications
    # Build a new list rather than calling remove() on a list while
    # iterating over it, which can skip elements.
    notifications = [notification for notification in notifications
                     if notification not in cancelled_notifications]
    logging.info("FUNCTION:dismiss_cancelled_notifications:"
                 "Notifications dismissed successfully")
    return notifications
e496ff8f1c0b6581bd2f68f287e02f3a61f02f93
22,738
def extract_data(source: str):
    """Read list of newline-delimited ints from text file."""
    with open(source, "r") as f:
        data = f.readlines()
    # The original compared against "/n"; the newline escape is "\n"
    data = [int(el) for el in data if el != "\n"]
    return data
a3ea4301699bad013fa479503ac9fc460781a34e
22,739
def yes_no_dialog(prompt: str) -> bool:
    """Return true/false based on prompt string and Y/y or N/n response."""
    while True:
        print()
        print(prompt)
        print()
        s = input("Y/y to confirm, N/n to reject >")
        if s in ("Y", "y"):
            return True
        if s in ("N", "n"):
            return False
d0b8de6173621c38b8f2072ecaf2f6536a188cd1
22,740
def gatherIntel(analyzer, scs, sds):
    """Gather all of the intelligence about the program's different features.

    Args:
        analyzer (instance): analyzer instance to be used
        scs (list): list of (sark) code segments to work on
        sds (list): list of (sark) data segments to work on

    Return Value:
        True iff everything was successful
    """
    # calibrate the features needed for the function classifier
    if not analyzer.func_classifier.calibrateFunctionClassifier(scs):
        return False
    # Observe function features from identified functions
    analyzer.func_classifier.trainFunctionClassifier(scs)
    # calibrate the features needed for the function type classifier
    if analyzer.hasActiveCodeTypes():
        if not analyzer.func_classifier.calibrateFunctionTypeClassifier(scs):
            return False
        # Observe function features from identified functions
        analyzer.func_classifier.trainFunctionTypeClassifier(scs)
    # Observe the code features from all of the code
    if analyzer.isCodeContainsData() and \
            analyzer.locals_identifier.observeLocalConstants(scs) is None:
        return False
    # Everything went well if we reached this line
    return True
d4a4a395f40a26287b1fd136712b070ed7f3e989
22,743
import operator


def merge_sequences(target, other, function=operator.add):
    """
    Merge two sequences into a single sequence. The length of the two
    sequences must be equal.
    """
    assert len(target) == len(other), 'sequence lengths must match'
    return type(target)([function(x, y) for x, y in zip(target, other)])
a1d734c8bceb08c4af7d39d8ad048fdc53cda79d
22,744
def substitute(vars, s):
    """This function replaces words wrapped in percent signs in our in.txt
    file."""
    # Replace each %key% occurrence with its corresponding value
    for key in vars.keys():
        s = s.replace("%" + key + "%", vars[key])
    return s
e22eb64962ff96bcffe052f5e5ac3784bb343b67
22,745
def getHuffmanCoding(tree, prefix="", code=None):
    """
    Recursively walks the given tree and assigns codes to the individual
    characters according to the rules of Huffman coding: an edge going left
    from its parent is labelled 0 and an edge going right is labelled 1.
    The code of a node is the sequence of edge labels on the path to it.

    Parameters:
    ----------
    tree: dictionary
        Dictionary representing the tree to traverse
    prefix: string
        String containing the code of the path to this node
    code: dictionary
        Dictionary containing pairs of characters/digits (key) and their
        codes.

    Returns:
    ------
    dictionary
        Dictionary containing pairs of characters/digits (key) and their
        codes.
    """
    # A mutable default argument ({}) would be shared between calls, so the
    # dictionary is created here instead.
    if code is None:
        code = {}
    # If the left child is just a "parent node"
    if tree["left"]["name"] == "":
        getHuffmanCoding(tree["left"], prefix + "0", code)
    else:
        # Node is a character/digit, create its code directly
        code[tree["left"]["name"]] = prefix + "0"
    # If the right child is just a "parent node"
    if tree["right"]["name"] == "":
        getHuffmanCoding(tree["right"], prefix + "1", code)
    else:
        # Node is a character/digit, create its code directly
        code[tree["right"]["name"]] = prefix + "1"
    return code
691d5c2545a250a7036a878cc90c47c16be22f66
22,746
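A minimal sketch with a hypothetical tree in the shape the function expects (leaves carry a "name", internal nodes have name ""):

tree = {
    "name": "",
    "left": {"name": "", "left": {"name": "a"}, "right": {"name": "b"}},
    "right": {"name": "c"},
}
print(getHuffmanCoding(tree))  # {'a': '00', 'b': '01', 'c': '1'}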
from typing import Dict
from typing import OrderedDict


def _scores_to_ranks(
    *, scores: Dict, reverse: bool = False
) -> Dict[str, float]:
    """
    Go from a score (a scalar) to a rank (integer). If two scalars are the
    same then they will have the same rank.

    Takes a dictionary where the keys are the pk of the results and the
    values are the scores.

    Outputs a dictionary where the keys are the pk of the results and the
    values are the ranks.
    """
    scores = OrderedDict(
        sorted(scores.items(), key=lambda t: t[1], reverse=reverse)
    )
    ranks = {}
    current_score = current_rank = None
    for idx, (pk, score) in enumerate(scores.items()):
        if score != current_score:
            current_score = score
            current_rank = idx + 1
        ranks[pk] = current_rank
    return ranks
ee86ea631167e043c1c4dabee5d3c316a46d6a68
22,748
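A usage sketch with hypothetical pks; tied scores share a rank and the next distinct score skips ahead ("competition ranking"):

print(_scores_to_ranks(scores={"a": 0.1, "b": 0.5, "c": 0.5, "d": 0.9},
                       reverse=True))
# {'d': 1, 'b': 2, 'c': 2, 'a': 4}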
import os


def strip_path(filepath):
    """
    'same/path/to/filename.jpg' -> 'filename.jpg'

    For example usage see doc of file_exists.
    """
    return os.path.split(filepath)[1]
a2c5cb832f71c143bb00310333b0bfd2ceeb2d39
22,750
def get_unique_words(sentences):
    """
    Input: An array of sentences (array of words) obtained after preprocessing
    Output: A dictionary of unique words, each with an assigned index to
    define the order
    """
    # A dictionary to track whether a word has been seen before or not
    dic = {}
    # Initialize an empty dict of unique words
    unique_words = {}
    index = 0
    for sentence in sentences:
        for word in sentence:
            if word not in dic:
                dic[word] = 1
                unique_words[word] = index
                index += 1
    return unique_words
797ac7c1effca9b2c085709b6d7c564c3818f084
22,751
def construct_policy(mode, name, value='', vtype=''):
    """Map the mode and return a list containing the policy dictionary."""
    default = {
        'policy_type': 'unknown'
    }
    policy_map = {
        'set_reg_value': {
            'policy_type': 'regpol',
        },
        'delete_reg_value': {
            'policy_type': 'regpol',
            'action': 'DELETE'
        },
        'set_secedit_value': {
            'policy_type': 'secedit',
        },
    }
    mapped = policy_map.get(mode, default)
    mapped['key'] = name
    mapped['name'] = name
    mapped['value'] = value
    mapped['vtype'] = vtype
    return [mapped]
c8510729ed9497e4d25ee71be43588ecffd37f49
22,752
def get_resource(requests, reference):
    """
    Fetches a resource.

    ``reference`` must be a relative reference. e.g. "Patient/12345"
    """
    response = requests.get(endpoint=reference, raise_for_status=True)
    return response.json()
107fdde1ee4db029b2c279e3548f6969e2e6c16a
22,753
def printTime(t):
    """
    Takes time in seconds and prints it in a human friendly format
    (DD:hh:mm:ss)
    """
    t = round(t)
    if t < 60:
        return ("%d seconds" % t), (0, 0, 0, t)
    else:
        m = int(t / 60)
        s = t % 60
        if (m < 60) and (m > 1):
            return ("%d minutes, %d seconds" % (m, s)), (0, 0, m, s)
        elif m == 1:
            return ("%d minute, %d seconds" % (m, s)), (0, 0, m, s)
        else:
            h = int(m / 60)
            m = m % 60
            if (h < 24) and (h > 1):
                return ("%d hours, %d minutes, %d seconds" % (h, m, s)), (0, h, m, s)
            elif h == 1:
                return ("%d hour, %d minutes, %d seconds" % (h, m, s)), (0, h, m, s)
            else:
                d = int(h / 24)
                h = h % 24
                if d > 1:
                    return ("%d days, %d hours, %d minutes, %d seconds" % (d, h, m, s)), (d, h, m, s)
                else:
                    return ("%d day, %d hour, %d minutes, %d seconds" % (d, h, m, s)), (d, h, m, s)
7cb8c5bf225b66b84bd97e9e1ffefc7f905339e8
22,754
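A usage sketch; the function returns both the formatted string and a (days, hours, minutes, seconds) tuple:

text, parts = printTime(3661)
print(text)   # '1 hour, 1 minutes, 1 seconds'
print(parts)  # (0, 1, 1, 1)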
from typing import List


def get_currencies(self) -> List:
    """
    Shows active currencies list.

    :return: the list of currencies that are active at the moment, with
        their names as identified in the system
    """
    method = 'GET'
    api_url = '/api/v1/currency'
    path = self._host + api_url
    signature = self._make_signature(method, api_url)
    response = self._make_request(method, path, signature)
    return response.get("currencies")
a9427c34ef96182415ad859a6091f42d33e8dd1d
22,755
import re


def sanitizeTitle(title):
    """Sanitizes the passed title string to allow the file to be saved on
    Windows without introducing illegal characters"""
    matches = re.findall(r"[^/\\:*?\"<>|]+", title)
    return "".join(matches)
26fe9ee3ca5d03eae75a7f9f621c135e318cfeef
22,758
def get_alias(dataset):
    """Get alias for dataset.

    Parameters
    ----------
    dataset : dict
        Dataset metadata.

    Returns
    -------
    str
        Alias.
    """
    alias = f"{dataset['project']} dataset {dataset['dataset']}"
    additional_info = []
    for key in ('mip', 'exp', 'ensemble'):
        if key in dataset:
            additional_info.append(dataset[key])
    if additional_info:
        alias += f" ({', '.join(additional_info)})"
    if 'start_year' in dataset and 'end_year' in dataset:
        alias += f" from {dataset['start_year']:d} to {dataset['end_year']:d}"
    return alias
36d3322caca81a77301c0b28993e73a186f5bb8a
22,760
import bz2


def compress(data):
    """ Helper function to compress data (using bz2) """
    c = bz2.BZ2Compressor()
    a = c.compress(data)
    b = c.flush()
    return a + b
5d6bbcb357f71f69d80a25803279064c2458333c
22,762
import six


def lowercase_value(value):
    """
    Lowercase the provided value. In case of a list, all the string item
    values are lowercased, and in case of a dictionary, all of the string
    keys and values are lowercased.
    """
    if isinstance(value, six.string_types):
        result = value.lower()
    elif isinstance(value, (list, tuple)):
        result = [str(item).lower() for item in value]
    elif isinstance(value, dict):
        result = {}
        # The loop variable is renamed so it no longer shadows `value`
        for key, item in six.iteritems(value):
            result[key.lower()] = str(item).lower()
    else:
        result = value
    return result
693cbf5477abf0a54dca0e4e6914f6c7bf2d9b46
22,763
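A usage sketch (assuming six is installed); non-string list/dict values are stringified before lowercasing:

print(lowercase_value("HeLLo"))        # 'hello'
print(lowercase_value(["A", 1, "B"]))  # ['a', '1', 'b']
print(lowercase_value({"K": "V"}))     # {'k': 'v'}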
import re


def check_gline(rnames_line, gline_in, rct_cmpds, sort_rcts=[], verbose=True,
                map_dict=dict({})):
    """Function to take a reaction line and a Gstr line and make sure that
    the reactants in the Gstr line are what actually appear as the rcts in
    the rxn line!!! Will OVERWRITE the Gs-line if differences are found.

    Inputs:
    -------
        rnames_line - String from a rxn list that has an indv reaction on it:
            rnames_line= "Rnames{i}='14Z+ C= 12V + 3.45G'; % Comment v important "
        gline_in - String from a f_list that has fractional stoichiometry
            for rxn on it. Can be empty.
            gline_in= 'Gstr{i,1} ='Z'; Gstr{i,2} ='C'; % V impt comment'
        rct_cmpds - A list of reactants in the rnames_line. = ['Z','C']
        sort_rcts - A list of indices that sort rct_cmpds.
        verbose - Boolean of whether to print errors if found or not.

    Outputs:
    --------
        gline_out - A string corresponding to gline_in where the reactants
            used in the gline have been verified against the reactants in
            the rnames line. If sort_indx is passed, it will also be sorted
            so that things appear in the gline as they would in the sorted
            reaction. (Obv with rcts before prods still)...
            ' Gstr{i,1} ='C'; Gstr{i,2} ='Z'; % V impt comment'

    Author:
    -------
        Dr. Jessica D. Haskins (jhaskins@alum.mit.edu) GitHub: @jdhask

    Change Log:
    ----------
        1/18/2022 JDH Created
    """
    if len(gline_in) > 0:
        gs_cmts = gline_in.split('%')  # Look for any comments after line
    else:
        gs_cmts = ['', '']

    # Store the comment if you got one.
    comments = ' % ' + ' '.join(gs_cmts[1:]) if len(gs_cmts) > 1 else ''

    # Get a list of the gstrs you have in gline_in for this rxn
    gs_have = [g.replace(' ', '') + ';' for g in gs_cmts[0].split(';')
               if g.replace(' ', '') + ';' != ';']

    # Map gs you have to what they'd be using map_dict so comparison to
    # mapped shoulds isn't wonky.
    for gi, gstr in enumerate(gs_have):
        for old in map_dict.keys():
            if "'" + old + "';" in gstr:
                gs_have[gi] = gstr.replace("'" + old + "'",
                                           "'" + str(map_dict[old]) + "'")

    # Build a list of what you *should* have for all the Gstrs on this line.
    should_have = ["Gstr{i," + str(ii + 1) + "}='" + cmpd_i + "';"
                   for ii, cmpd_i in enumerate(rct_cmpds) if cmpd_i != 'hv']

    # Don't pop an error just because the items in G are sorted in a dif way
    for n, rct_i in enumerate(rct_cmpds):
        for indxx, gstr in enumerate(gs_have):
            if rct_i in gstr:
                current_ind = [h.group() for h in
                               re.finditer(r"(?<=Gstr{i,).*?(?=}\=\'" + rct_i + "\';)", gstr)
                               if h.group() != '']
                if len(current_ind) > 0:
                    indd = current_ind[0]
                    gs_have[indxx] = gstr.replace("{i," + str(indd) + "}",
                                                  "{i," + str(n + 1) + "}")

    # Check whether you have all the Gstrs you should have....
    # Is True if Invalid found
    invalid = [indd for indd, gstr in enumerate(gs_have)
               if gstr not in should_have]

    if len(invalid) > 0 and verbose is True:
        print('Bad G-string found in rxn: ', "'" + rnames_line + "'", "\n",
              'Original: ', gs_cmts[0], "\n")
        [print(' This string:', "'" + gs_have[baddie] + "'",
               'should be in', "'", should_have, "' \n \n")
         for baddie in invalid]

    # Now craft the G-List based on what you should have....
    gline_out = ' '.join(should_have) + comments

    return gline_out
1394ebfed1e752791284af5a706aea1c73de3ce7
22,764
from pathlib import Path


def get_package_list():
    """
    Get all the package names for which documentation is available.
    """
    package_list = []
    for stuff in (Path().resolve().parent / "code_documentation").glob("*"):
        if stuff.is_dir():
            package_list.append(stuff.name)
    package_list.sort()
    print("The list of all the available documentation yet")
    print("package_list = ", package_list)
    return package_list
35539738dc82190f123f19b5906b1071b4e3d192
22,765
import pkg_resources
from typing import Dict


def get_package_versions() -> Dict[str, str]:
    """A utility function that provides dependency package versions.

    At the moment it is an experimental utility. Everything is under
    try-catch in case something goes wrong.

    :return: A dictionary with versions
    """
    try:
        dependency_packages = ["eogrow"] + [
            requirement.name for requirement in
            pkg_resources.working_set.by_key["eogrow"].requires()  # type: ignore
        ]
        return {name: pkg_resources.get_distribution(name).version
                for name in dependency_packages}
    except BaseException as ex:
        return {"error": repr(ex)}
f2eb0569c1ddf887949ed6eeaddb12f80c0c43ba
22,767
def are_checksums_equal(checksum_a_pyxb, checksum_b_pyxb):
    """Determine if checksums are equal.

    Args:
        checksum_a_pyxb, checksum_b_pyxb: PyXB Checksum objects to compare.

    Returns:
        bool -
        - ``True``: The checksums contain the same hexadecimal values
          calculated with the same algorithm. Identical checksums guarantee
          (for all practical purposes) that the checksums were calculated
          from the same sequence of bytes.
        - ``False``: The checksums were calculated with the same algorithm
          but the hexadecimal values are different.

    Raises:
        ValueError
            The checksums were calculated with different algorithms, hence
            cannot be compared.
    """
    if checksum_a_pyxb.algorithm != checksum_b_pyxb.algorithm:
        raise ValueError(
            "Cannot compare checksums calculated with different algorithms. "
            'a="{}" b="{}"'.format(checksum_a_pyxb.algorithm,
                                   checksum_b_pyxb.algorithm)
        )
    return checksum_a_pyxb.value().lower() == checksum_b_pyxb.value().lower()
533151d79a4f5fabc62d52f9e021b7daf6bd8d71
22,768