content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import re
def isMail(mail):
    """Check whether *mail* looks like an email address.

    :param mail: candidate address
    :type mail: str
    :return: True if the address matches the pattern, False otherwise
    """
    pattern = re.compile(r'[^\._][\w\._-]+@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$')
    return pattern.match(mail) is not None
c1fc308a89cefb86b50bff594bc6ed4c85abd3cb
35,787
def get_automl_options_string(args):
    """ This function creates a string suitable for passing to another script
    of the automl command line options.

    The expected use case for this function is that a "driver" script is given
    the automl command line options (added to its parser with
    add_automl_options), and it calls multiple other scripts which also accept
    the automl options.

    Parameters
    ----------
    args : argparse.Namespace
        A namespace containing all of the options from add_automl_options

    Returns
    -------
    string:
        A string containing all of the automl options
    """
    values = vars(args)

    # scalar options that map directly to a "--flag value" pair
    scalar_options = ['out', 'tmp', 'seed', 'total_training_time',
                      'iteration_time_limit', 'ensemble_size', 'ensemble_nbest']

    flag_to_value = {}
    for option in scalar_options:
        if values[option] is not None:
            flag_to_value['--{}'.format(option.replace('_', '-'))] = values[option]

    # the estimators option is a list, so it is rendered separately
    estimators = ""
    if values['estimators'] is not None:
        estimators = "--estimators {}".format(
            " ".join(mt for mt in values['estimators']))

    rendered = ' '.join("{} {}".format(flag, val)
                        for flag, val in flag_to_value.items())
    return "{} {}".format(estimators, rendered)
bfe27e8e76666e0e5ca0f74feef8465234baebe8
35,788
import torch
def get_normal(xs):
    """Normals of xs.

    Args:
        xs: tensor [num_xs, 2]
    Returns:
        tensor [num_xs, 2]
    """
    # rotate each 2-D vector by -90 degrees: (x, y) -> (y, -x)
    first = xs[:, 1]
    second = -xs[:, 0]
    return torch.stack((first, second), dim=-1)
b8bb383e4750651d391885a898d481e53d80add8
35,790
import torch
import math
def euler_angles_to_rotation_matrix(car_rotation, is_dir=False):
    """Build per-sample rotation matrices from Euler angles.

    Input:
        car_rotation: tensor [N, 3] of [roll, pitch, yaw] angles
        is_dir: whether just use the 2d direction on a map (currently unused)
    Returns:
        tensor [N, 3, 3] of rotation matrices (yaw @ pitch @ roll)
    """
    num = car_rotation.shape[0]
    roll = car_rotation[:, 0]
    pitch = car_rotation[:, 1]
    yaw = car_rotation[:, 2]

    roll_mats = torch.tensor([
        [[1, 0, 0],
         [0, math.cos(roll[k]), -math.sin(roll[k])],
         [0, math.sin(roll[k]), math.cos(roll[k])]]
        for k in range(num)])

    pitch_mats = torch.tensor([
        [[math.cos(pitch[k]), 0, math.sin(pitch[k])],
         [0, 1, 0],
         [-math.sin(pitch[k]), 0, math.cos(pitch[k])]]
        for k in range(num)])

    yaw_mats = torch.tensor([
        [[math.cos(yaw[k]), -math.sin(yaw[k]), 0],
         [math.sin(yaw[k]), math.cos(yaw[k]), 0],
         [0, 0, 1]]
        for k in range(num)])

    # compose in yaw-pitch-roll order
    return torch.matmul(torch.matmul(yaw_mats, pitch_mats), roll_mats)
bb8c73f9432bfbe3730961ae34ff2d2b9663b0a5
35,791
def mock_purge_unauth_url(url, request):
    """
    Mock a purge request in which the credentials are valid, but the url
    that was requested is not allowed
    """
    error_body = {
        "supportId": "123456789",
        "title": "unauthorized arl",
        "httpStatus": 403,
        "detail": "http://www.example.com/bogus",
        "describedBy": "https://api.ccu.akamai.com/ccu/v2/errors/"
                       "unauthorized-arl",
    }
    return {
        'status_code': 403,
        'content-type': 'application/json',
        'server': 'Apache',
        'content': error_body,
    }
a15e329f79fb6a004e88624781d8b8e90d438e00
35,795
import csv
def extract_pids_from_file(pid_file):
    """
    Extracts pids from file containing a header and pids in each line
    :param pid_file: path to the file containing the pids
    :return: list of ints
    """
    with open(pid_file, 'r') as handle:
        rows = csv.reader(handle)
        next(rows)  # skip the header line
        return [int(row[0]) for row in rows]
77c74ec079d4c55d4c115b5966f415d6ae78e4e6
35,796
def pythagorean_triplet(a, b, c):
    """Tests whether a^2 + b^2 = c^2."""
    return c ** 2 == a ** 2 + b ** 2
3bda7322d0a8f4af5d4faa3f4ef5d5c34acbc6d3
35,797
def process_rows_of_trump_tweets(rows):
    """Extract a cleaned, lower-cased list of words from rows of tweet data.

    Each row is a list of strings: the last entry is a timestamp blob
    (e.g. '13:15:14,14906,3925,811560662853939200') and the second-to-last
    is a comma-delimited "word,date" pair. Leading/trailing punctuation is
    stripped and empty tokens, bare punctuation, URLs and hashtags dropped.

    NOTE(review): the loop iterates rows[:-1], i.e. drops the LAST row,
    although the original comment said "first row is header" — preserved
    as-is; confirm against the data source.
    """
    text = []
    punctuation = [".", ",", "!", '"', "-", "?", ":"]
    for row in rows[:-1]:
        for (idx, entry) in enumerate(row):
            if idx == len(row) - 1:
                # timestamp entry — not a word, skip it
                continue
            elif idx == len(row) - 2:
                # "word,date" pair — keep only the word part
                entry = entry.split(",")[0]
            # strip punctuation from either end, keeping at least one char
            word = entry
            while len(word) > 1:
                if word[-1] in punctuation:
                    word = word[:-1]
                elif word[0] in punctuation:
                    word = word[1:]
                else:
                    break
            # drop empties, bare punctuation, URLs and hashtags.
            # Bug fix: original used `word[0] is not "#"` — an identity
            # comparison against a str literal (only works by accident of
            # CPython interning); use a value comparison instead.
            if (
                (len(word) > 0)
                and (word not in punctuation)
                and (word[:4] != "http")
                and (word[0] != "#")
            ):
                text.append(word.lower())
    return text
a52e49a62c5bfbc90c481b90b85a3b3dc30460ed
35,798
from pathlib import Path
import filecmp
def are_directories_equal(dir1: Path, dir2: Path) -> bool:
    """Compares two directories recursively.

    Files in each directory are assumed to be equal if their names and
    contents are equal.

    Args:
        dir1: The first directory.
        dir2: The second directory.

    Returns:
        `True` if they are equal, `False` otherwise.
    """
    comparison = filecmp.dircmp(dir1, dir2)
    # Anything present on only one side means the trees differ.
    if comparison.left_only or comparison.right_only:
        return False
    (_, mismatched, errored) = filecmp.cmpfiles(
        dir1, dir2, comparison.common_files, shallow=False
    )
    if mismatched or errored:
        return False
    # Recurse into the subdirectories both trees share.
    return all(
        are_directories_equal(Path(dir1, sub), Path(dir2, sub))
        for sub in comparison.common_dirs
    )
976fe0ba26c50b67204b9888eec4176a328d114e
35,799
import argparse
def get_arguments():
    """
    Wrapper function to get the command line arguments. Inserting this piece
    of code into its own function for conda compatibility.

    Returns:
        argparse.Namespace: the parsed command line arguments.
    """
    parser = argparse.ArgumentParser(
        prog='StringMeUp',
        usage='stringmeup --names <FILE> --nodes <FILE> [--output_report <FILE>] [--output_classifications <FILE>] [--output_verbose <FILE>] [--keep_unclassified] [--minimum_hit_groups INT] [--gz_output] [--help] confidence classifications',
        description='A post-processing tool to reclassify Kraken 2 output based on the confidence score and/or minimum minimizer hit groups.')

    # Positional arguments: the confidence cutoff and the Kraken 2 output file.
    parser.add_argument(
        'confidence_threshold',
        metavar='confidence',
        type=float,
        help='The confidence score threshold to be used in reclassification [0-1].')
    parser.add_argument(
        'original_classifications_file',
        metavar='classifications',
        type=str,
        help='Path to the Kraken 2 output file containing the individual read classifications.')

    # Optional output destinations.
    parser.add_argument(
        '--output_report',
        metavar='FILE',
        type=str,
        help='File to save the Kraken 2 report in.')
    parser.add_argument(
        '--output_classifications',
        metavar='FILE',
        type=str,
        help='File to save the Kraken 2 read classifications in.')
    parser.add_argument(
        '--keep_unclassified',
        action='store_true',
        help='Specify if you want to output unclassified reads in addition to classified reads. NOTE(!): This script will always discard reads that are unclassified in the classifications input file, this flag will just make sure to keep previously classified reads even if they are reclassified as unclassified by this script. TIP(!): Always run Kraken2 with no confidence cutoff.')
    parser.add_argument(
        '--output_verbose',
        metavar='FILE',
        type=str,
        help='File to send verbose output to. This file will contain, for each read, (1) original classification, (2) new classification, (3) original confidence, (4), new confidence (5), original taxa name (6), new taxa name, (7) original rank, (8) new rank, (9) distance travelled (how many nodes was it lifted upwards in the taxonomy).')

    # Required taxonomy dump files.
    parser.add_argument(
        '--names',
        metavar='FILE',
        required=True,
        help='Taxonomy names dump file (names.dmp)')
    parser.add_argument(
        '--nodes',
        metavar='FILE',
        required=True,
        help='Taxonomy nodes dump file (nodes.dmp)')

    parser.add_argument(
        '--minimum_hit_groups',
        metavar='INT',
        type=int,
        help='The minimum number of hit groups a read needs to be classified. NOTE: You need to supply a classifications file (kraken2 output) that contain the "minimizer_hit_groups" column.')
    parser.add_argument(
        '--gz_output',
        action='store_true',
        help='Set this flag to output <output_classifications> and <output_verbose> in gzipped format (will add .gz extension to the filenames).'
    )

    args = parser.parse_args()
    return args
452ac8f1e6edbe8dae67731b1b76275595330bcc
35,801
import sys
import io
def stdout_fileno_available():
    """
    Tests if sys.stdout.fileno is available in this testing environment
    """
    try:
        sys.stdout.fileno()
    except io.UnsupportedOperation:
        # stdout has been replaced by an in-memory stream (e.g. under capture)
        return False
    return True
0b29bb8c0598738efcd6fc91c3159fc5b2acfeb5
35,802
def int16_to_bits(x):
    """
    Unpack a 16 bit integer into binary fields. See the syntax for this here
    https://docs.python.org/3/library/string.html#format-specification-mini-language

    Parameters
    ----------
    x : int16
        single integer.

    Returns
    -------
    List of binary fields aligned so int16_to_bits(1024)[0] = bit 0
    """
    assert isinstance(x, int), 'x should be integer'
    # Bug fix: the original called `.format(x)` on an already-formatted
    # f-string, which was a no-op on the literal text. The f-string alone
    # produces the 16-char zero-padded binary representation.
    return [int(b) for b in f'{x:016b}'[::-1]]
5993bfdae9666d364f9b2629fbbb862965cedddd
35,803
from typing import Callable
import ast
import importlib
import functools
def str_to_class(module_name: str, function_name: str) -> Callable:
    """Resolve a dotted-module attribute from a string, optionally as a partial.

    Based on: https://stackoverflow.com/a/1176180/576363.

    If *function_name* is written as a call with literal arguments, e.g.
    ``"mult(mul=4)"``, the named attribute is returned wrapped in a
    :func:`functools.partial` with those arguments bound. A bare name such as
    ``"mult"`` returns the attribute itself.

    Parameters
    ----------
    module_name: str
        e.g. direct.data.transforms
    function_name: str
        e.g. Identity

    Returns
    -------
    object
    """
    parsed = ast.parse(function_name)
    call_node = parsed.body[0].value  # type: ignore

    positional = []
    keyword = {}
    if hasattr(call_node, "args"):
        positional = [ast.literal_eval(node) for node in call_node.args]
    if hasattr(call_node, "keywords"):
        keyword = {kw.arg: ast.literal_eval(kw.value) for kw in call_node.keywords}

    # Raises ModuleNotFoundError if the module cannot be loaded.
    module = importlib.import_module(module_name)

    if positional or keyword:
        return functools.partial(getattr(module, call_node.func.id), *positional, **keyword)
    return getattr(module, function_name)
d7a31b2d2a8352262fe36f8d82465801bc211ce3
35,804
def template(name, types=()):
    """template decorator."""
    # No-op decorator: the decorated function is returned unchanged.
    def passthrough(func):
        return func
    return passthrough
d8d98ead8b89807e46718c9bce662d77f4921521
35,805
def distsimpleimg(coords):
    """Return distance from muscle insertion point to the place where the
    single image ultrasound was taken.

    Arguments:
        coords {array} -- Array containing x and y coordinates of calibration
                          scale, insertion point and place of the image.

    Returns:
        [float] -- distance (first component of the scaled difference vector)
    """
    # calibration factor from the two scale points
    cal_fct = coords[1, 1] - coords[0, 1]
    insertion = coords[2]
    img_place = coords[3]
    scaled = (insertion - img_place) / cal_fct
    return scaled[0]
0aeb732895fd40393e4e175adbb493018932b867
35,807
def remove_redacted(obj):
    """
    Removes all string objects with the value __redacted__

    Recursively walks lists and dicts, dropping any element/entry whose
    value is exactly "__redacted__". Containers are mutated in place.

    Returns a (remove_me, cleaned_obj) pair: remove_me is True only for a
    string equal to "__redacted__", telling the parent container to drop it.
    """
    if isinstance(obj, str):
        return obj == "__redacted__", obj
    elif isinstance(obj, list):
        # Bug fix: the original deleted items from the list while iterating
        # it with enumerate, which skips the element following each deletion
        # (adjacent "__redacted__" entries were left in place). Build the
        # kept items first, then splice them back in place.
        kept = []
        for item in obj:
            remove, cleaned = remove_redacted(item)
            if not remove:
                kept.append(cleaned)
        obj[:] = kept
        return False, obj
    elif isinstance(obj, dict):
        to_remove = []
        for k, v in obj.items():
            remove, obj[k] = remove_redacted(v)
            if remove:
                to_remove.append(k)
        for k in to_remove:
            del obj[k]
        return False, obj
    return False, obj
1ca2ba707b3b64ec29245d122f19b5547141de4b
35,809
def index():
    """[summary] Hello world function

    [description] Used only to verify that the web service is operating.
    """
    return "Hello, this is the Credential Manager component!"
11557d0d85eefa8254d49a8cce7d4e805f386a70
35,810
def permute_observation(obs, perm):
    """Given a permutation, shuffle pixels of the observation."""
    flat = obs.flatten()
    shuffled = flat[perm]
    return shuffled.reshape(obs.shape)
ac18bce7d344b89cbcba8ea22ebcb92ff3f9c0e9
35,812
def _check_type_and_items(_converted_item, _control_item, _new_type): """This function facilitates testing the :py:func:`khoros.utils.core_utils.convert_set` function.""" _correct_type = True if isinstance(_converted_item, _new_type) else False _items_present = True for _item in _control_item: if _item not in _converted_item: _items_present = False return all((_correct_type, _items_present))
8d1b94644f1e8f910c8152d1aed4dc3f35a1db7b
35,814
from typing import Optional
import os
def get_token() -> Optional[str]:
    """get the token from environment variable"""
    # check the candidate variables in priority order; empty strings are
    # treated as unset, matching the original `or` chain
    for env_name in ('WECHATY_PUPPET_SERVICE_TOKEN', 'TOKEN', 'token'):
        value = os.environ.get(env_name, None)
        if value:
            return value
    return None
57ff2a336aace23d399522439e1aaf9f76e82ed0
35,815
def size_to_bytes(size):
    """ Return the size as a bytes object.

    @param int size: a 32-bit integer that we want to convert to bytes
    @rtype: bytes

    >>> list(size_to_bytes(300))
    [44, 1, 0, 0]
    """
    # encode as a 4-byte (32-bit) little-endian value
    return size.to_bytes(4, byteorder="little")
722ab782250570779519f8d8fdca4d5b449324d5
35,817
def mergetwolist(l1, l2):
    """
    This function merges items in two sorted arrays.

    Returns a new sorted list containing every item of both inputs
    (duplicates kept). If one input is empty/None the other is returned;
    if both are empty/None, returns None.
    """
    # Bug fix: the original started with `if not (l1 and l2): return`,
    # which returned None whenever EITHER list was empty — making the
    # intended "return the other list" branches below unreachable.
    if not l1 and not l2:
        return
    if not l1:
        return l2
    if not l2:
        return l1
    n, m = len(l1), len(l2)
    merged = []
    i = j = 0
    while i < n and j < m:
        # <= keeps l1's element first on ties, matching the original's
        # "append both" behavior for equal items
        if l1[i] <= l2[j]:
            merged.append(l1[i])
            i += 1
        else:
            merged.append(l2[j])
            j += 1
    # one of these extends with an empty slice
    merged.extend(l1[i:])
    merged.extend(l2[j:])
    return merged
48d4adc39b24dece6739e0f51eaef790785dbc2f
35,820
def remove_clear_passwd(data):
    """
    Removes clear passwords from the data received
    :param data: data with clear password
    :return: data without the password information
    """
    # replace everything between each marker and the next newline with ******
    for marker in ('password: ', 'passwd: '):
        start = data.find(marker)
        while start != -1:
            line_end = data.find('\n', start)
            data = data[:start] + '{}******'.format(marker) + data[line_end:]
            start = data.find(marker, start + 1)
    return data
d607a025a60e8fa3abadde9724c729f85b03c9c7
35,821
from typing import Any
def issubclass_safe(candidate: Any, ancestor: Any) -> bool:
    """Returns True if the candidate is a subclass of the ancestor, else False.
    Will return False instead of raising TypeError if the candidate is not
    a class."""
    try:
        result = issubclass(candidate, ancestor)
    except TypeError:
        # candidate was not a class at all
        result = False
    return result
54fec7e6861de36015d8264978b76073704080d6
35,822
import requests
def served(url):
    """Return True if url returns 200."""
    # redirects are not followed, so a 301/302 counts as not served
    response = requests.get(url, allow_redirects=False)
    return response.status_code == 200
1c4f1025bc36dc6e1b1f7fe1d519e5d557ade813
35,823
def _compute_adjusted_padding(input_size, output_size, kernel_size, stride, padding, dilation=1): """Computes adjusted padding for desired ConvTranspose `output_size`.""" kernel_size = (kernel_size -1) * dilation + 1 if padding == 'SAME': expected_input_size = (output_size + stride - 1) // stride if input_size != expected_input_size: raise ValueError(f'input_size must be {expected_input_size} when padding is SAME') padding_needed = max(0, (input_size-1)*stride + kernel_size - output_size) total_padding = padding_needed // 2 elif padding == 'VALID': expected_input_size = (output_size - kernel_size + stride) // stride if input_size != expected_input_size: raise ValueError(f'input_size must be {expected_input_size} when padding is VALID') total_padding = 0 else: total_padding = padding[0] expanded_input_size = (input_size - 1) * stride + 1 padded_out_size = output_size + kernel_size - 1 pad_before = kernel_size - 1 - total_padding pad_after = padded_out_size - expanded_input_size - pad_before return (pad_before, pad_after)
009bdc034d019d130dfc6f050c1f814ff2dbac34
35,824
def czyMur(mapObj, x, y):
    """Return True when position (x, y) on the map is a wall, False otherwise."""
    off_map = x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x])
    if off_map:
        # positions outside the map are never walls
        return False
    if mapObj[x][y] in ('#'):
        return True  # wall in the way
    return False
617ebf983c41fdcb5399f57b89d126469e93875e
35,825
import numpy
def melody_blocker(snippet):
    """
    Makes a mask where anything above the top line of the snippet is 1.
    Also enforces empty space a major 2nd above and below the melody.
    (This means the optimizer will consider any note above the top line
    of the melody, or too close to the melody, wrong.)
    """
    envelope = numpy.copy(snippet)
    _, length = snippet.shape
    for col in range(length):
        occupied = [p for p in range(88) if snippet[p, col] != 0]
        if not occupied:
            continue
        top = max(occupied)
        # everything from the top melody note upward is blocked
        envelope[top:, col] = 1
        for pitch in occupied:
            # Bug fix: for pitch < 2 the original slice start (pitch - 2)
            # went negative and wrapped around, producing an EMPTY slice so
            # the neighborhood was never masked. Clamp the start at 0.
            envelope[max(0, pitch - 2):pitch + 3, col] = 1
    return envelope
fed4e3e78f4dc70898f16be93522b8ba9d15635a
35,826
import json
def generate_code(data):
    """ generate a small arduino program code with the switch on and off extracted from the waveform """
    if not "on" in data:
        return "Please add data"

    # extract the data from the switch on/off store container
    switchON = data["on"]
    switchOFF = data["off"]
    fs = data["fs"]          # sample rate; presumably Hz — TODO confirm
    length = data["length"]  # total signal length in samples

    # fill an array with the switch on and off timings needed for the source code
    timings = []
    # transform all times into microseconds (fs samples per second)
    timeScale = 1000000 / fs
    # walk all on/off pairs, doing a small plausibility check as we go
    for i in range(len(switchON)):
        # an off edge must not precede its on edge
        if switchOFF[i] < switchON[i]:
            return "Error! Please optimize the signal extraction."
        if i < len(switchON)-1:
            # the next on edge must not precede the current off edge
            if switchON[i+1] < switchOFF[i]:
                return "Error! Please optimize the signal extraction."
            # [high duration, low duration] in microseconds
            timings.append([round((switchOFF[i] - switchON[i])*timeScale),
                            round((switchON[i+1] - switchOFF[i])*timeScale)])
        else:
            # last pair: the low phase runs to the end of the signal
            timings.append([round((switchOFF[i] - switchON[i])*timeScale),
                            round((length - switchOFF[i])*timeScale)])

    # transform this into source code using json serialization and string
    # transforms to get C++11 brace-style initializer lists
    output = """
# include <Arduino.h>

const int signalPin = 32;

unsigned int timings[{}][2] = {};

void myDelay(unsigned int d) {{
    if (d < 10000) {{
        delayMicroseconds(d);
    }} else {{
        delay(d / 1000);
    }}
}}

void setup() {{
    pinMode(signalPin, OUTPUT);
}}

void loop() {{
    for (unsigned int* elem : timings) {{
        digitalWrite(signalPin, HIGH);
        myDelay(elem[0]);
        digitalWrite(signalPin, LOW);
        myDelay(elem[1]);
    }}
}}
""".format(
        len(timings),
        json.dumps(timings).replace('[', '{').replace(']', '}').replace('}, ', '},\n\t').replace('{{', '{\n\t{').replace('}}', '}\n}'))
    return output
081e21629956b34685529506a2d778dd4cee086f
35,827
def get_default_token():
    """ Returns the value of the default auth token """
    # standard in-cluster Kubernetes service-account token location
    token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
    with open(token_path, "rb") as tokenfile:
        return tokenfile.read().strip()
0279b994bec8017c61baa56997d19bf7446e7152
35,828
def massage_ip(ip: str) -> str:
    """
    Prepend 10.10.10 to a string.

    Allow the user to pass just the last segment of an ipv4 address;
    anything already containing a dot is returned unchanged.
    """
    # Idiom fix: the original counted dots via a list comprehension just to
    # compare against 0 — a plain membership test says the same thing.
    if "." not in ip:
        return f"10.10.10.{ip}"
    return ip
43335768b3dd931b4bc5aa603a527696c5128001
35,829
import requests
def does_exist(url: str) -> bool:
    """Determines if a particular file exists on the server."""
    # HEAD avoids downloading the body; 200 means the resource is there
    response = requests.head(url)
    return response.status_code == requests.codes.ok
b506d654e5a89a1a35911e8c8089bcc442169e6b
35,830
from typing import OrderedDict
def build_fhir_id(key1, value1, key2, value2, key3, value3):
    """
    Construct an OrderedDict for ID
    :param key1:
    :param value1:
    :param key2:
    :param value2:
    :param key3:
    :param value3:
    :return:
    """
    return OrderedDict(((key1, value1), (key2, value2), (key3, value3)))
f2db277f21683b3ce910b5398b90e607f9cf6a40
35,831
import argparse
import os
def CheckExtension(choices):
    """Argparse action to check a file extension at loading.

    Arguments:
        choices (Dictionary): List of allowed extensions {'ext1, ext2, ...'}.
    """
    class ExtensionAction(argparse.Action):
        def __call__(self, parser, namespace, fname, option_string=None):
            if fname is None:
                setattr(namespace, self.dest, None)
                return
            # extension without the leading dot
            ext = os.path.splitext(fname)[1][1:]
            if ext in choices:
                setattr(namespace, self.dest, fname)
            else:
                option_string = '({})'.format(option_string) if option_string else ''
                parser.error(str(ext) + " isn't supported, please use " + str(choices) + " files")
    return ExtensionAction
8b6a34c5f80abc1ae920a7bb37bdd8607489d8db
35,832
def trapezint(f, a, b, n):
    """ Uses trapezoid rule to find the integral of a function """
    step = (b - a) / float(n)
    total = 0.0
    for k in range(int(n)):
        left = f(a + k * step)
        right = f(a + (k + 1) * step)
        # area of one trapezoid
        total += (1 / 2.0) * step * (left + right)
    return total
3e03f1c53b2d8fbcf150a5d672b384308ef033b8
35,833
from typing import Dict
import yaml
import os
def load_config(config_filepath: str) -> Dict:
    """ Loads a YAML config file and expands placeholders. """
    with open(config_filepath, 'r') as stream:
        config = yaml.safe_load(stream)

    placeholders = {
        "${subdir_fname_without_ext}": os.path.splitext(config_filepath)[0]
    }

    # Expansion currently only works on top level!
    for key in list(config):
        value = config[key]
        if not isinstance(value, str):
            continue
        for placeholder, replacement in placeholders.items():
            if placeholder in value:
                value = value.replace(placeholder, replacement)
        config[key] = value
    return config
89a27a1af35b8a9b0d0283021da43dc43d69668c
35,835
def sum_freq(wl1, wl2):
    """ Input wavelength in nm """
    # combined wavelength for sum-frequency generation
    product = wl1 * wl2
    return product / (wl1 + wl2)
e15ba5feba62c97ef167e753181fffd52ca8f269
35,836
def is_basic_type(signature):
    """Returns True if the signature is a basic type

    'a', '(', '{', and 'v' are not considered basic types because they
    usually cannot be handled the same as other types."""
    basic = ('b', 'd', 'g', 'i', 'n', 'o', 'q', 's', 't', 'u', 'x', 'y')
    return signature in basic
5a454a699e6e7c0f89806f3cdba12ce78e42477e
35,838
def unique(value):
    """Check that there is only one value in the list, and return it.

    This is handy in the context of KB, where everything's a list but
    it's common to expect that there's only one value. Returns None for
    an empty/falsy input; raises ValueError for multiple values.
    """
    if not value:
        return None
    count = len(value)
    if count != 1:
        raise ValueError('Expected a single value, got multiple (%s)' % count)
    return value[0]
c81dc2bfda37b89956aa41ce9646c7ef5a56e86d
35,839
import os
def get_python_path():
    """ return folder path """
    path = os.environ["PYTHONPATH"]
    # drop a single trailing path separator, if present
    if path.endswith(";"):
        path = path[:-1]
    return path
2ba61aadc96e9be7a728d377e23baf7d65f5973b
35,840
import re
def correct_namespace(name, api_name, env_name) -> bool:
    """
    Checks that a name of a thing we want to create in Apigee matches our
    namespacing conventions.

    e.g. for api_name="canary-api" and env_name="internal-dev"

    |--------------------------------------------------------------+--------|
    | name                                                         | result |
    |--------------------------------------------------------------+--------|
    | "canary-api-internal-dev"                                    | True   |
    | "canary-api-extra-thing-internal-dev"                        | True   |
    | "canary-apiinternal-dev"                                     | False  |
    | "canary-api-internal-dev-application-restricted"             | True   |
    | "canary-api-extra-thing-internal-dev-application-restricted" | True   |
    |--------------------------------------------------------------+--------|

    :param name: Name of thing in Apigee.
    :param api_name: The meta.api.name item from your manifest
    :param env_name: The environment name (e.g. 'internal-dev', 'int', or 'prod')
    """
    pattern = f"^{api_name}(-[a-z]+)*-{env_name}(-[a-z]+)*$"
    return re.match(pattern, name) is not None
8e812a5e2729779837b85eed29ff9bb7a4a05953
35,841
def str_builtin():
    """str: Immutable strings."""
    # decode a latin-1 byte string (0xe9 is 'é')
    raw = b"you are my prot\xe9g\xe9"
    return raw.decode('latin-1')
e6ffc3850ffc059ecdcf9aedf4e757b60c4e998f
35,843
def get_all_tables() -> str:
    """Return the SQL that lists every table in a SQLite database.

    Returns:
        str: a query selecting table names from sqlite_master,
        ordered alphabetically.
    """
    query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
    return query
01713329ced8b4dd4e908440d2db8f0ee5963d5d
35,845
def confusion_matrix(dataset, subgroup, targetColumn):
    """Returns the confusion matrix of a dataset with a subgroup.

    Rates are fractions of the WHOLE dataset:
    [[subgroup positives, complement positives],
     [subgroup negatives, complement negatives]]
    """
    n = len(dataset)
    # rows of the dataset that are not in the subgroup
    rest = dataset[~dataset.index.isin(subgroup.index)]

    sub_pos = len(subgroup[subgroup[targetColumn] == 1]) / n
    sub_neg = len(subgroup[subgroup[targetColumn] == 0]) / n
    rest_pos = len(rest[rest[targetColumn] == 1]) / n
    rest_neg = len(rest[rest[targetColumn] == 0]) / n

    return [[sub_pos, rest_pos], [sub_neg, rest_neg]]
bffd2c8f2aebc7d5e3f41373cfa8291797f122df
35,846
from typing import Iterable
from typing import Tuple
def unparse_accept_header(values: Iterable[Tuple[str, float]]) -> str:
    """Like werkzeug.datastructures.MIMEAccept(values).to_header()."""
    # highest quality first; q=1 entries are rendered bare
    ordered = sorted(values, key=lambda pair: pair[1], reverse=True)
    rendered = []
    for value, quality in ordered:
        if quality != 1:
            value = f"{value};q={quality}"
        rendered.append(value)
    return ','.join(rendered)
014cdab8c4e623bd505c3319ba3023f8a547543d
35,847
from typing import Any from typing import Callable def _map(obj: Any, fn: Callable) -> Any: """Recursively maps a function to a nested object. If the passed object is a list, dictionary, set, or tuple, then all child elements are recursively mapped. Args: obj: The object to map against fn: The function to map Returns: The mutated object """ if isinstance(obj, dict): return {k: _map(v, fn) for k, v in obj.items()} elif ( isinstance(obj, list) or isinstance(obj, set) or isinstance(obj, tuple) ): return [_map(v, fn) for v in obj] else: return fn(obj)
cecbfec90f2a870624b9f5ff51d5f4b9ed3865c7
35,849
import yaml
def cmd_checks():
    """
    Test fixture setup to load in the relevant test data
    """
    with open("tests/data/cmd_checks.yaml", "r") as handle:
        loaded = yaml.safe_load(handle)
    return loaded["cmd_checks"]
0bf627e255ee9aedfdec840c2106f85a19cc222d
35,850
import ipaddress
def is_private_cidr(cidr: str) -> bool:
    """Check if cidr is not too broad and cover public networks."""
    network = ipaddress.ip_network(cidr)
    return network.is_private
22510000fe6584a863a44ee7d4f654bdb2260b46
35,851
import re
def validate_ip_address_regex(ip: str) -> str:
    """
    Validate an IP address using REGEX, rejecting octets with leading zeros
    as outlined in the problem description.

    Time complexity: O(1) because the patterns to match have constant length.
    Space complexity: O(1).
    """
    # raw strings avoid surprises with special characters
    chunk_ipv4 = r'([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'
    pattern_ipv4 = re.compile(r'^(' + chunk_ipv4 + r'\.){3}' + chunk_ipv4 + r'$')
    if pattern_ipv4.match(ip):
        return "IPv4"

    chunk_ipv6 = r'([0-9a-fA-F]{1,4})'
    pattern_ipv6 = re.compile(r'^(' + chunk_ipv6 + r'\:){7}' + chunk_ipv6 + r'$')
    if pattern_ipv6.match(ip):
        return "IPv6"
    return "Neither"
19a53f2d3305dc0cd2f07dd4590cbd7b68c0a0f9
35,853
def str2int(val, base=None):
    """String to integer conversion.

    Passes ints and None through unchanged. Without an explicit base,
    strings containing '0x'/'0b' are parsed as hex/binary, anything else
    as decimal.

    Raises:
        ValueError: when the value cannot be converted (original cause
        attached via exception chaining).
    """
    try:
        if isinstance(val, int) or val is None:
            return val
        elif base:
            return int(val, base)
        elif '0x' in val:
            return int(val, 16)
        elif '0b' in val:
            return int(val, 2)
        else:
            return int(val)
    except (ValueError, TypeError) as err:
        # Fix: the original bound the exception as `e` but never used it;
        # chain it so the root cause survives in the traceback.
        raise ValueError("Can't convert '%s' to int!" % val) from err
b4c9a4329670bf28f01b292f1686800f6186d487
35,854
def fn_number_cols(df):
    """Secondary helper to pick columns whose LABEL parses as a number.

    Used for dtype detection by inspecting column names. Note the quirk
    (preserved from the original): labels parsing to 0 ("0", "0.0") are
    falsy under `if float(n)` and are therefore excluded.

    :param df: pandas DataFrame
    :return: list of column labels that parse as non-zero numbers
    """
    numeric_labels = []
    for label in list(df.columns):
        # Fixes: the original computed an unused per-column value
        # (`v = df.iloc[:, i]`) and swallowed every exception with a bare
        # `except:`; only conversion failures should be ignored.
        try:
            if float(label):
                numeric_labels.append(label)
        except (TypeError, ValueError):
            pass
    return numeric_labels
59ddf56c44322c3c4a550b49768bba90139afc56
35,856
import token
def is_append_to_all(line):
    """
    Check if a line is an __all__.append line()

    @see: L{process_append_to_all}

    Returns True on a match; falls through (returning None) otherwise,
    matching the original's implicit-None behavior.
    """
    # form 1: __all__.append(string)
    matches_append_call = (
        len(line) == 4
        and line[0] == (token.NAME, '__all__')
        and line[1] == (token.OP, '.')
        and line[2] == (token.NAME, 'append')
        and isinstance(line[3], list)
        and len(line[3]) == 3
        and line[3][0] == (token.OP, '(')
        and line[3][1][0] == token.STRING
    )
    if matches_append_call:
        return True

    # form 2: __all__ += [string]
    matches_augmented = (
        len(line) == 3
        and line[0] == (token.NAME, '__all__')
        and line[1] == (token.OP, '+=')
        and isinstance(line[2], list)
        and len(line[2]) == 3
        and line[2][0][1] in '[('
        and line[2][1][0] == token.STRING
    )
    if matches_augmented:
        return True
c4225aaed15b5a3c41ef8ff66ba72cbba5646a84
35,858
def get_commun_films(df_movies, Actor1, Actor2):
    """
    Function that gives the movies in which two actors have played

    Parameters
    ----------
    df_movies : dataframe
        IMDb movie database
    Actor1 : string
        name of the first actor entered by the user via the Tkinter interface
    Actor2 : string
        name of the second actor entered by the user via the Tkinter interface

    Returns
    -------
    result : string
        result displayed in the graphical interface
    """
    Actor1 = Actor1.lower()
    Actor2 = Actor2.lower()

    # 1 - Build the dataframe: keep only movies featuring BOTH actors.
    # Bug fix: the original filtered df_movies twice (the second filter was
    # applied to df_movies, not df), so the Actor1 filter was discarded and
    # the result contained every movie with Actor2 regardless of Actor1.
    df = df_movies[df_movies['actors_low'].str.contains(Actor1, na=False)]
    df = df[df['actors_low'].str.contains(Actor2, na=False)]

    # 2 - Extract the titles into a bullet list
    list_film = list(df['original_title'])
    liste = ""
    for element in list_film:
        liste = liste + "- " + element + "\n"

    # 3 - Assemble the user-facing message
    if liste == "":
        result = "Ces 2 acteurs n'ont pour l'instant joué dans aucun film ensemble."
    else:
        result = "Ces 2 acteurs ont joué ensemble dans les films suivant :\n" + liste
    return result
d716138ff19b3a58c668a1f9b050e16bde927ffe
35,859
def echo_worker(data):
    """
    Example of worker that simply echoes back the received data.

    :param data: Request data dict.
    :returns: the data, unchanged
    """
    return data
d694b301aefdcb1631567b3e1c269b24aa827824
35,860
import math
def evaluate(second, minimum=30, incr=15):
    """Billing evaluation, isolated in a function so it is easy to test.

    None (or zero) seconds bill nothing; anything up to *minimum* bills the
    minimum; longer calls bill in *incr*-second intervals, rounding up.
    Avoids string transforms, map() and filter() — plain Boolean logic.
    """
    if not second:
        # None (or 0) encountered: nothing to bill
        return 0
    if second <= minimum:
        # minimum billing is 30 seconds
        return minimum
    # round up to the next <incr>-second interval
    return math.ceil(second / incr) * incr
2b252e3fdc05888514e138fb5c445d5892dedf19
35,861
def get_annots_by_chr(gene_coordinates, gene_expressions, gene_types, gene_metadata):
    """Merge coordinates and expressions, return Ideogram annotations.

    :param gene_coordinates: dict gene_id -> dict with 'symbol', 'type',
        'chromosome', 'start', 'stop'
    :param gene_expressions: dict group -> dict gene symbol -> list of
        expression values (strings; may contain 'NA')
    :param gene_types: dict gene type -> count (mutated in place below)
    :param gene_metadata: dict gene symbol -> list of extra annotation fields
    :return: [annots_by_chr_by_group, sorted_gene_types]
    """
    annots_by_chr_by_group = {}

    # Some gene types (e.g. pseudogenes) exist in the genome annotation,
    # but are never expressed.  Don't count these.
    for gene_id in gene_coordinates:
        coordinates = gene_coordinates[gene_id]
        gene_symbol = coordinates['symbol']
        gene_type = coordinates['type']
        lacks_gene_type = True
        for group in gene_expressions:
            if gene_symbol not in gene_expressions[group]:
                lacks_gene_type = False
        # NOTE(review): the flag becomes False when the symbol is MISSING
        # from some group, and the decrement below fires in exactly that
        # case — the opposite of what the comment above describes.  This
        # condition looks inverted; confirm the intended semantics before
        # changing it.
        if lacks_gene_type is False:
            gene_types[gene_type] -= 1

    # Sort keys by descending count value, then
    # make a list of those keys (i.e., without values).
    sorted_items = sorted(gene_types.items(), key=lambda x: -int(x[1]))
    sorted_gene_types = [x[0] for x in sorted_items]

    first_key = list(gene_expressions.keys())[0]

    for gene_id in gene_coordinates:
        coordinates = gene_coordinates[gene_id]
        symbol = coordinates['symbol']
        chr = coordinates['chromosome']
        start = int(coordinates['start'])
        stop = int(coordinates['stop'])
        length = stop - start
        for group in gene_expressions:
            # Skip genes this group has no expression data for.
            if symbol not in gene_expressions[group]:
                continue
            if group not in annots_by_chr_by_group:
                annots_by_chr_by_group[group] = {}
            if chr not in annots_by_chr_by_group[group]:
                annots_by_chr_by_group[group][chr] = {'chr': chr, 'annots': []}
            expressions = gene_expressions[group][symbol]
            # Ideogram annotation row: symbol, position, length, the index
            # of the gene type in the sorted list, then metadata fields.
            annot = [
                symbol,
                start,
                length,
                sorted_gene_types.index(coordinates['type'])
            ] + gene_metadata[symbol]

            # Convert numeric strings to floats, and clean as needed.
            for i, exp_value in enumerate(expressions):
                if exp_value == 'NA':
                    # Missing values are encoded as -1 in the output.
                    # print(symbol + ' had an "NA" expression value; setting to -1')
                    exp_value = -1
                annot.append(round(float(exp_value), 3))

            annots_by_chr_by_group[group][chr]['annots'].append(annot)

    return [annots_by_chr_by_group, sorted_gene_types]
077a1ec7744b8b92f0066240b7343ecc6a135435
35,862
from typing import Sequence, Tuple


def get_default_powerup_distribution() -> Sequence[Tuple[str, int]]:
    """Return the standard (powerup name, count) distribution."""
    return (
        ('triple_bombs', 3),
        ('ice_bombs', 3),
        ('punch', 3),
        ('impact_bombs', 3),
        ('land_mines', 2),
        ('sticky_bombs', 3),
        ('shield', 2),
        ('health', 1),
        ('curse', 1),
    )
1e125dfe64627b25e56e9f905d4fc8cc1a878684
35,863
import os


def check_path(pth=None):
    """Normalise a directory-path argument.

    Args:
        pth (str): path to directory, '' or None for "use local directory".

    Returns:
        str: '' when no path was given, otherwise the path itself.

    Raises:
        UserWarning: when the given path is not an existing directory.
    """
    if pth is None:
        return ''
    if pth == '' or os.path.isdir(pth):
        return pth
    raise UserWarning('Path is not available')
ad79927a8f24bb51185e6294e174d2428b35e442
35,864
def make_wellcome_message(login) -> str:
    """Format the greeting shown during the login procedure."""
    shown_name = 'my love' if login == 'johnny' else login
    return f'hello {shown_name}'
37ad6b7d997876791dc65769753002b368e354b4
35,865
import math


def calcStep(step):
    """Snap *step* to a "nice" value: 1, 2, 5 or 10 times a power of ten."""
    if step == 0:
        return 1
    exponent = math.floor(math.log10(step))
    mantissa = float(step) / math.pow(10, exponent)
    # Pick the closest "round" mantissa from {1, 2, 5, 10}.
    if mantissa < 1.5:
        nice = 1
    elif mantissa < 3:
        nice = 2
    elif mantissa < 7:
        nice = 5
    else:
        nice = 10
    return math.pow(10, exponent) * nice
04520d5a0596a0ffd38799d1d1c079561e757c78
35,866
def get_etr_dash_t(etr_t, C_tol, C_eff, C_bal, C_leak):
    """Corrected heat-exchange efficiency of a heat-exchange ventilation
    system, equation (1).

    Args:
        etr_t (float): temperature exchange efficiency (-)
        C_tol (float): correction for catalogue display tolerance (-)
        C_eff (float): correction for effective ventilation rate (-)
        C_bal (float): correction for supply/exhaust balance (-)
        C_leak (float): correction for envelope leakage under excess
            exhaust (-)

    Returns:
        float: corrected heat-exchange efficiency (-)
    """
    # Equation (1): the base efficiency times every correction factor.
    return etr_t * C_tol * C_eff * C_bal * C_leak
a07177e6da85a93bee25d67cd29a19df48beae4c
35,867
def interpolate(x0, y0, x1, y1, x):
    """Linear interpolation between two points.

    Parameters
    ----------
    x0, y0 : lower point
    x1, y1 : upper point
    x : requested x-value

    Returns
    -------
    int, float
        Interpolated y-value at *x*.
    """
    # Same weighted-average formula as the classic two-point form;
    # kept in this exact shape so float results are bit-identical.
    numerator = y0 * (x1 - x) + y1 * (x - x0)
    return numerator / (x1 - x0)
082cc92c4c170dbba479e396731326e450b5d765
35,868
def menu(prompt, items):
    """Show a simple numbered command-line menu.

    Prints one numbered line per item, then loops reading input until the
    user enters a valid option number.

    Returns the zero-based index into *items* of the chosen option.
    """
    for number, item in enumerate(items, start=1):
        print(str(number) + ": " + item)
    while True:
        answer = input(prompt)
        try:
            choice = int(answer)
        except ValueError:
            print("error: Input must be a number. Please try again.")
            continue
        if not 1 <= choice <= len(items):
            print("error: Provided option not in range. Please try again.")
            continue
        return choice - 1
7d7c2218112a617387b34dd89286801dce7d11f8
35,870
from typing import Any, Dict, List, Union


def key_to_index(values: Dict[Union[str, int], Any],
                 ordered_keys: List[Union[str, int]]) -> Dict[int, Any]:
    """Replace keys of a ``dict`` with their index in an ordered list.

    Integer keys are kept as-is; any other key is replaced by its
    position in ``ordered_keys``.

    :param values: A dictionary.
    :param ordered_keys: Ordered keys.
    :returns: New dictionary where keys are replaced.
    """
    result = {}
    for key, value in values.items():
        index = key if isinstance(key, int) else ordered_keys.index(key)
        result[index] = value
    return result
86e0f5f34cb1d8d77e15e5a4ec11eda6b8c303ca
35,872
import requests


def requests_adapter(url: str) -> dict:
    """Adapter encapsulating ``requests.get``: fetch *url* and return the
    decoded JSON body."""
    response = requests.get(url)
    return response.json()
f57c03cf6573ba6043390a8a099125ecf3ba3315
35,874
def decipher(signature, cipher):
    """Decipher *signature* by applying the space-separated operations in
    *cipher*.

    Each operation is a letter followed by an integer N:
      * ``sN`` - slice off the first N characters,
      * ``rN`` - reverse the signature (N is parsed but ignored),
      * ``wN`` - swap the characters at positions 0 and N.

    :return: the deciphered string.
    """
    signature = list(signature)
    for operation in cipher.split(' '):
        n = int(operation[1:])
        # BUG FIX: the original compared one-character strings with 'is',
        # which relies on CPython string interning and is not a guaranteed
        # equality test; '==' is the correct comparison.
        if operation[0] == 's':
            signature = signature[n:]
        elif operation[0] == 'r':
            signature = signature[::-1]
        elif operation[0] == 'w':
            signature[0], signature[n] = signature[n], signature[0]
    return ''.join(signature)
4ffb87edab7921912f0db29727d9e888ff2eed52
35,876
import logging


def validate_params(params):
    """Validate the app's input parameters.

    Args:
        params (dict): must contain
            genome_ref (str): object reference in 'A/B/C' form
            output_name (str): output name, no spaces allowed
        May also contain 'app_test' (enables test mode) and 'test_num'.

    Returns:
        list: [genome_ref, output_name, test_bool]

    Raises:
        Exception: on any missing or malformed parameter.
    """
    for x in ["genome_ref", "output_name"]:
        if x not in params:
            raise Exception(f"Expecting parameter {x} as an input, but not found. " + ", ".join(params.keys()))

    if len(params["genome_ref"].split("/")) != 3:
        raise Exception(f"Expecting genome ref in format 'A/B/C', instead got {params['genome_ref']}")
    if " " in params['output_name']:
        raise Exception(f"Output name cannot contain spaces. Output name: {params['output_name']}")

    # Test mode is enabled simply by the presence of the 'app_test' key.
    test_bool = 'app_test' in params
    if "test_num" in params:
        logging.info("Running test number " + params["test_num"])

    return [params["genome_ref"], params["output_name"], test_bool]
67ff5d52b06ec00aee47d04f7c2d66748ff6f5b2
35,877
import math


def math_round(number: float, decimals: int = 0) -> float:
    """Round mathematically (half away from zero), not banker's style.

    Unlike the built-in ``round()``, which rounds halves to the nearest
    even digit, this rounds halves away from zero.

    :param number: value to round
    :param decimals: number of decimal places to keep
    :return: rounded float

    >>> math_round(2.735, 2)
    2.74
    >>> round(2.735, 2)
    2.73
    """
    if math.isnan(number):
        return math.nan
    shifted = number * 10 ** decimals
    # Strictly below the .5 midpoint (in absolute value): truncate toward
    # -inf; at or above it: round up.  Together that is "half away from
    # zero" for positive and negative inputs alike.
    if abs(shifted) - abs(math.floor(shifted)) < 0.5:
        return math.floor(shifted) / 10 ** decimals
    return math.ceil(shifted) / 10 ** decimals
a223494af85a016ed8b1c0e3ffe6aa9593bd8da2
35,878
def indent_string(string, indent='  ', include_first=True, include_last=False):
    """Indent a string by inserting *indent* after every newline.

    :param string: The string to indent.
    :param indent: The string to use as indentation.
    :param include_first: Also indent the first line (before the first
        newline).
    :param include_last: If the string ends on a newline, keep the indent
        added after it.
    :return: A new string.
    """
    result = string.replace('\n', '\n' + indent)
    if include_first:
        result = indent + result
    # Unless requested, drop the dangling indent a trailing newline creates.
    if not include_last and result.endswith('\n' + indent):
        result = result[:-len(indent)]
    return result
95bc16848fe6e095677f4a95a517b43fb10fd315
35,879
import sys


def package_path():
    """Return the absolute path to this package's base directory.

    Scans ``sys.path`` for an entry containing the package name and
    derives the package root from it.

    Raises:
        Exception: when no sys.path entry mentions the package.
    """
    package_name = 'division_detection'
    # BUG FIX: the original indexed [0] into the filtered list BEFORE the
    # 'is None' check, so a missing entry raised a bare IndexError and the
    # descriptive Exception below was unreachable.
    mjhmc_path = next((path for path in sys.path if package_name in path), None)
    if mjhmc_path is None:
        raise Exception('You must include {} in your PYTHON_PATH'.format(package_name))
    prefix = mjhmc_path.split(package_name)[0]
    return "{}{}".format(prefix, package_name)
ec46a30051ec760c4c33d7a146c42004212a23ef
35,880
def generate_TF(corpus, corpus_dict):
    """Generate term frequencies for queries.

    :param corpus: Unused by this function; kept for backward
        compatibility with existing callers.
    :param corpus_dict: Mapping of query/document number to its list of
        words.
    :return tf_dict: Mapping of document number to {word: term frequency}.
    """
    from collections import Counter

    tf_dict = dict()
    for document_number, words_in_it in corpus_dict.items():
        total = len(words_in_it)
        # Counter yields every word's count in one O(n) pass, replacing
        # the original O(n^2) list.count() call per word occurrence.
        tf_dict[document_number] = {
            word: count / total for word, count in Counter(words_in_it).items()
        }
    return tf_dict
4967f6968c4974fa3c8b400c9fec1a6b39f5ae43
35,881
import re


def process_en(text):
    """Preprocess an English string for downstream processing.

    Collapses runs of punctuation (ASCII and full-width variants), strips
    a fixed set of symbols, stray CJK/accented characters, digits and
    control characters, then lower-cases and trims the result.
    """
    # Runs of full-width or ASCII exclamation marks -> single space.
    text = re.sub('[!!]+', " ", text)
    # Runs of full-width or ASCII question marks -> single space.
    text = re.sub('[??]+', " ", text)
    # Mixed ASCII/full-width punctuation, brackets and quote marks -> space.
    text = re.sub("[\"#\\$%&()◎—""-~()∩*+,-./:;:;;<=>@,。★、…【】《》“”‘’""·[\\]^_`{|}~#\\\]+", " ", text)
    # A few stray Japanese kana and accented Latin letters seen in the data.
    text = re.sub("[とに一緒にèéêóも]+", " ", text)
    # Digits -> space.
    text = re.sub("[0-9]+", " ", text)
    # Whitespace and legacy control characters observed in the corpus.
    filters = ['\t', '\n', '\x97', '\x96']
    text = re.sub("|".join(filters), ' ', text)
    return text.strip().lower()
31de255f55298ebdf49e851b5f60046df94fb29c
35,883
def build_feature_dict_mapper(feature_names):
    """Build a mapper function for ``tf.data.Dataset.map``.

    Args:
        feature_names: List of feature names.

    Returns:
        A function converting positional tuples into
        (dict of feature name -> value, label), where the label is the
        tuple's last element.
    """
    def mapper(*tuple_args):
        features = {name: tuple_args[i] for i, name in enumerate(feature_names)}
        return features, tuple_args[-1]
    return mapper
9b3837cf3d1ff7bcc39242d660c255863a8dc98c
35,884
def reshape_axis(ax, axis_size_pix):
    """reshape axis to the specified size in pixels

    this will reshape an axis so that the given axis is the specified size
    in pixels, which we use to make sure that an axis is the same size as
    (or an integer multiple of) the array we're trying to display. this is
    to prevent aliasing

    NOTE: this can only shrink a big axis, not make a small one bigger,
    and will throw an exception if you try to do that.

    Arguments
    ---------
    ax : `matpotlib.pyplot.axis`
        the axis to reshape
    axis_size_pix : `int`
        the target size of the axis, as (height, width) in pixels

    Returns
    -------
    ax : `matplotlib.pyplot.axis`
        the reshaped axis
    """
    # Refuse to grow: target (height, width) must fit in the current bbox.
    if ax.bbox.width < axis_size_pix[1] or ax.bbox.height < axis_size_pix[0]:
        raise Exception("Your axis is too small! Axis size: ({}, {}). Image size: ({}, {})".format(
            ax.bbox.width, ax.bbox.height, axis_size_pix[1], axis_size_pix[0]))
    # Figure extent in inches, then converted to pixels via the figure dpi.
    bbox = ax.figure.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
    fig_width, fig_height = bbox.width*ax.figure.dpi, bbox.height*ax.figure.dpi
    # Axis size as a fraction of the figure, as set_position expects.
    rel_axis_width = axis_size_pix[1] / fig_width
    rel_axis_height = axis_size_pix[0] / fig_height
    # Keep the axis origin, replace only its width and height.
    ax.set_position([*ax.get_position().bounds[:2], rel_axis_width, rel_axis_height])
    return ax
5a029753014ebb4af4683be3a1d50ae4130ccc32
35,885
import numpy


def getClosestRotMat(M):
    """Compute the rotation matrix closest to a given matrix M.

    Projects M onto the rotation group via the SVD; when the plain
    projection is a reflection (negative determinant), the last singular
    direction is flipped so a proper rotation is returned.  This avoids
    accumulated numerical error in matrices that should be rotations.

    Attributes:
        M: (approximate) 3x3 rotation matrix

    Return:
        R: closest proper rotation matrix
    """
    u, _, v = numpy.linalg.svd(M)
    R = numpy.dot(u, v)
    if numpy.linalg.det(R) < 0:
        # Flip the weakest direction to turn the reflection into a rotation.
        flip = numpy.diag([1.0, 1.0, -1.0])
        R = numpy.dot(numpy.dot(u, flip), v)
    return R
15ee28aa9a2df64b342853edba82654b0e3b430f
35,886
def to_str(object):
    """Return ``str(object)``.

    NOTE(review): the original docstring claimed None is returned when
    conversion fails, but ``str()`` raises on failure and this wrapper
    never returns None.  The parameter name also shadows the ``object``
    builtin — consider renaming if callers permit.
    """
    return str(object)
f8c68b2cf902135f7b7ae186a52fdabbd2e1faa3
35,888
def get_cpu_mem_network_usage(querier, query_window):
    """Run the Prometheus queries for cpu, memory and network IO usage.

    :param querier: Object wrapping the Prometheus API; must expose
        ``get_stats_for_query``.
    :param query_window: Window over which Prometheus computes rates.
    :return: Dict mapping query names to their stats for cpu, memory and
        network IO usage.
    """
    usage = {}

    # CPU usage, as a percentage rate over the query window.
    cpu_query = f'(rate(container_cpu_usage_seconds_total{{namespace="stackrox"}}[{query_window}]) * 100)'
    usage['cpu_usage'] = querier.get_stats_for_query(cpu_query)

    # Instantaneous memory usage, in bytes.
    mem_query = '(container_memory_usage_bytes{namespace="stackrox"})'
    usage['mem_usage'] = querier.get_stats_for_query(mem_query)
    usage['mem_usage']['units'] = 'bytes'

    # Network IO: cumulative totals plus per-second rates for each metric.
    metric_names = {
        'container_network_receive_bytes_total': 'network_received',
        'container_network_transmit_bytes_total': 'network_transmited'
    }
    for metric, label in metric_names.items():
        total_query = f'({metric}{{namespace="stackrox"}})'
        usage[label] = querier.get_stats_for_query(total_query)
        usage[label]['description'] = f'Total {label}'
        usage[label]['units'] = 'bytes'

        rate_query = f'(rate({metric}{{namespace="stackrox"}}[{query_window}]))'
        rate_label = f'rate_{label}'
        usage[rate_label] = querier.get_stats_for_query(rate_query)
        usage[rate_label]['units'] = 'bytes per second'
    return usage
7861a040d6d1ca8a70b78a6c0f0803c66d22b60c
35,890
import os
from shutil import copyfile


def cfgdir(tmpdir_factory):
    """Prepare a configuration directory for cloudselect.

    Copies the cloud.json fixture into a fresh temporary directory and
    returns that directory.
    """
    tmp_dir = tmpdir_factory.mktemp("cloudselect")
    fixture = os.path.join(os.path.dirname(__file__), "fixture", "cloud.json")
    destination = os.path.join(str(tmp_dir), "cloud.json")
    copyfile(fixture, destination)
    return tmp_dir
eda5c70b89b79509726293cbfd5d519676a69d7f
35,891
import csv
import io


def read_model_analysis_csv(csvfile):
    """Read model analyses from a CSV exported from a spreadsheet shaped
    like 'EXAMPLE template results spreadsheet v2 warming levels'.

    :param csvfile: file-like object yielding the CSV content as bytes.
    :return: list of dicts, one per model analysis row.
    """
    # Keys matching the column headings of v2 of the template, in order.
    keys = ['dataset', 'n_members', 'experiment', 'statmodel', 'seasonalcycle',
            'spatialpattern', 'sigma', 'sigma_min', 'sigma_max', 'xi', 'xi_min',
            'xi_max', 'statprop', 'conclusion', 'include_model', 'threshold10y',
            'GMSTnow', 'PR', 'PR_min', 'PR_max', 'Delta_I', 'Delta_I_min',
            'Delta_I_max', 'GMSTfuture', 'PR_future', 'PR_min_future',
            'PR_max_future', 'Delta_I_future', 'Delta_I_min_future',
            'Delta_I_max_future']

    # CSV uploaded as bytes - assume UTF-8 encoding.
    csv_reader = csv.reader(io.StringIO(csvfile.read().decode('utf-8')))

    # Skip rows until the 'Model' heading appears in the first column,
    # then also skip the following line (the column-description row).
    for values in csv_reader:
        if values[0] == 'Model':
            next(csv_reader, None)
            break

    rows = []
    for values in csv_reader:
        # Zip the row's values with the column-heading keys.
        params = dict(zip(keys, values))
        # An empty or whitespace 'Include model?' field marks the end of data.
        if not params['include_model'] or params['include_model'].isspace():
            print('Log: Found row with empty "Include model?" field. Stopping CSV parsing.')
            break
        rows.append(params)
    return rows
62a32a66ba56c487ee2ef12ac1518f4be92f04d5
35,894
import os


def __get_dataset_path() -> str:
    """Return the path to the interim ratings.jl dataset.

    Walks two directory levels up from this module (src/features -> src
    -> project root) and appends data/interim/ratings.jl.
    """
    features_dir = os.path.join(os.path.dirname(__file__))
    src_dir = os.path.dirname(features_dir)
    project_root = os.path.dirname(src_dir)
    return project_root + '/data/interim/ratings.jl'
f9121e6540221623f862b000440fbf953fe26eac
35,895
from datetime import datetime


def now():
    """Current UTC datetime string, format yyyy-mm-ddThh:mm:ss[.sss]Z.

    When microseconds are present, they are truncated to milliseconds.
    """
    moment = datetime.utcnow()
    stamp = moment.isoformat()
    if moment.microsecond:
        # isoformat emits 6 fractional digits; keep only the first 3.
        stamp = stamp[:23] + stamp[26:]
    return stamp + 'Z'
8f6f7c2eb02066c74e3dbf7656efd43ee26ed0ff
35,897
def configure_policy(dims, params):
    """Configure and return the policy.

    Currently a stub: no policy implementation is wired in yet, so this
    always returns None.
    """
    # TODO: instantiate the real policy, e.g. SomePolicy(dims, params).
    return None
d64f6990e06fed7ac1236a06650999c6a43a47de
35,902
import asyncio
import typing


async def exec_as_aio(
    blocking_fn: typing.Callable[..., typing.Any], *args: typing.Any
) -> typing.Any:
    """Run a blocking function on the default executor so it does not
    block the event loop.

    Args:
        blocking_fn (Callable[..., Any]): The blocking function/method.
        *args: Positional arguments forwarded to it.

    Returns:
        Any: Whatever the blocking function returns.
    """
    running_loop = asyncio.get_running_loop()
    return await running_loop.run_in_executor(None, blocking_fn, *args)
0f8d0eb069ad8f33534931b4b9134486a641fa47
35,903
import os


def obvers_path(g_speak_home):
    """Assemble the path of the ob-version executable under *g_speak_home*."""
    return os.path.join(g_speak_home, "bin", "ob-version")
368d16d0caee347122ad4726f5f5d0c4b6c95a99
35,904
import re


def did_parse(did):
    """Parse a DID into its parts.

    :param did: Asset did, str.
    :return: dict with the 'method' and the 'id'.
    :raises TypeError: when *did* is not a string.
    :raises ValueError: when *did* does not match the DID grammar.
    """
    if not isinstance(did, str):
        raise TypeError(f'Expecting DID of string type, got {did} of {type(did)} type')

    match = re.match('^did:([a-z0-9]+):([a-zA-Z0-9-.]+)(.*)', did)
    if not match:
        raise ValueError(f'DID {did} does not seem to be valid.')

    return {
        'method': match.group(1),
        'id': match.group(2),
    }
a0ed14d68aac933ead173b53ba26a80c1e6c83fd
35,905
import argparse


def create_cli_parser() -> argparse.ArgumentParser:
    """Build and return the ArgumentParser for the command line interface."""
    cli = argparse.ArgumentParser()
    cli.add_argument("config", nargs="?", help="path to yaml configuration file")
    return cli
24497c28600a67c51cbf49bd6a4a1f3dfc8f5dcc
35,906
def base_uri(host, is_ssl=False):
    """Return the base URI for *host*, using https when *is_ssl* is set."""
    scheme = "https" if is_ssl else "http"
    return "%s://%s" % (scheme, host)
6992f6b7a4cd1ef73b489b65180dcfa139717fbf
35,908
def multicall2_addr():
    """Address of the Multicall2 contract (presumably Ethereum mainnet —
    verify against the deployment registry)."""
    return "0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696"
294afe6f8a48a20fb95f4cb9b29292c178142c95
35,909
def max_standard_deviation(window):
    """Return the largest per-column standard deviation in *window*.

    Each column's std is taken over all rows (times), and the maximum
    across columns (spatial positions) is returned; 0 for zero columns.
    """
    peak = 0
    for col in range(window.shape[1]):
        peak = max(peak, window[:, col].std())
    return peak
dd99de7170ce942b70b34f44cc50aff7da03091c
35,910
def update_active_output_renditions_metric(ml_channel_id, ml_channel_name, ml_channelgroup_names):
    """Build the metric entries of the "Active Output Renditions (avg)"
    dashboard widget: one entry per pipeline (0 and 1) for every output
    group of the channel."""
    results = []
    for groupname in ml_channelgroup_names:
        for pipeline in ("0", "1"):
            label = ml_channel_name + "-" + pipeline
            if pipeline == "0":
                options = {"label": label}
            else:
                # Pipeline 1 is plotted on the right-hand y-axis.
                options = {"yAxis": "right", "label": label}
            results.append(["MediaLive", "ActiveOutputs",
                            "OutputGroupName", groupname,
                            "ChannelId", ml_channel_id,
                            "Pipeline", pipeline, options])
    return results
77f9438b17456db44fc6d1c8c2c6f85896cdcdf3
35,911
import re


def clean_postcode(postcode):
    """Clean a postcode ('v' attribute of addr:postcode).

    Valid codes are 5 or 6 digits.  Compliant codes are returned as-is;
    non-compliant ones are stripped of non-digit characters and, if still
    invalid, replaced by the default "000000".

    Args:
        postcode: raw postcode string.

    Returns:
        Cleaned postcode string.
    """
    verbose = True
    pattern = re.compile(r'^\d{5,6}$')  # valid codes are 5 or 6-digits
    match_result = pattern.search(postcode)
    if not match_result:  # process non-compliant codes
        result = re.sub(r'[^\d]', '', postcode)  # clean up malformed codes
    else:  # do not process compliant codes
        return postcode
    # BUG FIX: the validity pattern accepts 5- or 6-digit codes, but the
    # original post-cleanup check demanded exactly 6 digits, so a cleaned
    # 5-digit code was wrongly replaced by the default.
    if len(result) not in (5, 6):  # still invalid after cleanup -> default
        result = "000000"
    if verbose:
        print("original: {}, cleaned: {}".format(postcode, result))
    return result
86a332c3ed07410f1e79843ff4e05b7dd403c411
35,914
import datetime


def get_job_status(job_run_details_list):
    """Summarise glue job run details into name/status pairs.

    Required Parameter: job_run_details_list — list of dicts, each with a
    'JobRuns' list whose first entry carries 'JobName', 'StartedOn'
    (datetime) and 'JobRunState'.

    Returns a list of dicts with 'JobName' and 'Status'.

    Note: if the job's StartedOn date is not today, the status is
    reported as 'YET TO START'; otherwise the 'JobRunState' is used.
    """
    today = datetime.date.today()
    statuses = []
    for job in job_run_details_list:
        latest_run = job['JobRuns'][0]
        if latest_run['StartedOn'].date() != today:
            status = 'YET TO START'
        else:
            status = latest_run['JobRunState']
        statuses.append({'JobName': latest_run['JobName'], 'Status': status})
    return statuses
08a854cffdb3ce457bf56036d0c12e33517db210
35,915
def qualities(quality_ids):
    """Return a function mapping a quality id to its numeric rank.

    Ids absent from *quality_ids* map to -1.
    """
    def rank(qid):
        return quality_ids.index(qid) if qid in quality_ids else -1
    return rank
ec2df1665f187928ebbace1b62a046d149378990
35,917
import random


def prune_non_seed_interactions_at_given_percentage(graph, percentage, reserved_nodes):
    """Randomly remove percentage% of the graph's edges, never touching an
    edge incident to a node in *reserved_nodes*.

    The removal count is computed from the TOTAL number of edges, so when
    too many edges touch reserved nodes the requested percentage cannot
    be reached and a warning is printed.

    :param graph: networkx-style graph; the input is not modified — a
        pruned copy is returned
    :param percentage: percentage of all edges to remove (0-100)
    :param reserved_nodes: nodes whose edges must all be kept
    :return: the pruned copy of the graph
    """
    # (Dead commented-out variants of the loop below were removed; the
    # surviving logic is byte-for-byte the behavior the original executed.)
    new_graph = graph.copy()
    candidate_nodes = set(new_graph.nodes()) - set(reserved_nodes)
    edges = graph.edges()
    candidate_edges = [
        edge for edge in edges
        if edge[0] in candidate_nodes and edge[1] in candidate_nodes
    ]
    count = int(round(len(edges) * float(percentage) / 100))
    random.shuffle(candidate_edges)
    for edge in candidate_edges[:count]:
        new_graph.remove_edge(edge[0], edge[1])
    if len(candidate_edges) < count:
        print("Warning: Pruning percentage is not achieved due to reserved nodes")
    return new_graph
2adad0e5d3fd7d74b24daa21c6eea341e023996b
35,922
import os


def _num_cpus_windows():
    """Return the number of active CPUs on a Windows system.

    NOTE(review): this reads the NUMBER_OF_PROCESSORS environment
    variable, so the value is a *string* (or None when the variable is
    unset), not an int — confirm callers expect that.
    """
    return os.environ.get("NUMBER_OF_PROCESSORS")
eaf81cc77d1047ae42e330fb342491b599a4f524
35,924
def get_bounds(params):
    """Get the bounds of the parameters.

    A list of ``(min, max)`` pairs is returned; the ``None`` value for
    unbounded parameters is kept.

    :param params: An iterable of model parameters exposing ``lower`` and
        ``upper`` attributes.
    :returns: The list of bounds for the parameters.
    :rtype: list
    """
    bounds = []
    for param in params:
        bounds.append((param.lower, param.upper))
    return bounds
5fffe3c863e57de8f141b7a742e1b6ac65c8fc94
35,925
def collapse(li: list, axis: str) -> list:
    """Collapse a fence to a single element.

    Along 'hori' the '+' and '-' tokens collapse and every odd-numbered
    '+' becomes '|'; along 'vert' the roles of '|' and '-' are swapped.
    """
    if axis == "hori":
        old_tokens, new_token = ["+", "-"], "|"
    elif axis == "vert":
        old_tokens, new_token = ["+", "|"], "-"
    else:
        raise ValueError("axis must be 'hori' or 'vert'")

    collapsed = []
    plus_seen = 0
    for token in li:
        if token == "+":
            plus_seen += 1
            # Every odd occurrence of '+' contributes the replacement token.
            if plus_seen % 2 == 1:
                collapsed.append(new_token)
        if token not in old_tokens:
            collapsed.append(token)
    return collapsed
877c5d3cf9085a3333cb8707db7ae06bd23d61ca
35,927
def gen_verbinder_map(xmldoc):
    """Produce a dict of VISSIM link id -> bool flag marking 'Verbinder'
    (connector) links.

    A link counts as a Verbinder when it contains at least one
    fromLinkEndPt child element.

    :param xmldoc: input VISSIM xml
    :type xmldoc: xml.dom.minidom.Document
    :return: map of VISSIM link id -> bool flag if link is 'Verbinder'
    :rtype: dict
    """
    is_verbinder_d = {}
    for link in xmldoc.getElementsByTagName("link"):
        has_from_end = len(link.getElementsByTagName("fromLinkEndPt")) > 0
        is_verbinder_d[link.getAttribute("no")] = has_from_end
    return is_verbinder_d
bf0c179273e63254e772bdb8cd1ba9a4c4915f3b
35,929
import types
from contextlib import suppress


def EIA_filename_identifier():
    """Collect the EIA data file names declared by the imported modules.

    Scans this module's globals for module objects; any module exposing
    an ``EIAData`` class contributes the values of a fresh instance's
    ``__dict__`` (its file-name strings) to the result.

    Returns:
        A list of filenames gathered from all imported modules that
        define an EIAData class.
    """
    filenames = []
    # 'name' is the reference name for the module, 'val' the module object
    # itself (e.g. with 'import os as kicker', name='kicker', val is os).
    for name, val in globals().items():
        if not isinstance(val, types.ModuleType):
            continue
        # Modules without an EIAData attribute raise AttributeError; skip them.
        with suppress(AttributeError):
            data_class = getattr(val, 'EIAData')
            filenames.extend(data_class().__dict__.values())
    return filenames
4af5cdcd4baeacc13d4371de506c2eed1dd731e5
35,930
def _get_sample_count(*lsts):
    """Get the sample count of a dataset.

    :param lsts: variable number of lists.
    :return: the common length when all lists match, else None.
    """
    reference = len(lsts[0])
    if any(len(lst) != reference for lst in lsts):
        return None
    return reference
d8190c6b6b8a74f54acb1ca80f23b19ffb4f8e26
35,931