content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def make_dict(typ, *values):
    """Build a dict pairing the keys of ``typ.entries`` with ``values`` in order.

    Extra values (or extra keys) beyond the shorter of the two are dropped,
    mirroring ``zip`` semantics.
    """
    keys = typ.entries.keys()
    return dict(zip(keys, values))
e4a9a488f77a4ec0f5305c4ea67a25656f19a820
50,753
from yaml import full_load as yload
import json


def load_in_file(in_file):
    """Load a state description used to compute descriptors from a state file.

    The file is parsed as YAML first; on failure the stream is rewound and
    parsed as JSON.  Values that are themselves JSON-encoded strings are
    decoded into objects.

    Args:
        in_file: path to the state file.

    Returns:
        dict: mapping of state keys to decoded values.
    """
    with open(in_file, 'r') as stream:
        try:
            state_str = yload(stream)
        except Exception:
            # The failed YAML parse consumed the stream; without seek(0)
            # the JSON fallback would read an empty file and always fail.
            stream.seek(0)
            state_str = json.load(stream)
    state = {}
    for k, s in state_str.items():
        # some entries store their payload as a JSON string
        state[k] = json.loads(s) if isinstance(s, str) else s
    return state
f8bb97e07cff43da871f6e785edf10f9a77aa0a0
50,754
def create_grid_string(dots, xsize, ysize):
    """Render an (xsize x ysize) grid: '.' where a dot sits, '#' elsewhere.

    :param dots: collection of (x, y) positions containing dots
    :param xsize: grid width
    :param ysize: grid height
    :return: the grid as a string, one '\\n'-terminated row per line
    """
    rows = []
    for y in range(ysize):
        row = ''.join('.' if (x, y) in dots else '#' for x in range(xsize))
        rows.append(row + '\n')
    return ''.join(rows)
b1653b8e710edfe49b3d54e113b6709e51fb92b7
50,755
# DNI scale phrase -> STIX confidence integer (Rorabacher-style fixed table).
_DNI_SCALE_TO_CONFIDENCE = {
    'Almost No Chance / Remote': 5,
    'Very Unlikely / Highly Improbable': 15,
    'Unlikely / Improbable': 30,
    'Roughly Even Chance / Roughly Even Odds': 50,
    'Likely / Probable': 70,
    'Very Likely / Highly Probable': 85,
    'Almost Certain / Nearly Certain': 95,
}


def dni_to_value(scale_value):
    """Transform a string value from the DNI scale to its confidence integer.

    .. list-table:: DNI Scale to STIX Confidence
       :header-rows: 1

       * - DNI Scale
         - STIX Confidence Value
       * - Almost No Chance / Remote
         - 5
       * - Very Unlikely / Highly Improbable
         - 15
       * - Unlikely / Improbable
         - 30
       * - Roughly Even Chance / Roughly Even Odds
         - 50
       * - Likely / Probable
         - 70
       * - Very Likely / Highly Probable
         - 85
       * - Almost Certain / Nearly Certain
         - 95

    Args:
        scale_value (str): A string value from the scale (case sensitive).

    Returns:
        int: The numerical representation corresponding to values in the
        DNI scale.

    Raises:
        ValueError: If `scale_value` is not within the accepted strings.
    """
    try:
        # table lookup replaces the former 7-branch if/elif ladder
        return _DNI_SCALE_TO_CONFIDENCE[scale_value]
    except KeyError:
        raise ValueError("STIX Confidence value cannot be determined for %s" % scale_value) from None
13b80d6b0c12fbc36d59db9d6626cf747beca627
50,756
import argparse


def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
        A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="PSPnet Network")

    # optimisation configuration
    parser.add_argument("--is-training", action="store_true",
                        help="Whether to updates the running means and variances during the training.")
    parser.add_argument("--learning-rate", type=float, default=0.00025,
                        help="Base learning rate for training with polynomial decay.")  # 0.001
    parser.add_argument("--weight-decay", type=float, default=0.0005,
                        help="Regularization parameter for L2-loss.")  # 0.0005
    parser.add_argument("--momentum", type=float, default=0.9,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--power", type=float, default=0.9,
                        help="Decay parameter to compute the learning rate.")

    # dataset information
    parser.add_argument("--dataset", type=str, default='cityscapes',
                        help="voc12, cityscapes, or pascal-context.")
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--not-restore-last", action="store_true",
                        help="Whether to not restore last (FC) layers.")
    parser.add_argument("--random-seed", type=int, default=1234,
                        help="Random seed to have reproducible results.")
    parser.add_argument('--logFile', default='log.txt',
                        help='File that stores the training and validation logs')

    # GPU configuration
    parser.add_argument("--cuda", default=True, help="Run on CPU or GPU")
    parser.add_argument("--gpus", type=str, default="3",
                        help="choose gpu device.")  # use GPU #3

    return parser.parse_args()
e3fb21857b44a37fef7b930bcb5345c42005a780
50,757
from typing import Sequence
from typing import Generator


def dictionize(fields: Sequence, records: Sequence) -> Generator:
    """Yield one dict per record, mapping `fields` to the record's values."""
    for record in records:
        yield dict(zip(fields, record))
ca179ef5d03e8d5860c36872a61405b6a33ad463
50,758
import getpass
import platform


def get_root_dir(config_file, dir_tag="root_dir", default_dir=None):
    """Resolve the db root dir for the current user and current machine.

    Args:
        config_file: path to a YAML file mapping user -> host -> dir tags.
        dir_tag: key of the directory entry to look up.
        default_dir: if given, returned directly without reading the config.

    Returns:
        str: the resolved root directory.

    Raises:
        RuntimeError: if the user, host, or dir_tag is missing from config.
    """
    if default_dir:
        return default_dir

    # Imported lazily so callers that pass default_dir do not need PyYAML.
    import yaml

    with open(config_file, "r") as fi:
        db_roots_dict = yaml.safe_load(fi)
    user = getpass.getuser()
    if user not in db_roots_dict:
        raise RuntimeError(f"train.py::error::cannot find user {user} in db_root.yaml!"
                           f" either add your db root dirs into db_roots.yaml or specify root dir on the command line ")
    user_roots = db_roots_dict[user]
    hostname = platform.node().split(".")[0]
    # handle jeanzay where we may have different suffixes
    for hh in user_roots:
        if hostname.startswith(hh):
            hostname = hh
    if hostname not in user_roots:
        raise RuntimeError(f"train.py::error::cannot find host {hostname} for user {user} in db_roots.yaml, "
                           f"either add your db root dirs into db_roots.py or specify root dir on the command line ")
    rootdir_config = user_roots[hostname]
    if dir_tag not in rootdir_config:
        raise RuntimeError(f"train.py::error::cannot find dir info for {dir_tag} in {config_file}!"
                           f" either add your db root dirs into db_roots.yaml or specify root dir on the command line ")
    return rootdir_config[dir_tag]
972b61e3e30f5b685d0a03219bb3b69753aa834b
50,759
import warnings


def ratio(class_label, data):
    """Return the fraction of `data` entries equal to `class_label`.

    Emits a warning and returns 0.0 when `data` is empty.
    """
    try:
        return data.count(class_label) / float(len(data))
    except ZeroDivisionError:
        warnings.warn("data {} is empty".format(id(data)))
        return 0.
7742804261d3aca98dd9ba19e083fd5c158310b9
50,760
def calc_force(force, new_closes, new_volumes):
    """Accumulate force-index values into `force`.

    INPUTS:
        force (Series) - A copy of last record's force value.
        new_closes (Series) - New values, prepended with last closing price
        new_volumes (Series) - New values, prepended with last volume
    OUTPUT:
        Force index as a pandas Series.
    """
    labels = new_closes[1:].index
    for pos, label in enumerate(labels, start=1):
        price_change = new_closes[pos] - new_closes[pos - 1]
        force[label] = price_change * new_volumes[pos]
    return force
30034d0a142962094f8b8eb02d71eee7d4088622
50,761
def import_devices(client):
    """Import all devices from the Rayleigh Connect API.

    :param client: An instance of the RayleighClient class
    :return: A JSON object containing devices and their parameters,
        retrieved from the Rayleigh Connect API
    """
    devices = client.retrieve_devices()
    return devices
83ca27b0c25015b706f787aa44c20785a6bbeee1
50,762
def lat_to_ibz(lat):
    """Return the Bravais-lattice ibz code for the input string code.

    e.g. lat='bcc' or lat='fco'

    :param lat: lattice string code
    :type lat: str
    :returns: ibz code corresponding to the Bravais lattice given by 'lat'
    :rtype: int
    """
    ltoi = {'sc': 1, 'fcc': 2, 'bcc': 3, 'hcp': 4, 'st': 5, 'bct': 6,
            'trig': 7, 'so': 8, 'baco': 9, 'bco': 10, 'fco': 11, 'sm': 12,
            'bacm': 13, 'bcm': 13, 'stric': 14, 'B2': 1, 'L12': 1}
    if lat not in ltoi:
        # NOTE(review): prints and terminates the process instead of raising;
        # kept as-is because callers may rely on the exit behaviour.
        print("ERROR: Unknown lattice "+lat)
        print("Exit now!")
        exit(1)
    return ltoi[lat]
2c434b69b3ec21ad93d60ee8e48f40f087e6a369
50,763
def update_number_of_orientations(integration_density, integration_volume):
    """Report the number of orientations used for powder averaging.

    Option for the advanced modal.  Volume code 0 uses the base count,
    volume code 1 uses four times as many; any other code yields None.
    """
    ori = int((integration_density + 1) * (integration_density + 2) / 2)
    multiplier = {0: 1, 1: 4}.get(integration_volume)
    if multiplier is None:
        return None
    return f"Averaging over {multiplier * ori} orientations."
bd54b18578eafdc59bf55284438b7d81d79c5509
50,764
def _cast_to_int(integer_string): """Cast a string to a integer.""" ret = int(integer_string) if ret < -1: raise ValueError() return ret
3eb4796f19467f95030ffdc8eb18eb7489329e98
50,765
import os def _find_setup_script_for_extension(top_path, source_dir='src', setup_py='setup.py'): """ Parameters ---------- top_path : str An entry for `os.walk`. source_dir : str Name of directory where source files locate. setup_py : str File name of the setup file for C extension. """ top_path = os.path.abspath(top_path) setup_files = [] for root, dirs, files in os.walk(top_path): if len(files) == 0: continue if source_dir in dirs and setup_py in files: fp = os.path.abspath(os.path.join(root, setup_py)) if fp == os.path.abspath(__file__): continue setup_files.append(fp) return setup_files
cf2955864da8b0a02b68be4dc87eff5c67b90637
50,767
def getFreePlaceValues(freePlaceMap):
    """Count the free cells of every area marked on the FreePlaceMap.

    The returned list starts with Area 1 on index 0.

    :param freePlaceMap: 2D list; each cell holds a 1-based area number,
        or -1 for "not free".
    :return: list of free-cell counts per area, or None when the map is
        missing or degenerate (fewer than 2 rows or columns).
    """
    if freePlaceMap is None or len(freePlaceMap) <= 1 or len(freePlaceMap[0]) <= 1:
        return
    values = []
    # Iterate over the Map
    for y in range(len(freePlaceMap)):
        for x in range(len(freePlaceMap[0])):
            pointValue = freePlaceMap[y][x]
            if pointValue == -1:
                continue
            # Grow the list with zero counts up to this area number, so a
            # cell of area N is always tallied at index N-1 even when areas
            # are first encountered out of order (the old append(1) put the
            # count at the wrong index in that case).
            while len(values) < pointValue:
                values.append(0)
            values[pointValue - 1] += 1
    return values
a370310d26233b6dd93865b08528c505585fe9a4
50,768
def _add_system(query, data): """Add data from successful system MIB query to original data provided. Args: query: MIB query object data: Three keyed dict of data Returns: data: Aggregated data """ # Process query result = query.system() # Add tag for primary in result.keys(): for secondary in result[primary].keys(): for tertiary, value in result[primary][secondary].items(): data[primary][secondary][tertiary] = value # Return return data
cb37d05ac484f9a8f57b7a61afd9a40a971a23d0
50,770
def stat_limits(stats_dict, prof, stat, up_buff=None, low_buff=None):
    """Return the lowest and highest values found in the s_c_dict provided.

    parameter:
    - stats_dict: The s_c_dict the min max values are wanted from
    - prof: The profile name, as a string
    - stat: The stat name, as a string
    - up_buff: buffer added to the max value, as a fraction of it
    - low_buff: buffer subtracted from the min value, as a fraction of it

    returns:
    - (s_min, s_max) tuple of the smallest and largest values found

    NOTE(review): both extremes start from 0, so an all-positive data set
    reports a minimum of 0 (and all-negative a maximum of 0); preserved
    as-is since callers may depend on it.
    """
    s_min = s_max = 0
    for weapons in stats_dict[prof][stat].values():
        for value in weapons.values():
            s_max = max(s_max, value)
            s_min = min(s_min, value)
    if up_buff is not None:
        s_max += abs(s_max * up_buff)
    if low_buff is not None:
        s_min -= abs(s_min * low_buff)
    return s_min, s_max
634f0588e2de0d7fa884dbedc28e388a822cf6d3
50,771
import re


def unflat(data, separator="_", lseparator=('[', ']')):
    """Unflatten a flat key/value dict into nested dicts and lists.

    Keys are split on ``separator`` to build nested dicts; a trailing
    ``name[idx]`` component (delimited by ``lseparator``) becomes a list
    entry inserted at ``idx``.

    Args:
        data (dict): the dict to unflat. Must be a key/value dict.
        separator (:obj:`str`, optional): key separator.
        lseparator (tuple, optional): pair of list-index delimiters.

    Returns:
        dict: Unflatted dict
    """
    unflat_dict = {}
    # \d+ (not \d) so multi-digit list indices such as key[12] are recognised
    regex = r'(\w*)\{0}(\d+)\{1}'.format(lseparator[0], lseparator[1])
    for k in sorted(data.keys()):
        *path, key = k.split(separator)
        context = unflat_dict
        for sub_key in path:
            context = context.setdefault(sub_key, {})
        match = re.match(regex, key)
        if match:
            lkey = match.group(1)
            lpos = int(match.group(2))
            context.setdefault(lkey, []).insert(lpos, data[k])
        else:
            context[key] = data[k]
    return unflat_dict
dd39961dca2ad7052c72e46cbd313ca6b45ac7f4
50,773
import socket


def get_hostname():
    """Retrieve the short hostname of the machine.

    Returns:
        hostname (str): the hostname up to the first dot, or "UNKNOWN"
        if it cannot be determined.
    """
    try:
        # keep only the short name, e.g. "host.example.com" -> "host"
        return socket.gethostname().partition(".")[0]
    except Exception:
        # was a bare except; narrowed but still deliberately best-effort
        return "UNKNOWN"
9c44ef69bbf1df5cd0a5ae7c80038def468072b3
50,774
def _InitLookUp(alpha=0.05): """ Initialize the dictionary of the look up table based on the confidence level. The tabulated data are from Rorabacher, D.B.(1991) Analytical Chemistry 63(2), 139–46. :param alpha: confidence interval (double), accepted values: 0.1, 0.05, 0.01 :return: """ if alpha == 0.10: q_tab = [0.941, 0.765, 0.642, 0.56, 0.507, 0.468, 0.437, 0.412, 0.392, 0.376, 0.361, 0.349, 0.338, 0.329, 0.32, 0.313, 0.306, 0.3, 0.295, 0.29, 0.285, 0.281, 0.277, 0.273, 0.269, 0.266, 0.263, 0.26] elif alpha == 0.05: q_tab = [0.97, 0.829, 0.71, 0.625, 0.568, 0.526, 0.493, 0.466, 0.444, 0.426, 0.41, 0.396, 0.384, 0.374, 0.365, 0.356, 0.349, 0.342, 0.337, 0.331, 0.326, 0.321, 0.317, 0.312, 0.308, 0.305, 0.301, 0.29] elif alpha == 0.01: q_tab = [0.994, 0.926, 0.821, 0.74, 0.68, 0.634, 0.598, 0.568, 0.542, 0.522, 0.503, 0.488, 0.475, 0.463, 0.452, 0.442, 0.433, 0.425, 0.418, 0.411, 0.404, 0.399, 0.393, 0.388, 0.384, 0.38, 0.376, 0.372] else: print("Input alpha value not available") q_tab = [] Q = {n: q for n, q in zip(range(3, len(q_tab) + 1), q_tab)} return Q
18ccc897f4f0333e285957878a441e7e21a47877
50,775
def resolve_reset(tokens):
    """Resolve the reset to a location, but don't resolve the location to
    an address yet.

    :param tokens: parse tokens; the first entry must be a "symbol" tree
    :return: the symbol tree, unchanged
    :raises Exception: when the tree is not a symbol
    """
    tree = tokens[0]
    if tree.data == "symbol":
        return tree
    raise Exception("Unknown reset type", tree.data)
756642dde822d993f47a49a1e90958ff050fd1e1
50,776
def integral(x, y):
    """Integrate tabulated data using Simpson's rule with 3-point
    Lagrangian interpolation of interval midpoints.

    Data may be regularly or irregularly sampled in x; the elements of x
    must be unique and monotonically increasing.  Based on the IDL routine
    INTEGRAL.PRO by Liam Gumley (CIMSS/SSEC), itself derived from a
    FORTRAN-77 version by Paul van Delst.

    :param x: vector (numpy array) of x axis points
    :param y: vector of corresponding y values
    :return: the approximate integral of y over x
    """
    def lagrange_midpoint(x0, x1, x2, y0, y1, y2, xm):
        # 3-point Lagrange interpolation evaluated at xm
        l0 = ((xm - x1) / (x0 - x1)) * ((xm - x2) / (x0 - x2))
        l1 = ((xm - x0) / (x1 - x0)) * ((xm - x2) / (x1 - x2))
        l2 = ((xm - x0) / (x2 - x0)) * ((xm - x1) / (x2 - x1))
        return y0 * l0 + y1 * l1 + y2 * l2

    n = x.size
    # vectorized over every leading triple of points
    x0, x1, x2 = x[0:n - 2], x[1:n - 1], x[2:n]
    y0, y1, y2 = y[0:n - 2], y[1:n - 1], y[2:n]
    dx = x1 - x0
    xmid = 0.5 * (x1 + x0)
    ymid = lagrange_midpoint(x0, x1, x2, y0, y1, y2, xmid)
    # Simpson's rule over each interval [x0, x1]
    integ = sum(1.0 / 6.0 * dx * (y0 + 4.0 * ymid + y1))
    # the final interval is handled with the last three points
    x0, x1, x2 = x[n - 3], x[n - 2], x[n - 1]
    y0, y1, y2 = y[n - 3], y[n - 2], y[n - 1]
    dx = x2 - x1
    xmid = 0.5 * (x2 + x1)
    ymid = lagrange_midpoint(x0, x1, x2, y0, y1, y2, xmid)
    return integ + 1.0 / 6.0 * dx * (y1 + 4.0 * ymid + y2)
1854833006bc1bdb0c2352f08a059de145ab9ba2
50,777
def unminify(soup, encoding='utf-8'):
    """Pretty-print a parsed soup tree and return it encoded as bytes.

    :param soup: object exposing a ``prettify()`` method (e.g. BeautifulSoup)
    :param encoding: codec used for the returned bytes
    """
    pretty = soup.prettify()
    return pretty.encode(encoding)
11b956f2fa61f237afb5931441f8add4320024d4
50,779
import torch


def sparsity(M, tol=1.0e-3, device="cpu"):
    """Return the sparsity of the input matrix M.

    Entries with absolute value below ``tol`` are treated as zeros.

    :param M: the matrix (Tensor or anything torch.as_tensor accepts)
    :param tol: threshold below which entries are treated as zero
    :param device: device used when converting non-tensor input
    :return: fraction of (near-)zero entries, in [0, 1]
    """
    if type(M) is not torch.Tensor:
        M = torch.as_tensor(M, device=device)
    thresholded = torch.where(torch.abs(M) < tol, torch.zeros_like(M), M)
    nonzero_count = len(thresholded.nonzero())
    return 1.0 - nonzero_count / thresholded.numel()
60ab6d9578d4f3285b5ca8ec7287ab1e68cb7618
50,781
def load_org2_gateway(network_config):
    """Load the `org2_gw` Gateway from the network configuration."""
    gateway_name = 'org2_gw'
    return network_config.get_gateway(gateway_name)
55f1770759f71a1ab730689920e17eec549cd95f
50,782
def int_addr(addr):
    """Integer representation of an address (drops the one-char prefix)."""
    digits = addr[1:]
    return int(digits)
3f00ae151bff20516fbaddda73feb147aa1c8424
50,783
def val_default(val, default=True):
    """Return `val` unless it is None, in which case return `default`
    (for attributes)."""
    return default if val is None else val
6a7bb5a475428d36ad046b4e7a7099f2573eae76
50,784
def speak_next(most_recent: int, turn_num: int, nums_spoken: dict):
    """Memory-game step: return the number spoken next plus updated state.

    If `most_recent` was never spoken before, the next number is 0;
    otherwise it is the number of turns since it was last spoken.
    (Dict lookups keep this fast -- "If you want to optimize something in
    Python, it probably involves dictionaries or tuples." ~ Miles)
    """
    previous_turn = nums_spoken.get(most_recent)
    saying = 0 if previous_turn is None else turn_num - previous_turn
    nums_spoken[most_recent] = turn_num
    return saying, turn_num + 1, nums_spoken
d7ab59dd70ae80168f3170e4a79944dc86a63256
50,785
import os


def load_user_pass():
    """Load the username and password from the environment.

    :return: tuple - (username, password)
    """
    # NOTE(review): these exception classes are re-created on every call,
    # so outside callers cannot catch them by type; preserved as-is.
    class UsernameMissingError(Exception):
        pass

    class PasswordMissingError(Exception):
        pass

    username = os.environ.get("LBXD_USERNAME", None)
    if username is None:
        raise UsernameMissingError("Auth methods require a Letterboxd username.")
    password = os.environ.get("LBXD_PASSWORD", None)
    if password is None:
        raise PasswordMissingError("Auth methods require a Letterboxd password.")
    return (username, password)
4c29353b214c42a66831e2284e0ce1eaa21bbbf4
50,786
def frame_text(lines: list, style='╔═╗╚╝║'):
    """Draw a box around `lines` using the 6-character `style` string
    (top-left, horizontal, top-right, bottom-left, bottom-right, vertical).

    Styles cheat-sheet:
    ╔═╗╚╝║
    /–\\/|
    ┏━┓┗┛┃
    """
    width = max(len(line) for line in lines)
    framed = [style[0] + style[1] * width + style[2]]
    framed.extend(style[5] + line.ljust(width) + style[5] for line in lines)
    framed.append(style[3] + style[1] * width + style[4])
    return '\n'.join(framed)
e2328d4bc5583aeb6fdcaf492391fddca3a5a98c
50,787
def res_server_error():
    """Default response body for an unhandled exception (base internal
    server error)."""
    response = {
        "type": "server_error",
        "code": "error",
        "detail": "A server error occurred.",
        "attr": None,
    }
    return response
093f57abc12b48a2c788b86f860ae4addca9d475
50,788
import sqlite3
import gzip


def sqlite_from_file(filename, fmt=None):
    """Instantiate an sqlite3.Connection with the data copied in from the
    specified file.  The connection is mapped in memory, not to the file.

    Args:
        filename (str): path to the file.
        fmt (str): 'sqlite', 'dump', or 'gz'; inferred when None
            ('gz' for *.gz, otherwise 'sqlite').

    Returns:
        sqlite3.Connection: in-memory database with the file's contents.

    Raises:
        RuntimeError: for an unrecognised fmt.
    """
    if fmt is None:
        fmt = 'gz' if filename.endswith('.gz') else 'sqlite'
    if fmt == 'sqlite':
        # Read-only URI open of the actual file.  The old code had a
        # hard-coded "(unknown)" placeholder instead of `filename` here,
        # so this branch could never open the requested database.
        db0 = sqlite3.connect(f'file:{filename}?mode=ro', uri=True)
        data = ' '.join(db0.iterdump())
        db0.close()  # the source connection was previously leaked
    elif fmt == 'dump':
        with open(filename, 'r') as fin:
            data = fin.read()
    elif fmt == 'gz':
        with gzip.GzipFile(filename, 'r') as fin:
            data = fin.read().decode('utf-8')
    else:
        raise RuntimeError(f'Unknown format "{fmt}" requested.')
    db = sqlite3.connect(':memory:')
    db.executescript(data)
    return db
5cd9152d903d2a2c98fae676230c0a2fc3d4cfbb
50,791
def getspecificnodes():
    """Return the hard-coded list of specific node ids.

    NOTE(review): the original docstring claimed "20 specific nodes" but
    the list holds 13 entries; verify against the caller's expectation.
    """
    nodelist = [310, 510, 810, 1210, 9, 10, 61, 37, 38, 43, 23, 204, 1208]
    print("Got specific nodes")
    return nodelist
427baabdf5aaaaaacc20745bca992e198d80b017
50,792
def cs_water_Tp(T=298, p=1):
    """Chemical shift of water as a function of temperature and pressure.

    Empirical fit (according to Maciej); units of T and p follow that fit.
    """
    # keep the original term order so float results are bit-identical
    pressure_terms = 5.945e-7*p*T - 1.612e-4*p
    cs = pressure_terms - 1.025e-2*T + 7.866
    return cs
de450a97255cd245ae02f2219bddc0b5f201bfec
50,793
from collections import Counter


def test_byte_pair_encode():
    """BPE: repeatedly replace the most frequent adjacent symbol pair with
    a fresh single symbol (Z, Y, X, ...) until every remaining pair occurs
    only once."""
    def bpe(text):
        pair_counts = Counter(a + b for a, b in zip(text, text[1:]))
        replacement = 'Z'
        for pair, count in pair_counts.most_common():
            if count > 1:
                text = text.replace(pair, replacement)
                replacement = chr(ord(replacement) - 1)
        return text

    assert bpe("hey") == "hey"
    assert bpe("foofoo") == "ZoZo"
    assert bpe("quick squad and duck or chicken") == "YiZ sYaVanVduZ or chiZen"
bb292404f03731da9ddfa7085907510b14bdd179
50,794
from typing import Set
import re


def get_filenames_from_git_log(git_log: str) -> Set[str]:
    """Extract the unique file paths named in a git diff log.

    :param git_log: raw diff text containing ``--- a/...`` / ``+++ b/...`` headers
    :return: set of file paths appearing in those headers
    """
    header_re = re.compile(r'--- a\/(.+)\n\+\+\+ b\/(.+)')
    names = set()
    for old_name, new_name in header_re.findall(git_log):
        names.add(old_name)
        names.add(new_name)
    return names
cb6e8d03c38a41452508e00373f05db9e89a9d29
50,797
from typing import Tuple
from typing import List


def read_network(file: str) -> Tuple[int, int, List[int]]:
    """Read a Boolean network from a text file.

    Line 1: number of state variables
    Line 2: number of control inputs
    Line 3: transition matrix of the network (linear representation of a
            logical matrix)

    :param file: a text file
    :return: (n, m, L), where
        n: number of state variables
        m: number of control inputs
        L: network transition matrix
    """
    with open(file, 'r') as f:
        n = int(f.readline().strip())
        m = int(f.readline().strip())
        state_count = 2 ** n      # N: number of states
        column_count = 2 ** m * state_count  # M * N columns expected
        line = f.readline().strip()
        assert line, 'network transition matrix must be provided!'
        numbers = line.split()
        assert len(numbers) == column_count, f'The transition matrix must have {column_count} columns'
        L = [int(num) for num in numbers]
        for entry in L:
            assert 1 <= entry <= state_count, f'All integers in the network transition matrix must be in range [1, {state_count}]'
    return n, m, L
16ef7751cbd4af000ed33de7b065273225e35d76
50,798
import pickle


def save_model(model, model_filepath):
    """Pickle `model` to the desired location.

    Args:
        model: any picklable object.
        model_filepath: destination path.

    Returns:
        None
    """
    # `with` closes and flushes the handle; the old code opened the file
    # inline and never closed it.
    with open(model_filepath, 'wb') as fh:
        pickle.dump(model, fh)
    return None
998eb9ff50dd383265ec636d82d3c05d781dfaa3
50,799
def filter_events_by_messages(events, ignore_github=False):
    """Filter events so that only "m.room.message" events are kept.

    events should be a dict of room events as returned by
    ``get_all_events``; each value supports pandas-style boolean indexing.
    When `ignore_github` is set, messages from the github bot account are
    dropped as well.
    """
    bot_account = "@_neb_github_=40_cadair=3amatrix.org:matrix.org"
    messages = {}
    for room, frame in events.items():
        kept = frame[frame['type'] == "m.room.message"]
        if ignore_github:
            kept = kept[kept['sender'] != bot_account]
        messages[room] = kept
    return messages
ed41183a8fbef71d2a9ec016c6cca3b96d346edc
50,800
import threading


def 线程_取线程标识符():
    """Return the current thread's "thread identifier".

    This is a nonzero integer whose value has no direct meaning; it is
    intended as a magic cookie, e.g. for indexing a dict of thread-specific
    data.  Thread identifiers may be recycled after a thread exits and
    another thread is created.
    """
    return threading.get_ident()
21976541af161d5e5605ba308cd4342cc5de7151
50,803
import subprocess
import json


def execute_cmd_str(cmd: str, convert_to_json: bool):
    """Execute a shell command and return its trimmed stdout.

    :param cmd: shell command
    :param convert_to_json: when True, parse stdout as JSON
    :return: string (or parsed JSON) result, or None on any failure
    """
    try:
        raw = subprocess.check_output(cmd, shell=True, stderr=subprocess.DEVNULL)
        output = raw.decode().strip()
        return json.loads(output) if convert_to_json else output
    except:
        # deliberately best-effort: any failure (command or parse) yields None
        return None
216d6f8083b4fd71afb1efe546569812e2fb3039
50,804
def remove_common_molecules(reactants, products):
    """Remove species common to both lists, leaving only reacting species.

    Parameters
    ----------
    reactants, products : list of str
        List containing strings of all molecular species.

    Returns
    -------
    tuple of list of str
        Sorted, reduced lists for reactants and products with shared
        species removed occurrence-by-occurrence.

    Raises
    ------
    Exception
        If reactants and products contain exactly the same species.
    """
    reduced_react = sorted(reactants)
    reduced_prod = sorted(products)
    if reduced_react == reduced_prod:
        raise Exception("Reactants and products are the same.")
    for mol in reactants:
        if mol in reduced_prod:
            reduced_prod.remove(mol)
            reduced_react.remove(mol)
    return (reduced_react, reduced_prod)
333b29afdd251bc6ad80910902fa7cb1433e493c
50,805
import random


def random_string(length=6, password_safe=False):
    """Return a random string with `length` characters of a-z, A-Z and the
    digits 0-9.

    :param length: number of characters to randomize (default 6 characters).
    :password_safe: set to `True` to exclude o, O and 0.
    """
    if password_safe:
        alphabet = 'aA1bB2cC3dD4eE5fF6gG7hH8iI9jJkKlLmMnNpPqQrRsStTuUvVwWxXyYzZ'
    else:
        alphabet = ('0aA1bB2cC3dD4eE5fF6gG7hH8iI9jJkKlLmMnNoOpPqQrRsStTuUvVwW'
                    'xXyYzZ')
    return ''.join(random.choice(alphabet) for _ in range(length))
7d38429fdf0dd11290077558f7eb598d4a43d01e
50,806
def distance_between_points_meters(x1, x2, y1, y2):
    """Euclidean distance between two points.

    Example of coordinate reference system in meters: SWEREF99TM.
    """
    dx = x2 - x1
    dy = y2 - y1
    return (dx ** 2 + dy ** 2) ** 0.5
cbdcdb3f47d68a3340ae2fbcb6a93d3564352398
50,808
def as_vcf(line):
    """Return a VCF-formatted line representation of the CONF file line.

    :param line: tab-separated record; columns 0-1 are chrom/pos, column 2
        is coverage, 3-5 are ref/alt/qual, 8-11 the remaining VCF fields
    :return: tab-joined VCF line with a synthesised variant identifier
    """
    cols = line.split('\t')
    var_identifier = 'cov_{}|editID_{}:{}'.format(cols[2], cols[0], cols[1])
    fields = [cols[0], cols[1], var_identifier, cols[3], cols[4], cols[5],
              cols[8], cols[9], cols[10], cols[11]]
    return '\t'.join(fields)
c4329bdac0833e814e2a44bcbf41cf742285697f
50,809
import os
import json


def basket_ball_wiki_es(wiki_client, init_indices):
    """Fill the elasticsearch WIKI_ES with a POI of basket ball.

    :param wiki_client: elasticsearch client for the wiki index
    :param init_indices: presumably a fixture ensuring indices exist before
        indexing -- unused here; TODO confirm against the test suite
    :return: the id of the indexed POI
    """
    fixture_path = os.path.join(
        os.path.dirname(__file__), "fixtures", "basket_ball_wiki_es.json"
    )
    with open(fixture_path, "r") as fixture:
        poi = json.load(fixture)
    poi_id = poi["id"]
    wiki_client.index(
        index="wikidata_fr", body=poi, doc_type="wikipedia", id=poi_id, refresh=True
    )
    return poi_id
072b528a1b6174954579660318f4893a9eef9873
50,810
def extend(rows, times=5):
    """Tile `rows` `times`-fold in both directions, with each step mapping
    every value through ``value % 9 + 1``.  Return the result.

    NOTE: mutates `rows` in place and returns the same list object.
    Assumes a square input (len(rows) is used for both dimensions).
    """
    grid = rows
    size = len(rows)
    # widen every existing row
    for row in grid:
        for i in range((times - 1) * size):
            row.append(row[i] % 9 + 1)
    # then append the wrapped copies of the leading rows
    for i in range((times - 1) * size):
        grid.append([value % 9 + 1 for value in grid[i]])
    return grid
867fdd9afdd92afc29e0921863f8207fe5d0f9f4
50,811
def _rummy(): """Draw him: return a hard-coded ASCII-art portrait as a list of text rows.""" return ['------------------------------------------------------------------------------------------------------------', 'MB###BHHHBBMBBBB#####MBBHHHHBBBBHHAAA&GG&AAAHB###MHAAAAAAAAAHHAGh&&&AAAAH#@As;;shM@@@@@@@@@@@@@@@@@@@@@@@@@@', 'MGBMHAGG&&AAA&&AAM##MHAGG&GG&&GGGG93X5SS2XX9hh3255X2issii5X3h9X22555XXXX9H@A. rA@@@@@@@@@@@@@@@@@@@@@@@@@@', 'BAM#BAAAAAAHHAAAHM##MBHAAAAAAAAAAAAG9X2X3hGXiii5X9hG3X9Xisi29B##BA33hGGhGB@@r ;9@@@@@@@@@@@@@@@@@@@@@@@@@@', 'BAM#MHAAAHHHAAAAHM###BHAAAAAAAAAAAAGhXX3h2iSX&A&&AAHAGGAGs;rrri2r;rSiXGA&B@@9. ,2#@@@@@@@@@@@@@@@@@@@@@@@@@', 'B&B#MHAAAAHHHAAAHM##MBHAAAAAAAAAAHAG93XSrs5Xh93h3XXX93529Xr;:,,:;;s25223AB@@@; sB@@@@@@@@@@@@@@@@@@@@@@@@@', 'B&B#BAAAAAHHHAAAHB##MBAAAAAAAAAAAHHAh5rs2AGGAhXisiissSsr;r;::,:riiiisrr,s#@@@9. ,2#@@@@@@@@@@@@@@@@@@@@@@@@', 'B&B#BAAAAAAHAAAAHM###BHA&AAAAAA&AAHA2S&#@MBHGX22s;;;;r;;:,:,,:;;rrr:,,:,.X@@@@r :9@@@@@@@@@@@@@@@@@@@@@@@@', 'BAM#MAAAAAAAAAAAAB##MBAA&AAAAAAA&AH929AHA9XhXirrir::;r;;:::,:,,:,;rsr;,.,;2@@@#, :G@@@@@@@@@@@@@@@@@@@@@@B', 'B&B#MAAAAAAHAAAAABM#MHAA&&&&&&&&&H&ss3AXisisisr;;r;::;::::,..,,,,::;rir;,;,A@@@G. ;9@@@@@@@@@@@@@@@@@@@@@#', 'B&B#MHAAAAHHAAAAABM#MHAAA&G&A&&&AG2rr2X; .:;;;;::::::::::,,,,,:,.,;::;;,;rr:@@@@X :2#@@@@@@@@@@@@@@@@@@@@', 'B&B##HAAAAHHAAAAABMMMHAA&&&&&AAA&h2:r2r..:,,,,,,,,,,,,:;:,,,,,,. ,;;;::, ;2rr@@@@2 :SB@@@@@@@@@@@@@@@@@@@', 'BGB##HAAAAAAAAAAABMMMBAA&&&&&&&&AHr ir:;;;;:,,,,,,::::,,:,:,,,,...;:;:,:,:2Xr&@@@@3. .rG@@@@@@@@@@@@@@@@@@', 'B&B@#B&&AAAAAA&&AHMMMBAA&&&&&&&&AH,.i;;rrr;::,,:::::::,,::::::,,..;,:;.;;iXGSs#@@@@A, :5#@@@@@@@@@@@@@@@@', 'B&M@@B&&AAAHAA&&AHMMMBAA&&&&&&&&AA;,;rrrrr;;::::::::::::::::::::.:;.::,:5A9r,.9@@@@@M; .;G@@@@@@@@@@@@@@@', 'B&M@@B&&AAHAAA&&AHMMMBAA&G&GG&&&AM3;rrr;rr;;;;;;:::::;;,:,::,,,..,:;;:,;2r:.:;r@@##@@@i .sH@@@@@@@@@@@@@', 'BGM@@B&&AAAHAA&&AHMMMBHAGGGG&&&&AMHs;srrr;r:;;;;::::::,..,,,,,,...,;rrrsi, . 
:,#@####@@A; ,iB@@@@@@@@@@@', 'B&#@@B&&AAAAAA&&AHMMMBAA&GGGGG&&&BHr,rirr;;;::::::::::,,,,,::,,::,.,SS;r:.;r .,A#HHMBB#@@2, :iA@@@@@@@@@', 'B&#@@B&&AAAAAA&&AHBMBBAAGGGGGGG&&H#2:sis;;;::,,:::r;rsrr23HMAXr:::,:;...,,,5s,,#BGGAAAAB@@#i. ,rG@@@@@@@', 'B&#@@BG&AAAAAA&&AHHBMHAAGGhhGGGGGA#Hrs9s;;;;r;:;s5Xrrh@@@@@@@@&5rr;. .,,;. ;;.;@Bh39hhhAM#@@Ar. ,rG#@@@@', 'BA#@@BG&AAAAAA&&AHBMMBA&GGGGGGGGGAM#3r5SsiSSX@@@#@@i. 2h5ir;;:;r;:...,,:,.,;,,3@HG99XX23&H#MMBAS, .;2H@@', 'BA#@@B&&AAAAAA&&&AHBMBAA&GGGGGGGhABMhsrirrS9#@Mh5iG&::r;..:;:,,.,...,::,,,...,A@A&h9X255XGAA93B#MX; .:X', 'BH@@@B&&AAAAAA&G&ABM#BHAGGGGGGGGG&HBAXiir;s2r;;:rrsi.,,. .....,,,,::,.,,:: :2@H&Gh9X2523AG253AM@@Ai, ,', 'MB@@@B&&AAAAAAGGAA###@#H&GGGGGGG&AHBAXXi;,. .:,,, .;:,.,;:;..,::::;;;:,,,:,srs5@B&hhh32229AG2S29GAB#@#A2; .', 'MB@@@BGGAAAAA&&GAHr ,sH#AGGhhGGG&AH&X22s:..,. . ;S:,. .,i9r;::,,:;:::,:::,,5A#BAhhhX22X9AG2i2X9hG&AB#@@B3r', 'MB@@@B&&AAAAAA&AM#;.. ;AAGhhGGG&AHGX2XXis::,,,,,Xi,.:.ri;Xir;:,...,:::;::,.:S9#AGh9X2229A&2i52X39hhG&AM@@&', 'MM@@@B&GAAAHBHBhsiGhhGi. 3MGGhGGG&HH&X52GXshh2r;;rXiB25sX2r;;:ii;,...:;:;:;:.., r#G33X2223AG2i52XX3339hGAA&&', '#M@@@B&GAM#A3hr .;S5;:, ;MAGhGGG&ABAX55X9rS93s::i::i52X;,::,,,;5r:,,,::;;;:,.i @@AXX222X&G2S52XXXX3399hhh&', '#M@@@BAB&S; .:, .,,;,;;. rBGhhGG&ABAXSS29G5issrrS,,,,,:,...,;i;rr:,:,,::;::,,r #@@B25523&G2iS2XXX3X33999h&', '#M@@@MH; ,. .;i::::;rr;, ,M&GGGh&AHAXSS2X3hXirss5;r;:;;;2#@@H9Ai;::,,,,:;:;:: ,@@@#Xi23&G2iS2XXX3X33339h&', '#M#@@#i .:;,.,::,::;&ii;.;#AGhGG&AHAXSS2XX3&hir;;s9GG@@@@@h;,,riirr;:,.:;;;. i@##@@AS2hh5iS222XXXX3999hG', '#M@@@@:.;,,:r,,;r,,..h#sr: rHAGhG&AHAXSi52X39AAir::is;::,,. .::,sssrr;,,;r: ,@@MM#@@#HBA2iiSS5522XX39hhG', '#M@@@@r.sr,:rr::r;,, ,As:, :B&hh&ABAXSiSS5229HHS3r;rSSsiiSSr;:,,,:;;r;;; @@#BMM#@@@@@@@@#MH&93XXXXX3G', '#M@@@@A,:r:,:i,,rr,,. ;;;,. 
;BGhhGAHAX5529hAAAM#AH#2i25Ss;;;:.....,rSi2r M@@MMMM##@#@@@@@@@@@@@@@@#MHA', '#M@@@@M::rr::SS,;r;::.:;;r:rHAh9h&ABM##@@@@@@@@ABAAA25i;::;;;:,,,,:r32: H@@#MM######@@@@@@@@@@@@@@@@@#', '#M@@@@@5:;sr;;9r:i;,.,sr;;iMHhGABM#####@@@@@@@BHH&H@#AXr;;r;rsr;;ssS; H@@##########@@@##@@@@@@@@@@@@#', '#M@@@@##r;;s;:3&;rsSrrisr:h#AHM#######BM#@@@#HHH9hM@@@X&92XX9&&G2i, .,:,@@@##M########@@@####@@@@@@@@@##', '#M#@@@M@2,:;s;;2s:rAX5SirS#BB##@@@##MAAHB#@#BBH93GA@@@2 2@@@MAAHA .,,:,,. G@@#M#################@@@@@@#####', '#M#@@#M@;,;:,,,;h52iX33sX@@#@@@@@@@#Ah&&H####HhA@@@@@@@;s@@@@H5@@ . r@@##M###########@###@@@@@@#######', '#M#@@@#r.:;;;;rrrrrri5iA@@#@@@@@@@@#HHAH##MBA&#@@@@@@@@3i@@@@@3:, ,@@#M############@@###@@@@@########', '#M@@@@r r::::;;;;;;rirA@#@@@@@@@@@@@#MGAMMHBAB@@@@@@@@@#2@@@@#i .. #@##M#####@###@@@@###@@@@##########', '#M#@@@ 2;;;;;;rr;rish@@#@#@@@@@@@@@@B&hGM#MH#@@@@@@@@@@3;,h@. .. :@@MM#######@@@@#####@@@@###########', '#M@@#A ;r;riirrrr;:2S@###@@@@@@@@@@@#AH#@#HB#@@@@@@@@@@@@2A9 @@#BMMM############@#@@@####M#######', '#M@MM# ,:,:;;,5ir@B#@@@@@@@@@@@@@@@@@#MMH#@@@@@@@@@@@@r Ms B@#MMMMMM####@###@@#@@@@#####M######@', '##Mh@M . ...:;;,:@A#@@@@@@@@@@@#@@@@@@#MMHAB@@@@#G#@@#: i@@ r@@#MMM#######@@@@#@@@@@@#####M#####@@', '#H3#@3. ,. ... :@@&@@@@@@@@@@@@@#@@#@@@MMBHGA@H&;:@@i :B@@@B .@@#MM####@@@##@@@#@@@@@#######M##M#@@@', 'M&AM5i;.,. ..,,rA@@MH@@@@@@@@@@@@@##@@@@@MMMBB#@h9hH#s;3######, .A@#MMM#####@@@@@##@@@#@@#####M#####M39B']
f9ffea9f874b71d0efa43528442cac1b805e08a8
50,812
def _FindAndRemoveArgWithValue(command_line, argname): """Given a command line as a list, remove and return the value of an option that takes a value as a separate entry. Modifies |command_line| in place. """ if argname not in command_line: return '' location = command_line.index(argname) value = command_line[location + 1] command_line[location:location + 2] = [] return value
07dc13e9ebfadd37ca0ff24de1407869b8667508
50,813
def wait_for_vanish(self, timeout=None):
    """DEPRECATED: backwards-compatible alias for ``wait_until_vanished``.

    Emits a deprecation warning, delegates to :meth:`wait_until_vanished`
    with the same ``timeout``, and returns ``self`` to allow chaining.
    """
    self.logger.warning(
        '"wait_for_vanish" command is deprecated, use "wait_until_vanished" instead!')
    self.wait_until_vanished(timeout=timeout)
    return self
f8534f9651182ca4d77c90025035a276c8f5bacd
50,814
def get_taxonomy_collapser(level, md_key='taxonomy', unassignable_label='unassigned'):
    """Build a collapser function to pass to ``table.collapse``.

    level: index of the deepest taxonomic level to keep when collapsing
        on the ``md_key`` observation-metadata category
    md_key: metadata category holding the taxonomy (string or list)
    unassignable_label: label used when no taxonomy is available
    """
    def _collapse(id_, md):
        try:
            # metadata stored as a single semicolon-delimited string
            names = [piece.strip() for piece in md[md_key].split(';')]
        except AttributeError:
            try:
                # metadata stored as an iterable of level names
                names = [piece.strip() for piece in md[md_key]]
            except TypeError:
                # no metadata listed for this observation
                names = [unassignable_label]
        except TypeError:
            # happens when the table is empty (md is None)
            names = [unassignable_label]
        return ';'.join(names[:level + 1])
    return _collapse
59dd2496cfca03d41379469eee7769ba5e08ec63
50,815
def get_start_and_end():
    """Prompt the user for a starting and an ending integer.

    Re-prompts until both inputs parse as integers.
    :return: start: int, end: int
    """
    while True:
        try:
            start = int(input("start int: "))
            end = int(input("end int: "))
        except ValueError:
            print("you did not enter proper ints. try again...")
        else:
            return start, end
12dc53ebfa9b62e14c8f5ed741f5aa85684b673a
50,817
def rank(p1, p2):
    """Rank two paths by number of relationships.

    :param p1: First path object
    :param p2: Second path object
    :return: 1 if p1 has more relationships than p2, 0 if equal, else -1
    """
    count1 = len(p1.relationships)
    count2 = len(p2.relationships)
    # standard three-way comparison via boolean arithmetic
    return (count1 > count2) - (count1 < count2)
666a08834243c87458f28fca4936dcde67165935
50,818
import hashlib


def generate_hashed_ports_num(pod_uid, port_name, port_count, port_start, port_end):
    """Deterministically derive ``port_count`` port numbers for a pod.

    For each index i, the md5 of "[podUid][portName][i]" is split into three
    hex segments whose sum, taken modulo (port_end - port_start) and offset
    by port_start, gives the port.  Ports are returned as strings.
    """
    span = port_end - port_start
    ports = []
    for index in range(port_count):
        digest = hashlib.md5(
            "[{}][{}][{}]".format(pod_uid, port_name, str(index)).encode("utf8")
        ).hexdigest()
        total = int(digest[:12], 16) + int(digest[12:24], 16) + int(digest[24:], 16)
        ports.append(str(total % span + port_start))
    return ports
66ad221cd4aec8e33a1ee79a23c27bb6af239392
50,819
import os


def ensure_expandusr(path):
    """Expand ``~[user]`` in *path* if present; otherwise return it unchanged."""
    if '~' not in path:
        return path
    return os.path.expanduser(path)
302a202b6bed446cd9358e3010b526f0168d594b
50,822
import math


def atms(RT, TT, DNT, GAMAL, R):
    """Internal routine used by REFRO: refractive index and derivative
    with respect to height for the stratosphere.

    Given:
        RT     height of tropopause from centre of the Earth (metre)
        TT     temperature at the tropopause (deg K)
        DNT    refractive index at the tropopause
        GAMAL  constant of the atmospheric model = G MD/R
        R      current distance from the centre of the Earth (metre)

    Returned:
        DN     refractive index at R
        RDNDR  R * rate the refractive index is changing at R

    Derived from P.T.Wallace's Starlink routine (14 July 1995).
    """
    decay = GAMAL / TT
    # exponential fall-off of the refractivity above the tropopause
    excess = (DNT - 1.0) * math.exp(-decay * (R - RT))
    return 1.0 + excess, -R * decay * excess
f370c7b7b030867049b457ece062d7435c15c30b
50,824
def check_statement():
    """Create a test fixture to compare statements."""
    def check_statement(actual, test):
        """Assert that ``actual`` matches the expected ``test`` statement."""
        assert actual.keys() == test.keys()
        for field in ('id', 'description'):
            assert actual[field] == test[field]
        # MOA doesn't have direction?
        if 'direction' in test:
            assert actual['direction'] == test['direction']
        assert actual['evidence_level'] == test['evidence_level']
        assert actual['proposition'].startswith('proposition:')
        for field in ('variation_origin', 'variation_descriptor'):
            assert actual[field] == test[field]
        if 'therapy_descriptor' in test:
            assert actual['therapy_descriptor'] == test['therapy_descriptor']
        else:
            assert 'therapy_descriptor' not in actual
        for field in ('disease_descriptor', 'method'):
            assert actual[field] == test[field]
        assert set(actual['supported_by']) == set(test['supported_by'])
        assert actual['type'] == test['type']
    return check_statement
85116ce0b18a56b4b95bed9d1223dea393afff72
50,827
import platform


def get_os_name() -> str:
    """Get operating system name that Node.js repos know about."""
    system = platform.system()
    return system.lower()
b70c031670b27ddf2444d76c7495311a6f9e7eee
50,828
import numpy


def get_values(loss_type, assets, time_event=None):
    """
    :returns: a numpy array of each asset's value for the given
        ``loss_type`` (and optional ``time_event``).
    """
    values = [asset.value(loss_type, time_event) for asset in assets]
    return numpy.array(values)
10bfb36673143db21ef57872eb70ddfc32d056f4
50,830
def get_options(options):
    """Build dcc.Dropdown option dicts from a list of value strings.

    Each value becomes {'label': value.title(), 'value': value}.
    """
    return [{'label': option.title(), 'value': option} for option in options]
61df747bf10a08aff481c509fbc736ef406d006f
50,832
def get_columns_from_data(data, columns):
    """Select the given columns (all rows) from a pandas data frame.

    Thin wrapper around label-based ``.loc`` indexing.

    :param data: Pandas data frame
    :param columns: columns to select
    :return: Pandas data frame restricted to ``columns``
    """
    selected = data.loc[:, columns]
    return selected
5638efbf632c80b81eb53c98376a12599d6810a5
50,833
def CutDirectory(Path, CutPath):
    """Strip the leading ``CutPath\\`` prefix from ``Path``.

    Used to keep trove's file structure (Windows-style separators).
    """
    prefix = CutPath + "\\"
    return Path.replace(prefix, "")
315f3fef039e61d321387c000c4c9d26de10beba
50,834
def get_celery_worker_module_name(app_id):
    """Return the python module name of the queue worker script.

    Args:
        app_id: The application ID.

    Returns:
        A string of the module name.
    """
    module_prefix = 'app___'
    return module_prefix + app_id
52b79ec69da4c064062fcfb5dcd7edb94a095809
50,835
def solution(l):
    """Count "lucky triples": indices i < j < k with l[i] | l[j] and l[j] | l[k].

    Values must lie in [1, 999999] and the list length in [3, 2000];
    otherwise 0 is returned.

    The divisor-counting step is memoized: for each middle index j, the
    number of valid k's (elements after j divisible by l[j]) is computed
    once and reused for every i that divides l[j], avoiding the repeated
    inner scans of the brute-force approach.
    """
    if any(v < 1 or v > 999999 for v in l):
        return 0
    n = len(l)
    if n < 3 or n > 2000:
        return 0
    # pair_counts[j]: number of k > j with l[j] dividing l[k]; -1 = not yet known
    pair_counts = [-1] * n
    total = 0
    for i in range(n - 2):
        for j in range(i + 1, n - 1):
            if l[j] % l[i]:
                continue
            if pair_counts[j] < 0:
                pair_counts[j] = sum(
                    1 for k in range(j + 1, n) if l[k] % l[j] == 0)
            total += pair_counts[j]
    return total
0387a39e6d087f381aece01db2718a9e90ba642c
50,836
def update_statutory_deduction(employee, local_service_tax):
    """Set and persist an employee's local service tax deduction.

    Returns the saved statutory-deduction record.
    """
    record = employee.statutorydeduction
    record.local_service_tax = local_service_tax
    record.save()
    return record
29fdfb85721d8d70acc711299e44ced52e4f057a
50,837
import torch


def fista_momentum(cur_Z, prev_Z, momentum):
    """FISTA extrapolation step.

    Combines the last two sparse codes with a momentum-weighted difference
    and updates the momentum term for the next iteration.

    :param cur_Z: sparse code from the current step
    :param prev_Z: sparse code from the previous step
    :param momentum: momentum term
        # NOTE(review): passed straight to torch.sqrt, so it is presumably
        # a tensor, not a Python float -- confirm at call sites
    :return: (extrapolated sparse code, next momentum value)
    """
    next_momentum = (1 + torch.sqrt(1 + 4 * (momentum ** 2))) / 2
    weight = (momentum - 1) / next_momentum
    next_Z = cur_Z + (cur_Z - prev_Z) * weight
    return next_Z, next_momentum
0b8435cae2b006eb18604d71933d46a4c8e3c5d6
50,838
def blazar_find_old_host_alloc(db):
    """Find computehost allocations tied to expired leases.

    Runs one SQL query (no row limit) joining allocations through their
    reservations to leases whose end date has already passed.
    """
    query = '''\
        SELECT ca.id, l.id AS lid, ch.hypervisor_hostname
        FROM blazar.computehost_allocations ca
        JOIN blazar.reservations r ON ca.reservation_id=r.id
        JOIN blazar.leases l ON r.lease_id=l.id
        JOIN blazar.computehosts ch ON ca.compute_host_id=ch.id
        WHERE ca.deleted is Null
        AND l.end_date < curdate()
    '''
    return db.query(query, limit=None)
717bcb2d788be24d8c1b96bab62d60c54e196a25
50,839
import collections


def separate(xs, blocks):
    """Partition ``xs`` into lists according to the matching labels in
    ``blocks``, returning the groups in sorted-label order.
    """
    groups = collections.defaultdict(list)
    for item, label in zip(xs, blocks):
        groups[label].append(item)
    return [groups[label] for label in sorted(groups)]
967451fd3b4505b91e354e5714a7e62c44b62231
50,840
def connect_streets(st1, st2):
    """
    Tells if streets `st1`, `st2` are connected.

    @param st1 street 1
    @param st2 street 2
    @return None, or a tuple of ``(i, j)`` pairs where ``i`` (0 or 1)
        selects the first/last extremity of the first street and ``j``
        selects the first/last extremity of the second street.

    ``((0, 1),)`` means the first point of the first street touches the
    second extremity of the second street.
    """
    ends1 = (st1[0], st1[-1])
    ends2 = (st2[0], st2[-1])
    links = tuple(
        (i, j)
        for i, p1 in enumerate(ends1)
        for j, p2 in enumerate(ends2)
        if p1 == p2
    )
    return links if links else None
77ee8f4c344277b09340cc3a38e16ee5ae11f702
50,842
def sum_of_n(n):
    """Sum all integers from 0 to ``n`` inclusive.

    Uses the closed form n*(n+1)/2 instead of the previous recursive
    implementation, which hit the interpreter's recursion limit for
    n of roughly 1000 or more and recursed forever on negative input.

    :param n: non-negative integer upper bound
    :return: the sum 0 + 1 + ... + n
    :raises ValueError: if ``n`` is negative
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    return n * (n + 1) // 2
cc804ad0fb745ebdc6badfb7d50c1d0fac2e7c5b
50,843
def stiff_v(nodes):
    """Apply a cyclic 5-point stencil along the point axis of a curve.

    nodes: (B, 1, N, 2) tensor of points; neighbours wrap around via
    ``roll`` along dim 2, so the curve is treated as closed.

    NOTE(review): the coefficients applied are [-1, +4, -6, -4, -2] for
    [P(i+2), P(i+1), P(i), P(i-1), P(i-2)].  The usual biharmonic
    (bending/stiffness) stencil is symmetric: [-1, +4, -6, +4, -1].
    The signs/weights on the backward terms look suspicious -- confirm
    whether the asymmetry is intentional before changing anything.
    """
    Pf = nodes.roll(-1, dims=2)   # P(i+1): next point, wrapped
    Pff = Pf.roll(-1, dims=2)     # P(i+2)
    Pb = nodes.roll(1, dims=2)    # P(i-1): previous point, wrapped
    Pbb = Pb.roll(1, dims=2)      # P(i-2)
    K = - Pff + Pf * 4 - 6 * nodes - Pb * 4 - Pbb * 2
    return K
e83bdac33ace21cf4932e18995ab5682aea28991
50,844
import torch
def unit_test(input_vector):
    """
    This is to verify the diverse loss class using the normal for-loop method.

    Computes, per sample, the sum of cosine similarities over all ordered
    pairs of the k component vectors, normalised by k*(k-1), and then
    averages over the batch.

    :param input_vector: list of k tensors, each of shape (b, d); they are
        stacked to (k, b, d) and transposed to (b, k, d)
        # assumes all entries share one shape and dtype -- TODO confirm
    :return: scalar tensor -- the batch-averaged pairwise cosine similarity
    """
    # round-trip through numpy so the list of tensors stacks into one array
    input_vector = [tensor.numpy() for tensor in input_vector]
    input_vector = torch.as_tensor(input_vector).transpose(0, 1)
    b, k, d = input_vector.shape
    loss_values = 0
    for i in range(b):
        sample = input_vector[i]
        loss = 0
        for j in range(k):
            vector1 = sample[j, :]
            norm1 = torch.norm(vector1)
            for jj in range(k):
                if j == jj:
                    # skip self-similarity
                    continue
                vector2 = sample[jj, :]
                norm2 = torch.norm(vector2)
                dot = vector1 @ vector2
                # cosine similarity between the two component vectors
                loss += dot/(norm1*norm2)
        # average over the k*(k-1) ordered pairs of this sample
        loss_values += loss/(k*(k-1))
    return loss_values/b
fec37d86b41d0b1a11a8e54a297b42c6e552d04e
50,845
def get_nested_serializer(instance, key):
    """Return the nested serializer declared under ``key`` on ``instance``,
    bound to the matching slice of validated data and already validated
    (``is_valid`` raises on failure).
    """
    declared_field = instance._declared_fields.get(key)
    nested = type(declared_field)(data=instance.validated_data[key])
    nested.is_valid(raise_exception=True)
    return nested
2423289a4455783880f83d0804f74d0ceded03ce
50,846
def word2features(sent, i):
    """Build the CRF feature list for token ``i`` of ``sent``.

    ``sent`` is a sequence of (word, POS-tag) pairs; the features describe
    the token itself plus a window of up to two tokens on each side.
    """
    word, postag = sent[i][0], sent[i][1]
    features = ['bias', 'word=' + word, 'word_tag=' + postag]
    if i > 0:
        # immediate left neighbour
        features += ['word[-1]=' + sent[i-1][0],
                     'word[-1]_tag=' + sent[i-1][1]]
    if i > 1:
        # second-left neighbour and the left bigram
        features += ['word[-2]=' + sent[i-2][0],
                     'word[-2, -1]=' + sent[i-2][0] + sent[i-1][0],
                     'word[-2]_tag=' + sent[i-2][1]]
    if i < len(sent) - 1:
        # immediate right neighbour
        features += ['word[1]=' + sent[i+1][0],
                     'word[1]_tag=' + sent[i+1][1]]
    if i < len(sent) - 2:
        # second-right neighbour and the right bigram
        features += ['word[2]=' + sent[i+2][0],
                     'word[1, 2]=' + sent[i+1][0] + sent[i+2][0],
                     'word[2]_tag=' + sent[i+2][1]]
    return features
781a7bf8e8d39e05a09befae254b306ba6d2eb7a
50,847
def _highlight(timing: float) -> str: """Highlights a timing based on whether it is under 60 fps.""" if timing < 1 / 60: return f"[#57A773] {timing}" return f"[red] {timing}"
e4481d0631394850d00900edec3acd5c7f5b0cd6
50,848
import itertools


def repeat(value, times=-1):
    """:yaql:repeat

    Returns collection with value repeated.

    :signature: value.repeat(times => -1)
    :receiverArg value: value to be repeated
    :argType value: any
    :arg times: how many times repeat value. -1 by default, which means that
        returned value will be an iterator to the endless sequence of values
    :argType times: int
    :returnType: iterable

    .. code::

        yaql> 1.repeat(2)
        [1, 1]
        yaql> 1.repeat().take(3)
        [1, 1, 1]
    """
    if times >= 0:
        return itertools.repeat(value, times)
    # negative count means "forever"
    return itertools.repeat(value)
1554022dbc709ff40e1a600cd892c8a07782c6e1
50,849
def solution(pence: int) -> int:
    """Returns the number of different ways to make X pence using any
    number of UK coins, via bottom-up dynamic programming.

    >>> solution(500)
    6295434
    >>> solution(200)
    73682
    >>> solution(50)
    451
    >>> solution(10)
    11
    """
    denominations = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence  # ways[0] = 1: a single way to make nothing
    for coin in denominations:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]
d7670650fe734b432bbfb55f13c545905fee3bce
50,850
def remove_nans(frame):
    """Drop rows containing NaN values.

    Args:
        frame (pandas Series or DataFrame): column(s)

    Returns:
        The same kind of structure with NaN rows removed.
    """
    return frame.dropna()
b6829f06c5fa130f1c42590359e0dc8e15da4e7b
50,851
from typing import List


def sortListAlphabetical(arr: List[str]) -> List[str]:
    """Return a new list with the strings of ``arr`` sorted alphabetically.

    The previous implementation ignored its input and returned a
    hard-coded list that only matched the doctest example; it now
    actually sorts ``arr``.

    >>> sortListAlphabetical(["welcome","to","open","source"])
    ['open', 'source', 'to', 'welcome']
    """
    return sorted(arr)
20bd7c190a6bb366d501f5a210e7058a43d2af1e
50,852
def getMainDFsavedInStep(path_to_results, path_to_directory, file_name, ending):
    """Assemble the full path to a results file.

    Returns ``<path_to_results>/<path_to_directory>/<file_name><ending>``.
    """
    return "{}/{}/{}{}".format(
        path_to_results, path_to_directory, file_name, ending)
7590e5a75da06743d016331d5824984602f9aec0
50,855
def filtered_dict(dictionary: dict, threshold, invert: bool = False):
    """Filter dictionary entries against a threshold.

    By default, entries whose value is below ``threshold`` are dropped;
    with ``invert=True`` only those below the threshold are kept.

    :param dictionary: the dictionary to filter
    :param threshold: the cut-off value
    :param invert: flip which side of the threshold survives
    :return: the filtered dictionary
    """
    result = {}
    for key, value in dictionary.items():
        if (value < threshold) == invert:
            result[key] = value
    return result
de5687a625fd1d5dfa86e9fe06a8cd85b8a899d9
50,857
import os


def canonicalize_path(cwd, path):
    """Canonicalize ``path`` relative to working directory ``cwd``.

    :param cwd: directory relative paths are interpreted against
    :param path: absolute or relative path
    :returns: the absolute, normalized path
    """
    if os.path.isabs(path):
        return os.path.abspath(path)
    return os.path.abspath(os.path.join(cwd, path))
0342d2a5f6e5780bdcfd51a0da237999a12c906f
50,858
def input_moves(msg=''):
    """Prompt until the player enters a positive integer number of moves.

    :param msg: prompt shown to the player
    :return: the validated number of moves (int > 0)
    """
    while True:
        try:
            moves = int(input(msg))
        except ValueError:
            # narrowed from a bare ``except`` (which also swallowed
            # KeyboardInterrupt/SystemExit) to the actual parse failure
            print("Moves must be an integer larger than 0")
            continue
        if moves > 0:
            return moves
        print("Moves must be an integer larger than 0")
129bfc1b5ea0fecdb9e42110ff7def91644abbca
50,861
def dict_with_keys(dictionary, keys):
    """Return a copy of ``dictionary`` restricted to the given keys.

    Args:
        dictionary(dict): dictionary to filter keys
        keys(iterable): keys to keep (a missing key raises KeyError)

    Returns:
        dict: copy including only the specified keys
    """
    filtered = {}
    for key in keys:
        filtered[key] = dictionary[key]
    return filtered
e93d547b515269c9e2320e41d15ab39ae363c209
50,862
def codegen_reload_data():
    """Parameters to codegen used to generate the fn_datatable_utils package.

    Returns the dict consumed by resilient-circuits ``codegen --reload``:
    each key enumerates the customization objects (fields, functions,
    workflows, ...) owned by the package, so re-running codegen
    regenerates the same artifacts.
    """
    reload_params = {"package": u"fn_datatable_utils",
                     "incident_fields": [],
                     "action_fields": [],
                     "function_params": [u"dt_utils_cells_to_update", u"dt_utils_datatable_api_name", u"dt_utils_row_id", u"dt_utils_search_column", u"dt_utils_search_value", u"incident_id"],
                     "datatables": [u"dt_utils_test_data_table"],
                     "message_destinations": [u"fn_datatable_utils"],
                     "functions": [u"dt_utils_delete_row", u"dt_utils_get_row", u"dt_utils_update_row"],
                     "phases": [],
                     "automatic_tasks": [],
                     "scripts": [],
                     "workflows": [u"example_data_table_utils_delete_row", u"example_data_table_utils_get_row", u"example_data_table_utils_update_row"],
                     "actions": [u"Delete DT Row", u"Get DT Row", u"Update DT Row"]
                     }
    return reload_params
9ddd053cc30724dbdecde9105135187fbf2a951e
50,863
def iscc_clean(i):
    """Remove a leading scheme (e.g. ``iscc:``), surrounding whitespace,
    and all dashes from ``i``.
    """
    body = i.split(":")[-1]
    return body.strip().replace("-", "")
66a6705d48528b7c182fc051c9793bae3506f0dc
50,864
def reverse_range(object):
    """Yields reverse range of object: list(reverse_range([1, 2, 3])) -> [2, 1, 0]."""
    # parameter name kept for interface compatibility even though it
    # shadows the ``object`` builtin
    size = len(object)
    return range(size - 1, -1, -1)
9bacca7d17e3ae5678b45c37eec1ea8d0fc78cf6
50,865
import threading


def start_agent_thread(cls, **kwargs):
    """Instantiate ``cls`` with ``kwargs`` and run it in a new daemon thread.

    Returns the started Thread object.
    """
    worker = threading.Thread(target=cls(**kwargs).run)
    worker.daemon = True
    worker.start()
    return worker
2f7adfe81df347ee83932293b3026ab285c0b516
50,866
def combine_quality(dqarr1, dqarr2):
    """Combine two data-quality arrays into a third with bitwise OR.

    The bitwise nature of DQ flags means the arrays merge without knowing
    the meaning of individual flags.  Either argument may be None (no DQ
    information), in which case the other is returned as-is; the result
    is None only when both inputs are None.

    :Parameters:
        dqarr1: numpy array or None
        dqarr2: numpy array or None
    :Returns:
        numpy array or None
    """
    if dqarr1 is None:
        return dqarr2
    if dqarr2 is None:
        return dqarr1
    return dqarr1 | dqarr2
0d24c74309b761d054455b5d77061330d48e20e2
50,867
import argparse def _parse_args(): """Parse command line arguments and share the right ones with uwsgi.""" parser = argparse.ArgumentParser('Entrypoint for starting up uwsgi.') return parser.parse_known_args()
25c1af629b09864c0a0c29fa06b5ca8259b9c7a2
50,869
def gamma(configuration):
    """Sprague-Grundy value of a Nim position: the XOR of all heap sizes."""
    nim_sum = 0
    for heap in configuration:
        nim_sum ^= heap
    return nim_sum
bccd37b1122873e7f4b6a3aa5fdeff96f272df5b
50,870
def list_to_string(a_list, quote=False):
    """Join list items into a comma-separated string.

    :param a_list: the list to convert
    :param quote: surround each item with double quotes when True
    :return: the string version of the list
    """
    template = '"{0}"' if quote else '{0}'
    return ', '.join(template.format(item) for item in a_list)
d7a517225e1a5a70320f05b0c7c69e3b5c558f4c
50,872
import os


def getSubpackages(name):
    """List the dotted names of all sub-packages found under ``name``.

    A directory counts as a package when it contains ``__init__.py``;
    the dotted name is the directory path with separators replaced by dots.
    """
    packages = []
    for dirpath, _dirnames, _filenames in os.walk(name):
        if os.path.isfile(os.path.join(dirpath, '__init__.py')):
            packages.append(".".join(dirpath.split(os.sep)))
    return packages
a876a18694e5124d345a0f7b1db5e8b9291983b9
50,873
def _tc_minmax_ ( tree , var , cuts = '' , delta = 0.0 ) : """Get min/max for the certain variable in chain/tree >>> chain = ... >>> mn,mx = chain.vminmax('pt') >>> mn,mx = chain.vminmax('pt','y>3') """ if hasattr ( tree , 'pstatVar' ) : if cuts : s = tree.pstatVar ( var , cuts ) else : s = tree.pstatVar ( var ) else : if cuts : s = tree.statVar ( var , cuts ) else : s = tree.statVar ( var ) mn,mx = s.minmax() if mn < mn and 0.0 < delta : dx = delta * 1.0 * ( mx - mn ) mx += dx mn -= dx return mn , mx
4bd4389e34bacae622162792181aa72007fc0f25
50,874
def unwind_block_transactions(block):
    """Yield a block for each transaction in given block"""
    def _attach(tx):
        # one copy of the block per transaction, with the tx stored at 'tx'
        return block.merge({'tx': tx})
    return block['block']['transactions'].map(_attach)
79ec770ba28450575cbbd063c27afcae57029abf
50,875
def argtopk_preprocess(a, idx):
    """Preparatory step for argtopk.

    Pairs the data with its original indices in a tuple.
    """
    return (a, idx)
55e7566a5a88bf45a0eda76513a10528180dc8a9
50,876
def bone_siblings(obj, bone):
    """Return the names of the siblings of ``bone`` (bones sharing its parent).

    An empty list is returned when the bone has no parent.
    """
    parent = obj.data.bones[bone].parent
    if parent is None:
        return []
    return [child.name for child in parent.children if child.name != bone]
f63d16eee87a315cbf9213fd8a86ad248370ceb7
50,877
import hashlib


def sha1_hasher(s):
    """Return the raw sha1 digest (bytes) of ``s``."""
    hasher = hashlib.sha1(s)
    return hasher.digest()
a214bc5cc78e7398373f7dd7c50d1e3ef59d1f8f
50,879
def pathto(*args):
    """Mock of sphinx' pathto() just for the homepage.

    Always resolves to the empty path so the sphinx templates can be
    re-used unchanged.
    """
    del args  # unused: the stub ignores whatever the template passes
    return ""
978471934469cb0c74cc476f2aae9c800953c48b
50,880
def generate_stack_id(stack_name: str) -> str:
    """Generate a deterministic fake CloudFormation stack ARN for tests."""
    fixed_uuid = "bd6129c0-de8c-11e9-9c70-0ac26335768c"
    prefix = "arn:aws:cloudformation:ap-southeast-2:123456789012:stack"
    return prefix + "/" + stack_name + "/" + fixed_uuid
130749de4bf983b22a26aaac5412c55c5baf5e37
50,883