content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_from_two_key(input_dict, from_key, by_key, by_value, default=None):
    """Look up the value in ``input_dict[by_key]`` at the index where
    ``by_value`` occurs in ``input_dict[from_key]``.

    Both keys should map to lists of the same length. Returns ``default``
    when either key is missing or ``by_value`` is not present.
    """
    if (from_key in input_dict
            and by_key in input_dict
            and by_value in input_dict[from_key]):
        position = input_dict[from_key].index(by_value)
        return input_dict[by_key][position]
    return default
5716da5e1c5ec20a9984b4b42c7887c9eab48fb0
15,146
def file_extentions():
    """returns dict of file types"""
    blob_types = ['csv', 'txt', 'json']
    matrix_types = ['xls', 'xlsx']
    return {'blob': blob_types, 'matrix': matrix_types}
81e1a6d73762332002e243c88fd73fd677fc5795
15,148
import os
import json


def _preparePatchedEnvironment(settings, context):
    """Create a copy of the current environment patched for this context.

    Args:
        settings (dict) : dictionary holding all our settings
        context (string) : context to prepare the env for

    Returns:
        (dict) : patched environment mapping
    """
    env = dict(os.environ)
    # expose the full settings to child processes as JSON
    env['vfxtest_settings'] = json.dumps(settings)
    dcc_settings = settings['dcc_settings_path']
    cwd = os.path.abspath(settings['cwd']).replace('\\', '/')
    # prepend the working directory to PYTHONPATH
    path_tokens = []
    existing_pythonpath = env.get('PYTHONPATH', '')
    if existing_pythonpath != '':
        path_tokens = existing_pythonpath.split(os.pathsep)
    path_tokens.insert(0, cwd.replace('\\', '/'))
    env['PYTHONPATH'] = os.pathsep.join(path_tokens)
    dcc_pythonpath = '{}/PYTHONPATH'.format(dcc_settings)
    # NOTE(review): the 'context' argument is shadowed here by the
    # settings value — preserved as-is.
    context = settings.get('context', '')
    lowered = context.lower()
    # maya contexts
    if lowered.find('maya') != -1:
        maya_version = settings['context_details'][context].get('version', '')
        env['PYTHONPATH'] = '{}{}{}'.format(dcc_pythonpath, os.pathsep,
                                            env['PYTHONPATH'])
        env['MAYA_APP_DIR'] = '{}/{}.vfxtest.{}'.format(dcc_settings, context,
                                                        maya_version)
        env['MAYA_SCRIPT_PATH'] = '{}{}{}/helpers'.format(cwd, os.pathsep,
                                                          dcc_settings)
        env.pop('MAYA_PLUG_IN_PATH', None)
        env.pop('MAYA_MODULE_PATH', None)
    # houdini/hython contexts
    if lowered.find('hython') != -1 or lowered.find('houdini') != -1:
        env['PYTHONPATH'] = '{}{}{}'.format(dcc_pythonpath, os.pathsep,
                                            env['PYTHONPATH'])
        env['HOUDINI_USER_PREF_DIR'] = \
            '{}/houdini.vfxtest.__HVER__'.format(dcc_settings)
        env.pop('HSITE', None)
    return env
a639fd604e0d942df60254fcc0e9930947a30093
15,150
import os


def nice_open_dir(dirname):
    """Ask for permission to overwrite files in an existing directory.

    If *dirname* does not exist yet, permission is granted implicitly.

    @param dirname name of the output directory to open
    @return overwrite permission
    """
    if not os.path.isdir(dirname):
        return True
    print(dirname, "already exists, ok to overwrite files in it? (y/n)", end=' ')
    answer = input("")
    return len(answer) > 0 and answer[0] in ["Y", "y"]
2b6ecdd1c3ab693ba2effe7107780bf2ffcc2da8
15,151
def get_last_update(df):
    """Return the most recent entry of the ``DateTime`` column.

    :param df: df containing at least a col called DateTime
    :return: Max DateTime
    """
    timestamps = df['DateTime']
    return max(timestamps)
18695277a7c0df02077d4a137d6954735d08a72d
15,152
import os
import pickle

import torch


def get_activations(out_dir, task_id=-1, internal=True, vanilla_rnn=True):
    """Load the stored hidden activations of all trained tasks.

    Which pickle file is read depends on the network type. For vanilla
    RNNs, :math:`h_t` are the internal activations
    ("int_activations.pickle") and :math:`y_t` the external ones
    ("activations.pickle"). For LSTMs, :math:`c_t` is internal and
    :math:`h_t` external.

    Args:
        out_dir (str): The directory to analyse.
        task_id (int, optional): Id of the last task to load (-1 loads
            all trained tasks).
        internal (bool, optional): If ``True``, load the internal
            recurrent activations; else the external ones.
        vanilla_rnn (bool, optional): Whether the network is a vanilla
            RNN (as opposed to an LSTM).

    Return:
        (tuple): Tuple containing:

        - **activations** (list): The hidden activations of all tasks.
        - **all_activations** (torch.Tensor): The hidden activations of
          all tasks, concatenated along dim 1.
    """
    if (vanilla_rnn and internal) or not (vanilla_rnn or internal):
        filename = 'int_activations'
    else:
        filename = 'activations'
    with open(os.path.join(out_dir, "%s.pickle" % filename), "rb") as f:
        activations = pickle.load(f)
    # -1 means "all trained tasks"; otherwise make the bound inclusive.
    if task_id == -1:
        task_id = len(activations)
    else:
        task_id += 1
    selected = activations[:task_id]
    # Concatenate the selected activations across all tasks.
    all_activations = torch.tensor(())
    for act in selected:
        all_activations = torch.cat((all_activations, act), dim=1)
    return selected, all_activations
426c10c6e35d93cb8800e7a34fd217675da85c0e
15,153
def get_vpsa(session, vpsa_id, return_type=None, **kwargs):
    """Retrieve details for a single VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type vpsa_id: int
    :param vpsa_id: The VPSA 'id' value as returned by get_all_vpsas.
        For example: '2653'. Required.

    :type return_type: str
    :param return_type: If 'json', a JSON string is returned; otherwise
        a Python dictionary. Optional.

    :rtype: dict, str
    :returns: A dictionary or JSON string depending on return_type.
    """
    vpsa_id = str(vpsa_id) if type(vpsa_id) is int else vpsa_id
    if not vpsa_id.isdigit():
        raise ValueError('The VPSA ID should be a positive integer.')
    path = '/api/vpsas/{0}.json'.format(vpsa_id)
    return session.get_api(path=path, return_type=return_type, **kwargs)
9b2c33c7ae772cf602528e10618117ccc12dfc6e
15,157
def solve_tridiag_array(a, b, c, d, overwrite_bd=False):
    """Solve many tridiagonal systems at once via the Thomas algorithm.

    Same parameters as for solve_tridiag(), but every input is an array
    of shape (n, num_equations) — one column per system.
    """
    num_rows = d.shape[0]  # number of equations per system
    if not overwrite_bd:
        b = b.copy()
        d = d.copy()
    # forward elimination
    for row in range(1, num_rows):
        factor = a[row, :] / b[row - 1, :]
        b[row, :] = b[row, :] - factor * c[row - 1, :]
        d[row, :] = d[row, :] - factor * d[row - 1, :]
    # back substitution, reusing b as the solution buffer
    x = b
    x[-1, :] = d[-1, :] / b[-1, :]
    for row in range(num_rows - 2, -1, -1):
        x[row, :] = (d[row, :] - c[row, :] * x[row + 1, :]) / b[row, :]
    return x
67538ec8afaed953003bd819f7f7bfc7aa1cffda
15,158
def pos_encode(relative_position, pos_size):
    """Clamp a word's position relative to an entity into [0, pos_size - 1].

    :param relative_position: position of the current word relative to
        the entity
    :param pos_size: size of the position-code range
    :return: encoded position in [0, pos_size - 1]
    """
    half = pos_size // 2
    if relative_position < -half:
        return 0
    if relative_position < half:
        return relative_position + half
    return pos_size - 1
7f1257c4a922ca3771b9a49fc56213ef9f7ffe72
15,159
import numpy as np
import torch


def entropy(x, norm=True, base="binary"):
    """Compute entropy for an 8-bit quantilized image or image batch x.

    Args:
        x (torch.Tensor): input image or batch.
        norm (bool): default True, whether x should be normalized into
            [0, 255] before quantization.
        base (str): default "binary"; one of "nature" (nats),
            "binary" (bits), "decimal" (hartleys).

    Return:
        h (torch.Tensor): entropy of x.
    """
    if norm:
        # rescale to [0, 255] before quantizing
        x = (x - x.min()) / (x.max() - x.min())
        x *= 255
    x = x.int() if torch.is_floating_point(x) else x
    output, counts = torch.unique(x, return_counts=True)
    norm_counts = counts / counts.sum()
    # torch.log is natural log, so h starts out in nats
    h = (-norm_counts * torch.log(norm_counts.float())).sum()
    if base == "nature":
        pass
    elif base == "binary":
        h /= np.log(2)  # nats -> bits
    elif base == "decimal":
        # bug fix: "decimal" was documented but previously fell through
        # unhandled and silently returned nats
        h /= np.log(10)  # nats -> hartleys
    return h
9c49c130b637bc6029e17525b39c1c1ea1a71dbb
15,160
def format_time(seconds, n=5):
    """Format seconds to std time.

    Args:
        seconds (int): seconds.
        n: precision (D,h,m,s,ms)

    Returns:
        str: formatted duration.

    Example:
        seconds = 123456.7
        format_time(seconds)       # '1D10h17m36s700ms'
        format_time(seconds, n=2)  # '1D10h'
    """
    days = int(seconds / 3600 / 24)
    seconds -= days * 3600 * 24
    hours = int(seconds / 3600)
    seconds -= hours * 3600
    minutes = int(seconds / 60)
    seconds -= minutes * 60
    whole_seconds = int(seconds)
    millis = round((seconds - whole_seconds) * 1000)
    parts = []
    used = 1
    # days are always shown when non-zero, regardless of n
    if days > 0:
        parts.append('%d%s' % (days, 'D'))
        used += 1
    for amount, unit in ((hours, 'h'), (minutes, 'm'),
                         (whole_seconds, 's'), (millis, 'ms')):
        if amount > 0 and used <= n:
            parts.append('%d%s' % (amount, unit))
            used += 1
    if not parts:
        return '0ms'
    return ''.join(parts)
7e1c67a178cb407835d0ff28b4c756e246e14f2f
15,161
def equals(version: str, releases: dict) -> list:
    """Get a specific release.

    Parameters
    ----------
    version : str
        desired version (specifier syntax like '==1.0' is stripped)
    releases : dict
        dictionary of all releases in one package

    Returns
    -------
    list
        the release content followed by the cleaned version string, or
        ["Error"] when the version is unknown
    """
    cleaned = (version.replace("==", "").replace("(", '')
               .replace(")", '').replace(" ", ''))
    try:
        remote = releases[f'{cleaned}']
    except KeyError:
        return ["Error"]
    result = [item for item in remote]
    result.append(cleaned)
    return result
2617995aa6b669140dbf18d9b2b0b52a2d176308
15,162
def LinearlyScaled(value, maximum, minimum=0.0, offset=0.0):
    """Returns a value scaled linearly between 0 and 1.

    Args:
        value (float): the value to be scaled.
        maximum (float): the maximum value to consider. Must be strictly
            positive and finite (i.e., can't be zero nor infinity).

    Returns:
        A ``float`` between 0 and 1 (plus ``offset``). Values at or
        below ``minimum`` map to 1, values at or above ``maximum`` map
        to 0, intermediate values are scaled linearly.
    """
    clamped = max(minimum, min(maximum, value))
    fraction = (maximum - clamped) / (maximum - minimum)
    return fraction + offset
6ea0654915b77295f442df7e98a6911da3421ace
15,163
import ctypes


def ctypes_to_bytes(obj):
    """Serialise a ctypes structure/array into raw bytes.

    This is for python2 compatibility.
    """
    size = ctypes.sizeof(obj)
    scratch = ctypes.create_string_buffer(size)
    ctypes.memmove(scratch, ctypes.addressof(obj), size)
    return scratch.raw
95c91071212255ea0663c9eefb8537fbfc21fb69
15,164
def sort_by_absolute_val(df, column):
    """Sort df column by descending order in terms of the absolute value."""
    order = df[column].abs().sort_values(ascending=False).index
    return df.reindex(order)
bf9fdba373cd93c3b385154710b9b310e2a26ac8
15,166
import optparse


def process_cmd_line():
    """Set up and parse command line options"""
    usage = "Usage: %prog [options] <file>"
    opt_parser = optparse.OptionParser(usage)
    opt_parser.add_option("-b", "--batchmode", action="store_true",
                          dest="batchmode", default=False,
                          help="Does not interact with user (no windows etc.)")
    opt_parser.add_option('-o', '--output', dest='outputfile',
                          action='store', metavar="FILE",
                          help="Write stdout to FILE")
    opt_parser.add_option("-s", "--skip-experiments", action="store_false",
                          dest="doExperiments", default=True,
                          help="Does computational experiments with algorithm")
    opt_parser.add_option("-q", "--quiet", action="store_false",
                          dest="verbose", default=True,
                          help="don't print status messages to stdout")
    options, args = opt_parser.parse_args()
    return options, args, opt_parser
a01dae9b17b706c994970e710ff2aadf609f58a5
15,167
def tokenise_stream(stream):
    """Process stream to an array of tokens."""
    return list(stream)
f45ba92834f7b05245dbe9b75a7be9902bd6d19d
15,168
from numpy import isnan, maximum, seterr


def VPC_dict(N, Dvar, Imean, Ivar):
    """Generate VPC and VPCvar from Imean and Ivar data given N and Dvar.

    It is assumed the background offset (Dmean) has been subtracted from
    Imean. VPC corresponds to variance per count; when multiplied by
    counts, this scale factor converts counts to variance.
    """
    # silence divide/invalid warnings while computing; NaNs are zeroed below
    seterr(divide='ignore', invalid='ignore')
    vpc = maximum((Ivar - Dvar), Dvar) / Imean
    vpc_var = vpc ** 2 * ((Ivar + Dvar) / Imean ** 2 + 2 / (N - 1))
    vpc[isnan(vpc)] = 0
    vpc_var[isnan(vpc_var)] = 0
    seterr(divide='warn', invalid='warn')
    return {'VPC': vpc, 'VPCvar': vpc_var}
a1576a75850c39da37ce2ba1ff925d227a5ff719
15,169
def to_label(row):
    """Convert a Region or (chrom, start, end) tuple to a region label
    ("chrom:start-end", start rendered 1-based)."""
    one_based_start = row.start + 1
    return f"{row.chromosome}:{one_based_start}-{row.end}"
bf2f8e51301e7f157c8398f0a85c57d4df1619ed
15,171
def bool_check(arg, config, default):
    """Return bool from `arg` or `config` str, or `default` bool.

    Parser and configuration-file arguments arrive as strings; strings
    representing booleans are converted to real bool objects, with `arg`
    taking precedence over `config`.

    Args:
        arg (str):
        config (str):
        default (bool):

    Returns:
        True, False, or default (bool):

    Raises:
        TypeError
    """
    mapping = {'True': True, 'False': False}
    for candidate in (arg, config):
        if candidate:
            if candidate in mapping:
                return mapping[candidate]
            raise TypeError
    return default
1b0e7f0bf82bbb535ae05fda24ede06f4e50b54c
15,172
def compute_component_suffstats(x, mean, S, N, p_mu, p_k, p_nu, p_L): """ Update mean, covariance, number of samples and maximum a posteriori for mean and covariance. Arguments: :np.ndarray x: sample to add :np.ndarray mean: mean of samples already in the cluster :np.ndarray cov: covariance of samples already in the cluster :int N: number of samples already in the cluster :np.ndarray p_mu: NIG Normal mean parameter :double p_k: NIG Normal std parameter :int p_nu: NIG Gamma df parameter :np.ndarray p_L: NIG Gamma scale matrix Returns: :np.ndarray: updated mean :np.ndarray: updated covariance :int N: updated number of samples :np.ndarray: mean (maximum a posteriori) :np.ndarray: covariance (maximum a posteriori) """ new_mean = (mean*N+x)/(N+1) new_S = (S + N*mean.T@mean + x.T@x) - new_mean.T@new_mean*(N+1) new_N = N+1 new_mu = ((p_mu*p_k + new_N*new_mean)/(p_k + new_N))[0] new_sigma = (p_L + new_S + p_k*new_N*((new_mean - p_mu).T@(new_mean - p_mu))/(p_k + new_N))/(p_nu + new_N - x.shape[-1] - 1) return new_mean, new_S, new_N, new_mu, new_sigma
f7f8d0512f57e6a627255f339e0425b8f4c66bd0
15,175
def ensure_enum(value, cls):
    """Ensure the returned value is an instance of *cls*, converting
    when it is not already exactly that type."""
    return value if type(value) == cls else cls(value)
0dc4b7feee1d6bc04ca6d59786f76723f075fc1e
15,177
def temperate_seasons(year=0):
    """Temperate seasons.

    Parameters
    ----------
    year : int, optional (dummy value).

    Returns
    -------
    out : dict
        integers as keys, temperate seasons as values.

    Notes
    -----
    Appropriate for use as 'year_cycles' function in :class:`Calendar`.
    This module has a built-in calendar with seasons only:
    :data:`CalSeasons`.
    """
    names = ('Spring', 'Summer', 'Autumn', 'Winter')
    return {index: name for index, name in enumerate(names, start=1)}
ddf229ef993f64402c2179a3c725a0407e4c3822
15,178
def is_boolean(M):
    """@brief Test if a matrix M is a boolean matrix (entries 0/1 only)."""
    for row in M:
        if any(entry != 0 and entry != 1 for entry in row):
            return False
    return True
d9514e3b268d1726893ff53395b4a039341eba71
15,179
def get_week_day(integer):
    """Getting weekday given an integer (0=Monday ... 6=Sunday);
    returns None for anything else."""
    return {0: "Monday",
            1: "Tuesday",
            2: "Wednesday",
            3: "Thursday",
            4: "Friday",
            5: "Saturday",
            6: "Sunday"}.get(integer)
5bd0431e8598d56e99f970da738e532630665163
15,180
import base64
import six


def deserialize(obj):
    """Deserialize the given object

    :param obj: string representation of the encoded object
    :return: decoded object (its type is unicode string)
    """
    decoded = base64.urlsafe_b64decode(obj)
    # python3's urlsafe_b64decode returns bytes; normalise to text
    if six.PY3:
        decoded = decoded.decode()
    return decoded
a76a6f396b5e0992d4c942d4ede73e16594b7173
15,181
import yaml


def ydump(data, *args, sort_keys=False, **kwargs):
    """Create YAML output string for data object.

    If data is an OrderedDict, original key ordering is preserved in the
    internal call to yaml.dump().

    :param data:
    :type data: dict or Bunch
    :param args: Additional args passed on to yaml.dump()
    :param sort_keys: defaults to False
    :type sort_keys: bool
    :param kwargs: Further keyword args are passed on to yaml.dump()
    :return: YAML string representation of data
    :rtype: str
    """
    text = yaml.dump(data, *args, sort_keys=sort_keys, **kwargs)
    return text
4ebf80264a2755bcbbbc620e83064d69f3c76a57
15,183
import pandas as pd


def shareseq_palette_df():
    """Defines the dataframe for colors pertaining to the celltypes of
    the SHARE-seq dataset."""
    # (celltype, color) pairs keep each label next to its hex code
    palette = [
        ("alpha-high-CD34+ Bulge", "#0F532C"),
        ("alpha-low-CD34+ Bulge", "#1D8342"),
        ("Isthmus", "#42B649"),
        ("K6+ Bulge/Companion Layer", "#69C4A5"),
        ("TAC-1", "#FDD82F"),
        ("TAC-2", "#FCAE1F"),
        ("IRS", "#F57A03"),
        ("Medulla", "#EF2D1A"),
        ("Hair Shaft - Cuticle/Cortex", "#A10109"),
        ("ORS", "#660A17"),
        ("Basal", "#96CBE8"),
        ("Spinous", "#149FD7"),
        ("Granular", "#0765AC"),
        ("Infundibulum", "#427FC2"),
        ("Endothelial", "#8984C0"),
        ("Dermal Fibroblast", "#6C4CA1"),
        ("Dermal Sheath", "#98479C"),
        ("Dermal Papilla", "#A80864"),
        ("Macrophage DC", "#491786"),
        ("Melanocyte", "#ECE819"),
        ("Sebaceous Gland", "#FEC75C"),
        ("Schwann Cell", "#E84C9D"),
        ("Mixed", "#161616"),
    ]
    celltypes = [name for name, _ in palette]
    colors = [hexcode for _, hexcode in palette]
    return pd.DataFrame({"celltypes": celltypes, "colors": colors})
851a9e8f78e50c2167adfb2a7fc72e0741e7a405
15,184
import requests


def push(command: str, f: str = "root", a: str = "cyberlab"):
    """Push command to repo

    :param command: command for push on repository
    :param f: folder for push (default : root )
    :param a: author of command (default: cyberlab)
    :return: Message of success or error
    """
    url = 'http://adeaee94f5b244075afbab05906b0697-63726918.eu-central-1.elb.amazonaws.com/commands/'
    payload = {
        "content": command,
        "folder": f,
        "author": a
    }
    response = requests.post(url, json=payload)
    if response.status_code == 200:
        return "Command successfully pushed"
    return "Error!"
dd3b857738ceb23a9c68da6c2e82d47abc19eb12
15,185
import torch


def compute_pairwise_distance(data_x, data_y=None, device=None):
    """Compute squared euclidean pairwise distances on the given device.

    Args:
        data_x: numpy.ndarray([N, feature_dim], dtype=np.float32)
        data_y: numpy.ndarray([N, feature_dim], dtype=np.float32);
            defaults to data_x.

    Returns:
        numpy.ndarray([N, N], dtype=np.float32) of pairwise distances.
    """
    if data_y is None:
        data_y = data_x
    xs = torch.from_numpy(data_x).to(device)
    ys = torch.from_numpy(data_y).to(device)
    # ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2
    sq_x = torch.sum(torch.square(xs), dim=1).unsqueeze_(-1)
    sq_y = torch.sum(torch.square(ys), dim=1).unsqueeze_(0)
    cross = torch.matmul(xs, torch.transpose(ys, 1, 0))
    dists = sq_x - 2 * cross + sq_y
    return dists.cpu().numpy()
09111e3b7320f1679e6316e510018d232807467a
15,186
from urllib.parse import urlparse


def get_domain(url: str) -> str:
    """get domain from url by given

    Args:
        str type
    Return:
        str type, return domain if can get
    """
    parsed = urlparse(url)
    return '{uri.netloc}'.format(uri=parsed)
9b9847dbca29be4e87f8bcc463adad0c003b1f3a
15,187
def filename_with_size(media, size):
    """Returns the filename with size, e.g. IMG1234.jpg, IMG1234-small.jpg"""
    # Strip any non-ascii characters.
    ascii_name = media.filename.encode("utf-8").decode("ascii", "ignore")
    if size == 'original':
        return ascii_name
    suffix = "-%s." % size
    return suffix.join(ascii_name.rsplit(".", 1))
fc77d0b234c1a69fc1d61793cc5e07cf2ae25864
15,189
from sympy.ntheory.modular import crt


def _decipher_rsa_crt(i, d, factors):
    """Decipher RSA using the Chinese Remainder Theorem from the
    relatively-prime factors of the modulus.

    Parameters
    ==========

    i : integer
        Ciphertext

    d : integer
        The exponent component

    factors : list of relatively-prime integers
        The integers given must be coprime and the product must equal
        the modulus component of the original RSA key.
    """
    # decrypt modulo each prime factor, then recombine with CRT
    residues = [pow(i, d, p) for p in factors]
    result = crt(factors, residues)
    if not result:
        raise ValueError("CRT failed")
    return result[0]
17200b88d545d8c3d16a191e6e607389619694a9
15,190
def get_item(dictionary, key):
    """Return value from dictionary (None when absent).

    Args:
        dictionary (dict): Dictionary to retrieve value from.
        key (str): Key to perform lookup.

    Returns:
        Value of key in dictionary, or None if missing.
    """
    return dictionary.get(key, None)
d79fac838e31ff8eb8ac14b7f7f5abec5073f46c
15,191
def _should_save_report_msg(msg):
    """Returns True if the given ForwardMsg should be serialized into
    a shared report.

    We serialize report & session metadata and deltas, but not transient
    events such as upload progress.
    """
    return msg.WhichOneof("type") in ("initialize", "new_report", "delta")
8e78580aee25ab4a6d1bf40a774a05031231c2e4
15,193
import configparser


def prep_dispatcher_supervisord_conf():
    """Prepares the supervisord configuration for dispatcher.

    :returns: supervisord configuration as a ConfigParser object
    :rtype: ConfigParser
    """
    environment = 'PYTHONPATH=python:.,ZLOG_CFG="gen/dispatcher/dispatcher.zlog.conf"'
    command = """bash -c 'exec bin/dispatcher &>logs/dispatcher.OUT'"""
    config = configparser.ConfigParser()
    config['program:dispatcher'] = {
        'autostart': 'false',
        'autorestart': 'true',
        'environment': environment,
        'stdout_logfile': 'NONE',
        'stderr_logfile': 'NONE',
        'startretries': '0',
        'startsecs': '1',
        'priority': '50',
        'command': command,
    }
    return config
3385a6d6d18afeb1ce57386b4f9ad87da8abb9df
15,194
import argparse


def init_argparse(*args):
    """Instantiate argparse object.

    When positional *args* are given, the first one is passed to
    parse_args() (useful for tests); otherwise sys.argv is parsed.
    """
    arg_parser = argparse.ArgumentParser(
        description='Train a network on dataset and save the model as a checkpoint'
    )
    arg_parser.add_argument('-i', '--input_img', help='Path to image')
    arg_parser.add_argument('-c', '--checkpoint', help='Path to checkpoint',
                            default='checkpoints')
    arg_parser.add_argument('-k', '--top_k', help='Return n most likely classes',
                            type=int, default=3)
    arg_parser.add_argument('-n', '--category_names',
                            help='Use a mapping of categories to real names')
    arg_parser.add_argument('--gpu', help='Use GPU for predictions; Default is True',
                            action='store_true', default=True)
    # Initialize with constants if passed in as an argument
    if args:
        return arg_parser.parse_args(args[0])
    return arg_parser.parse_args()
6a76f3a94c688b49f2f03f76be8281b14bb27014
15,195
def dict_unnest(obj):
    """Flatten a dictionary containing other dictionaries by
    concatenating their keys.

    Parameters
    ----------
    obj : dict
        A dictionary, which may or may not contain other dictionaries

    Returns
    -------
    dict
        A new dictionary, which has been reformatted
    """
    flattened = {}
    for key, value in obj.items():
        if isinstance(value, dict):
            # recurse, then prefix the child keys with the parent key
            for inner_key, inner_value in dict_unnest(value).items():
                flattened[f"{key}_{inner_key}"] = inner_value
        elif isinstance(value, list):
            # list entries are keyed by their index
            for index, item in enumerate(value):
                flattened[f"{key}_{index}"] = item
        else:
            flattened[key] = value
    return flattened
5c2080dbdd229ba98d154b9efec1a946418e19e1
15,196
def get_X_Y(lockin):
    """Get X and Y (Measure)

    args:
        lockin (pyvisa.resources.gpib.GPIBInstrument): SRS830

    returns:
        (tuple): X, Y
    """
    reply = lockin.query("SNAP? 1,2")
    x_str, y_str = reply.split("\n")[0].split(",")
    return float(x_str), float(y_str)
3d56151042682f86350a499ab639852fc6387887
15,198
import functools
import warnings


def deprecated(message="Function %s is now deprecated"):
    """Decorator that marks a certain function or method as deprecated
    so that whenever such function is called an output message warns the
    developer about the deprecation (incentive).

    :type message: String
    :param message: The message template to be used in the output
        operation of the error.
    :rtype: Decorator
    :return: The decorator that should be used to wrap a function and
        mark it as deprecated (send warning).
    """
    def decorator(function):
        function_name = getattr(function, "__name__", None)

        @functools.wraps(function)
        def interceptor(*args, **kwargs):
            # force the warning to show on every call, then restore default
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn(
                message % function_name,
                category=DeprecationWarning,
                stacklevel=2
            )
            warnings.simplefilter("default", DeprecationWarning)
            return function(*args, **kwargs)

        return interceptor
    return decorator
9c4812653bc74414bed3839e7654f09a114865a0
15,200
def csrf_exempt(func):
    """Decorate a Controller to exempt it from CSRF protection."""
    setattr(func, 'csrf_enabled', False)
    return func
5660d6ef1a9a92a51c4c460f4e9afbe4d5624e0a
15,201
import struct


def combine_u32_registers(registers):
    """combine two registers for 32-bit int output"""
    packed = struct.pack('>HH', registers[0], registers[1])
    (combined,) = struct.unpack('>I', packed)
    return combined
67f245a9fada01a693a3cb46536a0df7baba433a
15,202
import webbrowser
import time


def get_oauth_pin(oauth_url, open_browser=True):
    """Prompt the user for the OAuth PIN.

    By default, a browser will open the authorization page. If
    `open_browser` is false, the authorization URL will just be printed
    instead.
    """
    print('Opening: {}\n'.format(oauth_url))
    if open_browser:
        print(""" In the web browser window that opens please choose to Allow access. Copy the PIN number that appears on the next page and paste or type it here: """)
        try:
            opened = webbrowser.open(oauth_url)
            # give the browser a moment before we prompt
            time.sleep(2)
            if not opened:
                raise Exception()
        except:
            print(""" Uh, I couldn't open a browser on your computer. Please go here to get your PIN: """ + oauth_url)
    else:
        print(""" Please go to the following URL, authorize the app, and copy the PIN: """ + oauth_url)
    return input('Please enter the PIN: ').strip()
74d7c691cfaa42d717e017fe227b73669ffe3a6d
15,204
def exception_models_to_message(exceptions: list) -> str:
    """Formats a list of exception models into a single string"""
    chunks = []
    for exc in exceptions:
        chunks.append(f"Code: {exc.code}"
                      f"\nMessage: {exc.message}"
                      f"\nSeverity: {exc.severity}"
                      f"\nData: {exc.data}")
    return "\n\n".join(chunks)
56f40ab1fe0d1a03abeaa08d1f9898c7abdb0552
15,205
import re


def parse_version(str_):
    """Parses the program's version (``major.minor.patch``) from a python
    variable declaration.

    Returns the first version-like substring; prints a note and raises
    KeyError when none is found.
    """
    # bug fix: the dots must be escaped — r"\d+.\d+.\d+" lets '.' match
    # any character, so e.g. "12345" was wrongly accepted as a version
    v = re.findall(r"\d+\.\d+\.\d+", str_)
    if v:
        return v[0]
    print("cannot parse string {}".format(str_))
    raise KeyError
7def768ec9790d610922774eb4aab22f4531b1d3
15,207
def cut_string(string: str, length: int) -> list:
    """Split a string into chunks of display width *length*.

    Characters above U+00FF count as width 2, all others as width 1;
    newlines are replaced by spaces first.

    :param string: input text
    :param length: maximum width per chunk
    :return: list of resulting chunks
    """
    text = string.strip().replace('\n', ' ')
    chunks = []
    current, width = '', 0
    for ch in text:
        ch_width = 2 if ord(ch) > 255 else 1
        width += ch_width
        if width <= length:
            current += ch
        else:
            chunks.append(current)
            current, width = ch, ch_width
    if current:
        chunks.append(current)
    return chunks
830fdfad42c8c93d49eb2260e8f317b01c89b3dd
15,208
from typing import Dict


def _json_list2dict(d: Dict) -> Dict:
    """Loop through all fields, and once it meets a list of containers,
    convert it into a dict keyed by the stringified list index.
    Conversion is applied recursively down to the last level.

    Parameters
    ----------
    d : Dict
        initial dict to convert

    Returns
    -------
    Dict
        converted dict
    """
    for field, content in d.items():
        # only lists whose first element is itself a container are converted
        if (isinstance(content, list)
                and content
                and isinstance(content[0], (list, dict))):
            content = {str(index): element
                       for index, element in enumerate(content)}
        # recurse (including into freshly converted lists)
        if isinstance(content, dict):
            content = _json_list2dict(content)
        d[field] = content
    return d
246387cc4a9c9b384f7c4ddc29d17cd916875146
15,209
def missing_percentage(ssnvs_matrix):
    """Calculate missing percentage to detect correct folder name output
    from siclonefit.

    :return: string like '25p_missing_samples' built from the first two
        decimal digits of the missing fraction
    """
    total_cells = ssnvs_matrix.shape[0] * ssnvs_matrix.shape[1]
    fraction = ssnvs_matrix.isnull().sum().sum() / total_cells
    # first two decimal digits of the fraction act as a percentage
    digits = str(fraction).split(".")[1][0:2]
    if len(digits) != 2:
        digits = digits + "0"
    return digits + "p_missing_samples"
a82c0252996db14c42143895ba87d58460ea797c
15,211
def generating_destination(trips):
    """Fill each trip's destination columns from the next row of the
    same bike.

    For every row whose "next row" belongs to the same bike and starts
    later than the current trip, the *_next_row lookup columns are
    copied into the trip's destination fields.

    :param trips: DataFrame with bike_id / *_next_row / dt_start columns
    :return: the mutated DataFrame
    """
    # The identical boolean mask was previously recomputed for all seven
    # assignments; compute it once.
    same_bike_later = ((trips['bike_id'] == trips['bike_next_row'])
                       & (trips['dt_min_next_row'] > trips['dt_start']))
    # destination column -> source lookup column (order preserved so new
    # columns are created in the same order as before)
    column_map = {
        'trip_end_time': 'dt_min_next_row',
        'to_station': 'station_next_row',
        'to_station_id': 'station_id_next_row',
        'to_station_mode': 'station_mode_next_row',
        'to_lat': 'lat_next_row',
        'to_long': 'long_next_row',
        'trip_duration': 'diff',
    }
    for target, source in column_map.items():
        trips.loc[same_bike_later, target] = trips[source]
    return trips
50c9549f42de7f5bba5c15430651e3c0d813b31f
15,212
import re


def clean_license_name(license_name):
    """Remove the word ``license`` from the license

    :param str license_name: Receives the license name
    :return str: Return a string without the word ``license``
    """
    cleaned, _count = re.subn(r'(.*)\s+license', r'\1', license_name,
                              flags=re.IGNORECASE)
    return cleaned
970d933911b69ba9a1f33a768bc68032334d41c3
15,213
import inspect
import os


def is_repl():
    """Return True if running in the Python REPL."""
    outermost = inspect.stack()[-1]
    filename = outermost[1]  # 1 is the filename field in this tuple.
    if filename.endswith(os.path.join("bin", "ipython")):
        return True
    # <stdin> is what the basic Python REPL calls the root frame's
    # filename, and <string> is what iPython sometimes calls it.
    return filename in ("<stdin>", "<string>")
f13a186d2c334fe705a3e05ebeca3af4484c7f4f
15,214
import csv


def read_csv_wind_parameters(csv_uri, parameter_list):
    """Construct a dictionary from a csv file given a list of keys in
    'parameter_list'. The keys correspond to parameter names found in the
    first column of 'csv_uri'.

    csv_uri - a URI to a CSV file where every row is a parameter with
        the parameter name in the first column followed by the value in
        the second column

    parameter_list - a List of Strings that represent the parameter
        names to be found in 'csv_uri'. These Strings will be the keys
        in the returned dictionary

    returns - a Dictionary where the 'parameter_list' Strings are the
        keys that have values pulled from 'csv_uri'
    """
    output_dict = {}
    # bug fix: the 'rU' open mode was removed in Python 3.11 and raised
    # ValueError; use newline='' as the csv module recommends. 'with'
    # also guarantees the file is closed even on error.
    with open(csv_uri, 'r', newline='') as csv_file:
        for csv_row in csv.reader(csv_file):
            # Only get the biophysical parameters and leave out the
            # valuation ones; skip blank rows defensively.
            if csv_row and csv_row[0].lower() in parameter_list:
                output_dict[csv_row[0].lower()] = csv_row[1]
    return output_dict
80f8e46f8739975b51ab320b9e6ce747c7a642d7
15,215
def is_slice_notation(value):
    """Return True if the value is a string containing slice notation (':')."""
    if not isinstance(value, str):
        return False
    return ":" in value
71926dfb60d018b56546d8c6b02985d5a8262ae5
15,216
import requests


def get_cached_data(url):
    """Fetch *url* and return its decoded JSON payload.

    NOTE(review): despite the name/original doc, no caching is performed
    here — every call issues a fresh HTTP request.
    """
    response = requests.get(url)
    return response.json()
c1cd251db1b915024bad6bc590160c38e61c38bb
15,217
from typing import List


def linspace(start: float, stop: float, n: int) -> List[float]:
    """Returns a List of n linearly spaced numbers from start to stop
    (both endpoints included when n > 1)."""
    if n <= 0:
        return []
    if n == 1:
        return [start]
    step = (stop - start) / (n - 1)
    return [start + step * index for index in range(n)]
0ba6eb029f96cb662c48c8c8b997a73e2f5a9bc9
15,218
import argparse


def parse_cli():
    """Parse command-line options and return them as a dictionary.

    Returns:
        dict: keys 'default', 'out', 'begin', 'quiet', 'input', 'config'
        mapped to the parsed argument values.
    """
    parser = argparse.ArgumentParser(
        description='Change in bulk lines in text files')
    parser.add_argument(
        '-d', '--default',
        help='default new value when not specified in the configuration file')
    parser.add_argument('-o', '--out', help='write to this file')
    # type=int so an explicit '--begin 5' yields an int, consistent with
    # the integer default (previously it arrived as the string '5').
    parser.add_argument(
        '-b', '--begin', type=int,
        help='start counting from this value. Defaults to 1', default=1)
    parser.add_argument(
        '-q', '--quiet',
        help="Don't print the output to STDOUT. Default to False",
        action="store_true", default=False)
    parser.add_argument('input', help='the input file to modify')
    parser.add_argument(
        'config',
        help='the configuration file with the list of line indices to modify')
    return parser.parse_args().__dict__
b8c9c145be5b184fe08dfd9f1547747c38f5944d
15,221
def ellipsis_after(text, length):
    """Truncate *text* at a word boundary to at most *length* characters
    and append an ellipsis.

    Text that is empty, None, or already short enough is returned
    unchanged.
    """
    if not text or len(text) <= length:
        return text
    clipped = text[:length]
    # Drop the final (possibly cut-in-half) word before the ellipsis.
    return clipped.rsplit(' ', 1)[0] + u"\u2026"
9b15c5e8f63caec0a7327ae1ce872bac932208ab
15,222
def order_columns(dataset, col_names, feature_to_predict):
    """Reorder *dataset* so 'feature_to_predict' is the first column and
    the remaining columns follow in sorted order.

    Parameters
    ----------
    dataset : pandas.DataFrame
    col_names : list of column names to be sorted
    feature_to_predict : column to place first

    Returns
    -------
    Dataset with 'feature_to_predict' as the first column.

    Note: unlike the previous implementation, *col_names* is no longer
    mutated in place.
    """
    # Sort everything except the target; build a fresh list instead of
    # removing from the caller's list.
    remaining = sorted(c for c in col_names if c != feature_to_predict)
    ordered = [feature_to_predict] + remaining
    return dataset.reindex(ordered, axis=1)
05cb77502defa2648c6cc9279cf130d07ffd9bb7
15,223
def memoize_dropdown(func):
    """Decorator: build the dropdown choice list once and reuse it.

    On the first call the wrapped *func* is invoked and each returned
    city becomes a (city, city) tuple; later calls return the cached
    list without calling *func* again.
    """
    cached_choices = []

    def wrapper():
        if not cached_choices:
            cached_choices.extend((name, name) for name in func())
        return cached_choices

    return wrapper
728e105d4e45c9504790bd68253c0d37f122cff5
15,224
def modulo(x, y):
    """Return the remainder of dividing *x* by *y* using Python's ``%``.

    Note: Python's ``%`` takes the sign of the divisor, so results for
    negative operands differ from JavaScript's remainder operator
    (e.g. ``modulo(-5, 3) == 1`` here, while JS gives ``-2``).
    """
    remainder = x % y
    return remainder
efea1409596564a393cae7b46afe17141326552b
15,225
import socket


def get_host_name():
    """Return this machine's host name as reported by the OS."""
    host = socket.gethostname()
    return host
feddd2a5bf674ff956b280d02649f5872b61991b
15,226
def sortArrayByParity2(A):
    """Partition *A* in place so that even values precede odd values.

    Two-pointer sweep: swap an (odd, even) pair found at the two ends,
    advance past evens on the left and odds on the right.
    Note: despite the name, this is a plain even/odd partition, not the
    even/odd *interleaving* of LeetCode's "Sort Array By Parity II".

    :type A: List[int]
    :rtype: List[int]
    """
    lo, hi = 0, len(A) - 1
    while lo < hi:
        if A[lo] % 2 == 1 and A[hi] % 2 == 0:
            A[lo], A[hi] = A[hi], A[lo]
        if A[lo] % 2 == 0:
            lo += 1
        if A[hi] % 2 == 1:
            hi -= 1
    return A
048786203eab589fa74a78ddfc2bc05a104a8f4e
15,227
import typing


def merge_reports(reports: typing.Iterable[dict]) -> dict:
    """Merge several size reports by summing the values of shared keys.

    :param reports: The reports to merge.
    :return: the merged report.
    """
    merged = {}
    for report in reports:
        for key, size in report.items():
            merged[key] = merged.get(key, 0) + size
    return merged
7045d0e7f8047678a5bdb90c885717bc917cb46a
15,228
def uncover_dash(guess, answer, dashed_word):
    """Reveal every position of *guess* in *answer* within the dashed word.

    :param guess: str, the (upper-cased) character the user guessed
    :param answer: str, the answer word
    :param dashed_word: str, the current partially revealed word
    :return: str, dashed word with all matching positions uncovered
    """
    revealed = []
    for i, answer_ch in enumerate(answer):
        revealed.append(guess if answer_ch == guess else dashed_word[i])
    return ''.join(revealed)
30e2134e607ef24071cbb50e43723ae0d5fc88a9
15,229
import math


def angle_3p(p0, p1, p2):
    """Return the angle in degrees at vertex *p0* formed by *p1* and *p2*.

    The points are vector-like objects supporting subtraction and
    ``.dot()`` (e.g. numpy arrays).
    """
    u, v = p1 - p0, p2 - p0
    costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))
    # Floating-point rounding can push the cosine slightly outside
    # [-1, 1], which would make acos() raise a domain error for
    # (anti)parallel vectors; clamp it.
    costheta = max(-1.0, min(1.0, costheta))
    return math.degrees(math.acos(costheta))
3a5ac0c4d0cfa8ae97e979175292fe416cc683b8
15,230
import hmac
import hashlib


def mailgun_signature(timestamp, token, webhook_signing_key):
    """Compute the HMAC-SHA256 hex digest Mailgun uses to sign webhooks.

    See https://documentation.mailgun.com/en/latest/user_manual.html#securing-webhooks
    """
    message = f"{timestamp}{token}".encode('ascii')
    mac = hmac.new(
        key=webhook_signing_key.encode('ascii'),
        msg=message,
        digestmod=hashlib.sha256,
    )
    return mac.hexdigest()
64d549e53a7c8f6842d176caa501335a0bc1ca61
15,232
import copy


def create_annotation_data(save_data):
    """Build the final annotation dict from the editor's save dict.

    Deep-copies only the "image" and "grid_cells" entries (dropping any
    other save-state keys) and strips the editor-only "id" field from
    every corner of every PV module. The input dict is left untouched.
    """
    annotation_data = {
        "image": copy.deepcopy(save_data["image"]),
        "grid_cells": copy.deepcopy(save_data["grid_cells"]),
    }
    # Corner "id"s are editor bookkeeping; drop them. A missing
    # "corners"/"id" key aborts the cleanup silently (legacy behavior).
    try:
        for module in annotation_data["grid_cells"]:
            for corner in module["corners"]:
                del corner["id"]
    except KeyError:
        pass
    return annotation_data
ee8ceb51cac98b7ac795c9bfefd525d23168d6d1
15,233
def _parse_data_row(row, columns, counters, **options):
    """Parse table data row.

    If a cell has multiple tags within it then each will be seperated by
    `sep` character.

    Parameters
    ----------
    row : BeautifulSoup Tag object
        A <tr> tag from the html, with data in at least one cell.
    columns : list
        The list of column headers for the table.
    counters : dict
        Counters used for propogating multirow data; mutated in place.
        Each value is a [rows_remaining, cell_value] pair.
    sep : string, optional (default='')
        Seperator between multiple tags in a cell.

    Returns
    -------
    row_processed : list
        The processed row, one value per column (None where cells are
        missing).
    """
    sep = options.pop('sep', '')
    cells = row.find_all(['th', 'td'])
    # Index of the next unconsumed <td>/<th>; rowspan propagation means
    # columns and cells do not advance in lockstep.
    cell_cursor = 0
    row_processed = []
    for col in columns:
        # Check if values to propagate from an earlier rowspan cell.
        if counters[col][0] > 0:
            cell_value = counters[col][1]
            counters[col][0] -= 1
        # If not propagate, get from cell
        elif cell_cursor < len(cells):
            cell = cells[cell_cursor]
            # NOTE: pops 'rowspan' off the tag's attrs, mutating the tag.
            rowspan = int(cell.attrs.pop('rowspan', 1))
            cell_value = sep.join(cell.stripped_strings)
            if rowspan > 1:
                # Remember the value for the following rowspan-1 rows.
                counters[col] = [rowspan - 1, cell_value]
            cell_cursor += 1
        # If cursor out of index range, assume cells missing from
        # the end of the row and fill with None.
        else:
            cell_value = None
        row_processed.append(cell_value)
    return row_processed
500c9634b8110575d3c7800a0a1f2616fa08ac03
15,234
def generate_wpe_labels(labels):
    """Insert a singleton dimension at axis 1 of the label tensor.

    :param labels: tensor of shape [batch, seq_len]
    :return: the same labels viewed as [batch, 1, seq_len]
    """
    # Equivalent to labels.unsqueeze(1).
    return labels[:, None]
6a32a0bd29ca20b543a1b052c0284a6e0d3127f0
15,236
def FL(f):
    """Return the fully-parenthesized Spot-syntax string for formula *f*."""
    return f"({f.to_str('spot', full_parens=True)})"
5d35b96ed7089294d85ca31ed24c8824a243e7ee
15,238
def add_argument(parser, flag, type=None, **kwargs):
    """Wrapper to add arguments to an argument parser.

    Fixes argparse's behavior with type=bool. For a bool flag 'test',
    this adds '--test' (defaults to on when given bare), plus
    '--test true' / '--test false' forms, and a '--notest' switch that
    turns the flag off.
    """
    if flag.startswith('-'):
        raise ValueError('Flags should not have the preceeding - symbols, -- will be added automatically.')

    if type != bool:
        parser.add_argument('--' + flag, type=type, **kwargs)
        return

    def parse_bool(text):
        # Accept 'true'/'t'/'1' (any case) as truthy; everything else is False.
        return text.lower() in ('true', 't', '1')

    parser.add_argument('--' + flag, type=parse_bool, nargs='?', const=True, **kwargs)
    parser.add_argument('--no' + flag, action='store_false', dest=flag)
d802615f737c806be97962f18fdd7e8057512f96
15,239
def group_fasta(fa):
    """Parse a FASTA file into a mapping of header line -> sequence.

    :param fa: path to a FASTA file
    :return: dict like {'>id': 'ATCG...', ...}

    Fixes over the previous version: a record with an empty sequence no
    longer shifts the id/sequence pairing, and a blank line in the middle
    of the file no longer truncates the parse.
    """
    seqmap = {}
    header = None
    chunks = []
    with open(fa, 'r') as fo:
        for line in fo:
            line = line.strip('\n')
            if line.startswith('>'):
                # Flush the previous record (possibly an empty sequence).
                if header is not None:
                    seqmap[header] = ''.join(chunks)
                header = line
                chunks = []
            elif line:
                chunks.append(line)
        if header is not None:
            seqmap[header] = ''.join(chunks)
    return seqmap
821746f46a0458c99e34f38f175fc36dc3550a9c
15,240
import time


def convert_from_html_time(html_time):
    """Convert an HTML-form time string to minutes past midnight.

    html_time - time of the format "9:00 AM"

    returns the number of minutes since 12:00 AM
    """
    parsed_time = time.strptime(html_time, "%I:%M %p")
    return parsed_time.tm_hour * 60 + parsed_time.tm_min
89f8fe3ee1be7abb55cb523c73dc130cc38825d7
15,242
def init(runtime_id, name):
    """Initialize the data source and return its (empty) initial state."""
    initial_state = {}
    return initial_state
b90f6bb131df8c1e47bd8fead95273613b59ac3a
15,244
def identity(x):
    """Return *x* unchanged -- the identity function."""
    return x
e18cfe924da5195d2608f1808b17f142e23a83da
15,245
def _get_depth(matrix, vec, height): """ Get the depth such that the back-projected point has a fixed height""" return (height - matrix[1, 3]) / (vec[0] * matrix[1, 0] + vec[1] * matrix[1, 1] + matrix[1, 2])
b0db2823a47b110c302595a4c281e81e01bf7b2f
15,246
import bisect


def get_window(chromosome, target_peaks, distance):
    """Collect every peak on *chromosome* lying within *distance* of any
    target peak from another replicate.

    Returns a tuple (peaks_in_window, chromosome_name).
    """
    low = target_peaks[0].midpoint
    high = target_peaks[0].midpoint
    for pk in target_peaks:
        low = min(low, pk.midpoint - distance)
        high = max(high, pk.midpoint + distance)
    # chromosome.keys is assumed sorted; binary-search the window bounds.
    first = bisect.bisect_left(chromosome.keys, low)
    last = bisect.bisect_right(chromosome.keys, high)
    return (chromosome.peaks[first:last], chromosome.name)
875549b6000c4114b07b9de970b2b7071b704e2c
15,247
def bitwise_dot_product(bs0, bs1):
    """Dot product (mod 2) of two equal-length bit strings.

    :param String bs0: '0'/'1' string representing a binary number
    :param String bs1: '0'/'1' string representing a binary number
    :return: "0" or "1", the dot-product value
    :rtype: String
    :raises ValueError: if the strings differ in length
    """
    if len(bs0) != len(bs1):
        raise ValueError("Bit strings are not of equal length")
    total = sum(int(a) * int(b) for a, b in zip(bs0, bs1))
    return str(total % 2)
edb928ead1c7e9bcd2756174629209eeab3cef87
15,248
import os


def empty_file(path):
    """Return True if the file at *path* is empty (zero bytes), else False.

    :raises OSError: if *path* does not exist or cannot be stat'd.
    """
    # No local named 'bool' anymore -- the old version shadowed the builtin.
    return os.stat(path).st_size == 0
e854a318cb17525cf6638577af9aba8688244d25
15,249
import shutil


def binary_available() -> bool:
    """Return True when the Radix CLI binary ("rx") can be found on $PATH,
    otherwise False."""
    located = shutil.which("rx")
    return located is not None
fa814b20b3fe47096dfed64bde3f9cb33e9b711d
15,250
def prep_difflines(content):
    """Split *content* on newlines, re-attaching a trailing "\\n" to each
    line -- the "readlines"-compatible shape difflib expects."""
    return [line + "\n" for line in content.split("\n")]
192f198285598e3fe69c6155459974f7664af090
15,251
def model_pred_on_gen_batch(model, b_gen, b_idx=0):
    """Predict on a single batch pulled from a data generator.

    Returns (predictions, targets) for batch *b_idx* of *b_gen*.
    """
    features, targets = b_gen.__getitem__(b_idx)
    predictions = model.predict_on_batch(features)
    return predictions, targets
1a23903f088fc61b96060ab913ddbc554d5584a6
15,252
def compute_F1(cm):
    """F1 score from a 2x2 confusion matrix.

    For binary classification problems only; cm[1,1]=TP, cm[0,1]=FP,
    cm[1,0]=FN (row = truth, column = prediction).
    """
    assert len(cm.shape) and cm.shape[0] == cm.shape[1] and cm.shape[0] == 2
    tp = cm[1, 1]
    fp = cm[0, 1]
    fn = cm[1, 0]
    assert (2 * tp + fp + fn) != 0
    # F1 = 2TP / (2TP + FP + FN)
    return float(2 * tp) / (2 * tp + fp + fn)
4c0b53dc78f7f52d2fa3ffa1fc5492dab2b7dba4
15,253
import time def mk_epoch_from_utc(date_time): """ Convert UTC timestamp to integer seconds since epoch. Using code should set os.environ['TZ'] = 'UTC' """ pattern = '%Y%m%d-%H%M%S' return int(time.mktime(time.strptime(date_time, pattern)))
070cc8b6ad23e9b590b35d014d7accdab5649a16
15,254
def reg_copy(reg, keys=None):
    """
    Make a copy of a subset of a registry.

    :param reg: source registry (a dict subclass whose class declares
        ``meta_names`` and a ``register(items, **meta)`` method)
    :param keys: keys of registry items to copy; ``None`` copies all
    :return: copied registry subset (a new instance of ``type(reg)``)
    """
    if keys is None:
        keys = reg.keys()
    reg_cls = type(reg)
    new_reg = reg_cls()
    mk = {}  # empty dictionary for meta keys
    # loop over registry meta names declared on the registry class
    for m in reg_cls.meta_names:
        mstar = getattr(reg, m, None)  # current value of metakey in registry
        if not mstar:
            # if there is no value, the value is empty or None, set it to None
            # it's never false or zero, should be dictionary of reg items
            mk[m] = None
            continue
        mk[m] = {}  # emtpy dictionary of registry meta
        # loop over keys to copy and set values of meta keys for each reg item
        for k in keys:
            kstar = mstar.get(k)
            # if key exists in registry meta and is not None, then copy it
            # (keys absent from this meta dict are simply skipped)
            if kstar is not None:
                mk[m][k] = kstar
    # register() receives the item subset plus one kwarg per meta name
    new_reg.register({k: reg[k] for k in keys}, **mk)
    return new_reg
bbf7c5f7074996b5623bde5e8e9fa5d7e1fcf6fa
15,255
def get_runscript(self) -> str:
    """Run a parallel version of CP2K without mpirun or srun, \
as this can cause issues with some executables.

    This method is monkey-patched into the PLAMS ``Cp2kJob`` class.
    """
    executable = self.settings.get("executable", "cp2k.ssmp")
    inp = self._filename('inp')
    out = self._filename('out')
    return f"{executable} -i {inp} -o {out}"
1f9ea38a6171881848919d88eb9c48a0e649f504
15,256
import pickle
def _get_results(args, bhv, num_epochs = 10,k_param = 10, perm=False):
    """
    load results pkl file

    Args:
        args: namespace with RES_DIR, roi, net, subnet, train_size, k_fold,
            k_layers, k_hidden, batch_size, zscore and model_type fields
        bhv: behaviour name embedded in the results filename
        num_epochs: training epochs encoded in the filename (NN models)
        k_param: meaning depends on args.model_type --
            k_hidden  if model_type = 'gru'
            k_wind    if model_type = 'tcn'
            k_layers  if model_type = 'ff'
            corr_thresh if model_type = 'cpm'
            k_components if model_type = 'bbs'
        perm: if True, also load the permutation-test results file

    Returns:
        unpickled results, or (results, permutation_results) when perm=True

    NOTE(review): perm_res_path only gets its model-specific suffix in the
    'gru' branch; calling with perm=True for other model types would open
    an incomplete path -- confirm intended usage.
    """
    # Common path prefix shared by all model types.
    res_path = (
        args.RES_DIR +
        '/roi_%d_net_%d' %(args.roi, args.net) +
        '_nw_%s' %(args.subnet) +
        '_bhv_%s' %(bhv) +
        '_trainsize_%d' %(args.train_size))
    # Permutation results live in a parallel '<RES_DIR>_perm' tree.
    perm_res_path = (
        args.RES_DIR + '_perm'
        '/roi_%d_net_%d' %(args.roi, args.net) +
        '_nw_%s' %(args.subnet) +
        '_bhv_%s' %(bhv) +
        '_trainsize_%d' %(args.train_size))

    # Model-specific filename suffix; k_param's role changes per model.
    if args.model_type == 'gru':
        res_path += (
            '_kfold_%d_k_hidden_%d' %(args.k_fold, k_param) +
            '_k_layers_%d_batch_size_%d' %(args.k_layers, args.batch_size) +
            '_num_epochs_%d_z_%d.pkl' %(num_epochs, args.zscore))
        perm_res_path += (
            '_kfold_%d_k_hidden_%d' %(args.k_fold, k_param) +
            '_k_layers_%d_batch_size_%d' %(args.k_layers, args.batch_size) +
            '_num_epochs_%d_z_%d.pkl' %(num_epochs, args.zscore))
    elif args.model_type == 'tcn':
        res_path += (
            '_kfold_%d_k_hidden_%d' %(args.k_fold, args.k_hidden) +
            '_k_wind_%d_batch_size_%d' %(k_param, args.batch_size) +
            '_num_epochs_%d_z_%d.pkl' %(num_epochs, args.zscore))
    elif args.model_type == 'ff':
        res_path += (
            '_kfold_%d_k_hidden_%d' %(args.k_fold, args.k_hidden) +
            '_k_layers_%d_batch_size_%d' %(k_param, args.batch_size) +
            '_num_epochs_%d_z_%d.pkl' %(num_epochs, args.zscore))
    elif args.model_type == 'cpm':
        res_path += (
            '_corrthresh_%0.1f' %(k_param) +
            '_kfold_%d_z_%d.pkl' %(args.k_fold, args.zscore))
    elif args.model_type == 'bbs':
        res_path += (
            '_kcomponents_%0.1f' %(k_param) +
            '_kfold_%d_z_%d.pkl' %(args.k_fold, args.zscore))

    with open(res_path, 'rb') as f:
        r = pickle.load(f)
    if perm == True:
        with open(perm_res_path, 'rb') as f:
            r_perm = pickle.load(f)
        return r, r_perm
    else:
        return r
d1b5a496d068d49209e550dc83ac77e7d5450594
15,257
import numpy


def twoLeg(L1, L2, th1, th2):
    """Forward kinematics of a two-segment arm.

            B
             \
              \ang2/
               \  /
               A\/
                /
               /
              / ang1
        ______O/_______

    NOTE(review): ``origin`` is reshaped to (2, 1) while the direction
    vectors are shape (2,), so NumPy broadcasting makes the returned
    array (2, 2) rather than a 2-vector -- confirm callers expect this.
    """
    th1 = numpy.radians(th1)
    th2 = numpy.radians(th2)
    origin = numpy.array([0, 0])
    origin.shape = (2, 1)  # nice to be explicit here
    elbow = origin + L1 * numpy.array([numpy.cos(th1), numpy.sin(th1)])
    tip = elbow + L2 * numpy.array([numpy.cos(th2 + th1), numpy.sin(th2 + th1)])
    return tip
e35fad473cbad0918b11a153b1dfadca678e10f4
15,259
def is_background(sample):
    """Return ``True`` if given sample is background and ``False`` otherwise."""
    matches = sample.resolwe.relation.filter(
        type='compare',
        label='background',
        entity=sample.id,
        position='background',
    )
    return len(matches) > 0
ba2bac31991798d1cfb1c00a27aaeb4dbc79a73b
15,260
def is_url_valid(youtube_url):
    """
    Checks whether *youtube_url* is a YouTube watch URL.

    :param youtube_url: the URL string to check
    :return: True if the URL is a YouTube watch URL, otherwise False
    """
    # startswith() rather than a substring test, so that text merely
    # *containing* a watch URL somewhere inside is not accepted.
    return youtube_url.startswith("https://www.youtube.com/watch?v=")
32bb09da3071504a3c7857e6a028d9925447f3d4
15,261
import torch


def ravel_state_dict(state_dict):
    """Flatten every tensor in *state_dict* into a single 1-D tensor.

    state_dict: all variables, including parameters and buffers; tensors
    are concatenated in the dict's iteration order.
    """
    # .values() instead of .items() -- the keys were never used.
    return torch.cat([tensor.view(-1) for tensor in state_dict.values()])
16e5fe5ec07357f694c5665aba96d7ca2ce53796
15,263
def reverse_list_iterative(head):
    """Reverse a singly linked list iteratively.

    :param head: head node of given linked list (or None)
    :type head: ListNode
    :return: head node of reversed linked list
    :rtype: ListNode
    """
    reversed_head = None
    current = head
    while current is not None:
        following = current.next
        current.next = reversed_head
        reversed_head = current
        current = following
    return reversed_head
c6795a6bc3356955a939396feafc1fb7d745333f
15,265
import decimal


def RichardsonExtrapolation(fh, fhn, n, k):
    """Richardson-extrapolate two order-k approximations.

    Inputs:
        fh:  approximation using step h
        fhn: approximation using step h/n
        n:   divisor of h
        k:   original order of approximation
    Returns:
        Richardson estimate of order k+1 (as a float).
    """
    D = decimal.Decimal
    n, k = D(n), D(k)
    # (n^k * f(h/n) - f(h)) / (n^k - 1), computed in Decimal for accuracy.
    numerator = n ** k * D(fhn) - D(fh)
    denominator = n ** k - D(1.0)
    return float(numerator / denominator)
899514b887020980a3bceb4a0dcfd0abcffd1063
15,268
def read_ephem_file(infile):
    """Read an ephemeris file of whitespace-separated columns.

    Each line holds: name, RA, Dec, epoch, period. Returns five parallel
    lists (name as str, the rest as floats).
    """
    names, ras, decs, epochs, periods = [], [], [], [], []
    with open(infile, 'r') as fh:
        for line in fh:
            fields = line.split()
            names.append(fields[0])
            ras.append(float(fields[1]))
            decs.append(float(fields[2]))
            epochs.append(float(fields[3]))
            periods.append(float(fields[4]))
    return names, ras, decs, epochs, periods
4a49db0803352a74adfddab84e9c8e2c44c26756
15,269
def hex_str_to_int(value: str) -> int:
    """Parse a hexadecimal string (with or without a '0x' prefix).

    >>> hex_str_to_int('0x1b1b')
    6939
    """
    return int(value, base=16)
6c7187fb94528a0342b28e4633d03ba797d9d354
15,270
def color(color_):
    """Pass-through hook; swapping this out lets colored output be
    disabled globally."""
    return color_
ab99444124a14e64857226cb6d163f81e65d3eff
15,272
import json
def open_map_file(difficulty):
    """This function opens the map file listed in the info.dat file for the
    specificed difficulty level.

    Reads './temporary/info.dat' (Beat Saber metadata), finds the
    'Standard' beatmap characteristic, then the entry whose '_difficulty'
    matches *difficulty* (case-insensitive), and loads the referenced
    beatmap JSON from the same './temporary/' directory.

    NOTE(review): implicitly returns None when no Standard set or no
    matching difficulty is found -- callers should handle that.
    """
    with open('./temporary/info.dat', 'rb') as i:
        info = json.load(i)
    for x in info['_difficultyBeatmapSets']:
        # Only the 'Standard' characteristic is searched.
        if x['_beatmapCharacteristicName'].casefold() == 'Standard'.casefold():
            for y in x['_difficultyBeatmaps']:
                if y['_difficulty'].casefold() == difficulty.casefold():
                    file_name = y['_beatmapFilename']
                    with open(f"./temporary/{file_name}", 'rb') as f:
                        map_file = json.load(f)
                    return map_file
301716cd4c75f2843a75d04c6e0118bcbd76e065
15,275
def compare_dicts(before, after):
    """Diff two lists of Kubernetes-style resource dicts.

    An item is identified by its metadata 'generateName', falling back to
    'name'. Items whose identifier appears only in *after* are 'added';
    items whose identifier appears only in *before* are 'removed'.

    Args:
        before (list): resource dicts before execution
        after (list): resource dicts after execution

    Returns:
        list: List of 2 lists - [added, removed], each holding the full
        resource dicts
    """
    def _uid(item):
        # generateName takes precedence over name, matching generated
        # pod naming.
        meta = item.get('metadata')
        return meta.get('generateName', meta.get('name'))

    uids_before = {_uid(item) for item in before}
    uids_after = {_uid(item) for item in after}

    added = [item for item in after if _uid(item) not in uids_before]
    removed = [item for item in before if _uid(item) not in uids_after]
    return [added, removed]
26bf3b345d07d656eed2e2cc634072c459c4a5c0
15,276
def str_str(haystack, needle):
    """Return the index of the first occurrence of *needle* in *haystack*,
    or -1 if absent.

    An empty or None needle matches at position 0 (strstr convention).
    Replaces a hand-rolled O(n*m) scan (the original comment noted it
    "timed out") with str.find, which behaves identically for these
    inputs.
    """
    if not needle:
        return 0
    return haystack.find(needle)
8e90ed2c7c4f4a880594f66487742cc939480aa8
15,277
def make_integer(value):
    """Parse a comma-grouped number string like "10,000" into an int."""
    digits = value.replace(",", "")
    return int(digits)
9999c3476afa70366275402b2d327a9e42dfe4d7
15,278