content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from bs4 import BeautifulSoup


def get_tag_by_field(text_or_soup, tag_name, field, expr):
    """Return the first `tag_name` tag whose attribute `field` contains `expr`.

    :param text_or_soup: HTML text, or an already-parsed BeautifulSoup object.
    :param tag_name: tag name to search for (e.g. 'a', 'div').
    :param field: attribute name to inspect (e.g. 'class', 'href').
    :param expr: substring/member that must occur in the attribute value.
    :return: the matching tag, or None if no tag matches.
    """
    if isinstance(text_or_soup, BeautifulSoup):
        soup = text_or_soup
    else:
        # Name the parser explicitly: relying on bs4's auto-detection emits a
        # GuessedAtParserWarning and makes parsing depend on which parser
        # libraries happen to be installed.
        soup = BeautifulSoup(text_or_soup, "html.parser")
    for tag in soup.find_all(tag_name):
        # tag.get(field, '') tolerates tags that lack the attribute entirely.
        if expr in tag.get(field, ''):
            return tag
    return None
4d792c5da4bf4621142c5efa04a4e437a7d70619
94,586
def check_key_value(data, key, value):
    """Checks a key for the given value within a dictionary recursively."""
    if isinstance(key, dict):
        # A dict key means "descend": each mapping outer -> inner says to
        # look up data[outer] and continue the check with inner as the key.
        for outer, inner in key.items():
            return check_key_value(data[outer], inner, value)
    return bool(data[key] == value)
e4a519cd6bff73cc3a077ca98550c130b9653f0f
94,587
import re


def whitespace_trimmer(text, loader_context):
    """Collapse internal whitespace runs into single spaces and trim the ends.

    Example: '  foo   bar  baz ' -> 'foo bar baz'

    ``loader_context`` is accepted for loader-callback compatibility and is
    not used.
    """
    trimmed = text.strip()
    return re.sub(r'\s+', ' ', trimmed)
bd88c265405d171b960f35789513f5f4cfa249d6
94,593
def create_X_y(train, val, test, target):
    """creates X_ and y_ dataframes from train, val, test for model building"""
    def _split(frame):
        # Features are every column except the target; the target becomes y.
        return frame.drop(target, axis=1), frame[target]

    X_train, y_train = _split(train)
    X_val, y_val = _split(val)
    X_test, y_test = _split(test)
    return X_train, y_train, X_val, y_val, X_test, y_test
4edc40fcff33516f0c0f05a8293b929590030726
94,598
def quick_sort(in_list, front):
    """
    Quick sort.

    :param in_list: values to sort; a copy is sorted, the input is untouched
    :param front: sort direction — 0 for ascending (small to large),
        1 for descending (large to small)
    :return: a new sorted list
    """
    def quick_sort_bt(left, right):
        # Recursively partition out_list[left:right + 1] around the pivot
        # at index `left`.
        if left > right:
            return
        base = left  # pivot index: first element of the slice
        i = left
        j = right
        while j != i:
            if front == 1:
                # Descending: move j left past elements <= pivot, then i
                # right past elements >= pivot.
                while (out_list[j] <= out_list[base]) and (i < j):
                    j = j - 1
                while (out_list[i] >= out_list[base]) and (i < j):
                    i = i + 1
            else:
                # Ascending: mirror image of the comparisons above.
                while (out_list[j] >= out_list[base]) and (i < j):
                    j = j - 1
                while (out_list[i] <= out_list[base]) and (i < j):
                    i = i + 1
            if i < j:
                out_list[i], out_list[j] = out_list[j], out_list[i]
        # Put the pivot in its final position, then sort the two halves.
        out_list[base], out_list[j] = out_list[j], out_list[base]
        quick_sort_bt(left, j - 1)
        quick_sort_bt(j + 1, right)

    out_list = list(in_list)  # sort a copy so the caller's list is unchanged
    quick_sort_bt(0, len(out_list) - 1)
    return out_list
37e1690f568cd4967ecff3f8473d0306840b7301
94,602
from datetime import datetime


def get_timestamp(dt):
    """
    Python2 compatible way to compute the timestamp (seconds since 1/1/1970)
    """
    epoch = datetime(1970, 1, 1)
    # Drop any tzinfo so the subtraction against the naive epoch is legal.
    naive = dt.replace(tzinfo=None)
    return (naive - epoch).total_seconds()
e66e007e005460d3535db433ed723f63a46d0f9b
94,610
def fixed_length_split(data, width):
    """Split a string into consecutive fixed-width chunks.

    :param data: string (or sliceable sequence) to split
    :param width: chunk length; the final chunk may be shorter
    :return: list of chunks
    """
    # Regex alternative: re.findall(r'.{%s}' % width, data) — but that
    # drops a short trailing chunk, while slicing keeps it.
    chunks = []
    for start in range(0, len(data), width):
        chunks.append(data[start:start + width])
    return chunks
364f7644278b6e46e38820645fd64befd85b3caf
94,618
import random


def random_int(min, max):
    """
    Get random integer

    :param min: Minimum value (included)
    :param max: Maximum value (included)
    :return: Random integer
    """
    # NOTE: the parameter names shadow the builtins min/max; kept unchanged
    # for backward compatibility with keyword callers.
    value = random.randint(min, max)
    return value
dd0b70b0a7a3957a2bcc4200eb03a28306886dc7
94,619
def minmax(t, u):
    """
    Compute all local minima and maxima of the function u(t), represented
    by discrete points in the arrays u and t. Return lists minima and
    maxima of (t[i],u[i]) extreme points.
    """
    minima = []
    maxima = []
    # Interior points only — the endpoints lack a two-sided neighbourhood.
    for idx in range(1, len(u) - 1):
        prev_v, cur_v, next_v = u[idx - 1], u[idx], u[idx + 1]
        if prev_v > cur_v < next_v:
            minima.append((t[idx], u[idx]))
        if prev_v < cur_v > next_v:
            maxima.append((t[idx], u[idx]))
    return minima, maxima
34525c626ca9aba2a1131367749cf789c4c783f3
94,624
def niceNum(num, precision=0):
    """Return a string for *num* rounded to `precision` decimal places,
    with thousands separated by commas (e.g. 1234.5 -> '1,234.50' at
    precision 2)."""
    return format(round(num, precision), ',.{}f'.format(precision))
f970a31fdd31280642d15476fd94a570f1364945
94,626
def stagger_tuple(elements_list, initial=None):
    """
    Converts a list of objects into a staggered tuple
    Example:
        [1, 2, 3, 4, 5] -> [(1, 2), (2, 3), (3, 4), (4, 5)]
    If `initial` is given (and not None), it is paired with the first element.
    """
    pairs = []
    prev = initial
    for current in elements_list:
        # Skip the very first pairing when no initial element was supplied.
        if prev is not None:
            pairs.append((prev, current))
        prev = current
    return pairs
84578b77dc3b2dc6d8a2d36c0269e9e89ababdd9
94,630
def _clean_values(values): """ Clean values to the state that can be used in Sheets API :type values: list :param values: Row values to clean :rtype: list :return: Cleaned values, in the same order as given in function argument """ return [value if value is not None else '' for value in values]
64556e79cfe019189b4df303bc26cd25a8980b14
94,633
def _FindFirst(itr, matcher): """Finds a value in an iterable that matches the matcher. Args: itr: (iterable[object]) Iterable. matcher: Function accepting a single value and returning a boolean. Returns: The first value for which the matcher returns True. If no value matches, return None. """ return next((x for x in itr if matcher(x)), None)
15ec84d6140538756cf7dfd3cadb6cf9fd3be896
94,634
import yaml


def read_yaml(yaml_file: str) -> dict:
    """Read a YAML file.

    :param yaml_file: Full path with file name and extension to an input
        YAML file
    :type yaml_file: str
    :return: Dictionary
    """
    with open(yaml_file, 'r') as handle:
        content = yaml.load(handle, Loader=yaml.FullLoader)
    return content
d4ded061f1b35fd6b9b3f033185448ecc21d6bae
94,637
def tag_type_check(tag_buffer, tag_list):
    """
    Check if tag buffer is in or is potentially in tag list

    :param tag_buffer: string
    :param tag_list: list of strings
    :return: indicator as list
    """
    result = []
    for candidate in tag_list:
        if candidate == tag_buffer:
            # Exact match wins immediately.
            return [tag_buffer]
        if candidate.startswith(tag_buffer):
            # Partial match: remember that the buffer is a viable prefix.
            result = ['']
    return result
328f6174fcf65a5f335a49bf14222b3a3873210a
94,640
import torch


def default_ray_sampling(ray_batch, N_samples, lindisp, perturb):
    """Samples coarse samples along rays.

    Args:
      ray_batch: array of shape [batch_size, ...]. All information necessary
        for sampling along a ray, including: ray origin, ray direction, min
        dist, max dist, and unit-magnitude viewing direction.
      N_samples: int. Number of different times to sample along each ray.
      lindisp: bool. If True, sample linearly in inverse depth rather than
        in depth.
      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
        random points in time.

    Returns:
      z_vals: [num_rays, num_samples along ray]. Integration time.
      pts: Sampled points.
    """
    # batch size
    N_rays = ray_batch.size()[0]
    # Extract ray origin, direction. [N_rays, 3] each
    rays_o, rays_d = ray_batch[:, 0:3], ray_batch[:, 3:6]
    # Extract lower, upper bound for ray distance (views into ray_batch).
    bounds = ray_batch[..., 6:8].view(-1, 1, 2)
    near, far = bounds[..., 0], bounds[..., 1]

    # Decide where to sample along each ray. Under the logic, all rays will
    # be sampled at the same times.
    t_vals = torch.linspace(0., 1., N_samples)
    if not lindisp:
        # Space integration times linearly between 'near' and 'far'. Same
        # integration points will be used for all rays.
        z_vals = near * (1. - t_vals) + far * t_vals
    else:
        # Bug fix: use out-of-place addition. The original `near += 1e-10`
        # wrote through the `bounds` view and silently mutated the caller's
        # ray_batch tensor.
        near = near + 1e-10
        far = far + 1e-10
        # Sample linearly in inverse depth (disparity).
        z_vals = 1. / (1. / near * (1. - t_vals) + 1. / far * t_vals)
    z_vals = torch.broadcast_to(z_vals, (N_rays, N_samples))

    # Perturb sampling time along each ray.
    if perturb > 0.:
        # get intervals between samples
        mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
        upper = torch.cat([mids, z_vals[..., -1:]], -1)
        lower = torch.cat([z_vals[..., :1], mids], -1)
        # stratified samples in those intervals
        t_rand = torch.rand(z_vals.size())
        z_vals = lower + (upper - lower) * t_rand

    # Points in space to evaluate model at. [N_rays, N_samples, 3]
    pts = rays_o[..., None, :] + rays_d[..., None, :] * \
        z_vals[..., :, None]
    return z_vals, pts
7c90d76d48e4534074d17370df7050e1930fa74f
94,642
def get_lowest_cost_path(paths):
    """
    Gets the path with the lowest cost from a set of paths.
    paths is a set of format: {((node_1,..., node_n), cost)}.
    Returns path with format: ((node, node, node), cost).
    """
    def cost_of(entry):
        # Each entry is (path_tuple, cost); compare by the cost field.
        return entry[1]

    return min(paths, key=cost_of)
46b569c385736f1a4ad9ae015c262572520e1dc9
94,649
def date_object_to_rfc3339(date):
    """Converts a datetime object to a RFC3339 date string.

    Args:
        date: Date object (e.g datetime.datetime(2017, 12, 27, 0, 0, 0, 0)).

    Returns:
        Corresponding RFC3339 date string (e.g: 2017-12-27T00:00:00Z).
    """
    # The trailing 'Z' labels the value as UTC; the input is formatted
    # as-is without any timezone conversion.
    rfc3339_format = '%Y-%m-%dT%H:%M:%SZ'
    return date.strftime(rfc3339_format)
0b558977d4148c8a462f8aef51b93be4ce11cc6e
94,651
def can_write_read(read_and_alignment, current_position):
    """Returns true if the first read in the cache can safely be written.

    This will be the case if the read was not the first in a set of reads
    with the same alignment, or if the current position has gone beyond the
    last base covered in that alignment.
    """
    alignment = read_and_alignment[1]
    if alignment is None:
        # Reads without an alignment record never block writing.
        return True
    current_ref_id, current_ref_start = current_position
    alignment_ref_id = alignment[0]
    alignment_ref_end = alignment[3]
    # Safe once we have moved to another reference, or past the alignment's
    # last covered base.
    if alignment_ref_id != current_ref_id:
        return True
    return alignment_ref_end < current_ref_start
72e88b60c0aaae56ccc3028786a9973e79063bd7
94,654
def merge_config(args, config):
    """This function merges the args and the config together. The command
    line arguments are prioritized over the configured values.

    :param args: command line arguments
    :type args: dict
    :param config: option from the config file
    :type config: dict
    :return: dict with values merged
    """
    # Command line values win, but only when actually provided (not None).
    for key in args:
        if args[key] is not None:
            config[key] = args[key]
    # Default the domain to the homeserver when not configured explicitly.
    if 'domain' not in config:
        config['domain'] = config['homeserver']
    return config
3875170b72c424011168d6aaa7a5bfd13b6efd29
94,665
import re


def get_fun_name(line):
    """
    Get function name from a line.

    >>> get_fun_name('function(foo)')
    'foo'
    >>> get_fun_name('function(foo BAR BAZ)')
    'foo'
    >>> get_fun_name('function(foo')
    'foo'
    >>> get_fun_name('set(')
    """
    match = re.match(r'(function|macro)\s*\((\w+)', line)
    if match:
        # Group 2 is the first identifier inside the parentheses.
        return match.group(2)
    return None
dde4dd19a027d9315fa69dd576998b4585a2973f
94,668
def is_channels_last(node):
    """Returns whether node is channels last, so (N, ..., C)."""
    # Formats beginning with "NC" put channels right after the batch axis;
    # anything else is treated as channels-last.
    channels_first = node.data_format.startswith("NC")
    return not channels_first
4f6c94e858e092d4770fa8fba4f98b1fc4c49a0e
94,670
def amino_function(weight: float, sport_a_week: int) -> float:
    """
    Calculates 24hour calory need. Returns -1 in any error case.

    :param weight: float, kilograms
    :param sport_a_week: int, times. do sport times a week amount.
    :return: weight multiplied by an activity factor
    """
    # The multiplier grows with weekly activity. Values outside all the
    # explicit bands (e.g. more than 7) fall back to the bare weight.
    if sport_a_week <= 0:
        amino = 1.4 * weight
    elif 1 <= sport_a_week <= 3:
        amino = 2.2 * weight
    elif 3 < sport_a_week <= 5:
        amino = 3.4 * weight
    elif 5 < sport_a_week <= 7:
        amino = 4 * weight
    else:
        amino = weight
    return amino
ac4686a2d2f64d323342158939dc45d3c6eb7245
94,671
import math


def percentageFloor(x):
    """
    Round a fraction down to the nearest 0.01.

    e.g. percentageFloor(0.941354) = 0.94
    """
    hundredths = math.floor(x * 100)
    return hundredths / 100.0
77fbf5df97c94ac22a7b1243962788d581199fa7
94,678
def is_left(p0, p1, p2):
    """
    is_left(): tests if a point is Left|On|Right of an infinite line.

    Input: three points P0, P1, and P2
    Return: >0 for P2 left of the line through P0 and P1
            =0 for P2 on the line
            <0 for P2 right of the line

    See: Algorithm 1 "Area of Triangles and Polygons"
    http://geomalgorithms.com/a03-_inclusion.html

    :param p0: point [x,y] array
    :param p1: point [x,y] array
    :param p2: point [x,y] array
    :return: signed cross product; its sign gives the side
    """
    # 2-D cross product of (p1 - p0) x (p2 - p0).
    dx1 = p1[0] - p0[0]
    dy1 = p1[1] - p0[1]
    dx2 = p2[0] - p0[0]
    dy2 = p2[1] - p0[1]
    return dx1 * dy2 - dx2 * dy1
8f7ff0161716e38ae91a9296fd84e730f0dcca0f
94,679
import math


def floor(num: float) -> int:
    """Return *num* rounded down to the nearest integer.

    NOTE(review): the original annotated ``num`` as ``int``, but flooring
    only makes sense for real-valued input; annotation widened to float.
    The name shadows the builtin-style ``math.floor`` it wraps.
    """
    floor: int = math.floor(num)
    return floor
b8b8ffcde853c94f64556f3e6016aa226f654c60
94,680
def parse_platform_specific(cfg, is_linux):
    """Recursive function that will parse platform specific config

    This will move all children of matching platform keys to its parent
    I.e. if current platform is "linux" this config:
        nrf5:
            version: 15.3.0_59ac345
            windows:
                install_dir: C:/ubxlib_sdks/nRF5_SDK_15.3.0_59ac345
            linux:
                install_dir: ${HOME}/sdks/nRF5_SDK_15.3.0_59ac345
    will become:
        nrf5:
            version: 15.3.0_59ac345
            install_dir: ${HOME}/sdks/nRF5_SDK_15.3.0_59ac345
    """
    merged = cfg.copy()
    for key, value in cfg.items():
        if key in ("linux", "windows"):
            # Merge the children of the key matching the current platform;
            # both platform keys are removed from the result either way.
            if is_linux == (key == "linux"):
                merged.update(value)
            del merged[key]
        elif isinstance(value, dict):
            # Recurse into ordinary nested sections.
            merged[key] = parse_platform_specific(value, is_linux)
    return merged
4eb7a008c96ed52a30817be5ee2765ec2b639465
94,684
def listToNum(list):
    """Converts a bit-list [0, 1, 0, 1] to an int.

    :param list: sequence of bits (0/1), most significant bit first
        (the name shadows the builtin; kept for backward compatibility)
    :return: the corresponding integer; an empty list yields 0
    """
    # int('', 2) raises ValueError, so treat the empty list explicitly.
    if not list:
        return 0
    return int(''.join(str(x) for x in list), 2)
9ab86c56b09f6ffe4d158350d066c38a4f014c5a
94,686
def total_incl_gst(amount, gst_rate):
    """Calculate total including GST.

    Calculates total including GST and returns the GST inclusive amount
    and the gst_component amount.

    Args:
        amount (float): GST exclusive amount
        gst_rate (float): GST rate to apply.

    Returns:
        gst_incl (float): GST inclusive total
        gst_component (float): GST component of total.
    """
    gst_component = amount * gst_rate
    gst_incl = amount + gst_component
    return gst_incl, gst_component
c1b0067da8c215b00d5566877085170fc5924aa2
94,687
def _check_suffix(path: str) -> str: """Checks that file is a .csv or .tsv file, and returns which sep to use Args: path: File path Returns: sep Sep to use for parsing """ if path[-4:] == ".tsv": return "\t" if path[-4:] == ".csv": return "," raise ValueError("File must be .csv or .tsv")
0b9c31530a961ab028f501092cbe7c048da0a4dd
94,689
def sec2samples(time_in_seconds, rate):
    """ turn time in seconds into time in samples"""
    # Round to the nearest whole sample, clamping negative times to zero.
    samples = int(round(time_in_seconds * rate))
    return max(samples, 0)
0ecffef925a1a7a922b3b587359b2e9895936824
94,694
def get_primes(n):
    """Get all primes less than or equal to n.

    Uses a sieve of Eratosthenes (O(n log log n)) in place of the original
    per-number trial division.

    :param n: upper bound, inclusive
    :return: list of primes <= n in increasing order
    """
    if n < 2:
        return []
    # is_prime[i] tracks whether i is still considered prime.
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were crossed out already.
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i in range(2, n + 1) if is_prime[i]]
8610b7a2f292625fc076d58636a8c19fe27291ce
94,695
import time


def unix_timestamp_to_string(unix_seconds, format_string):
    """Converts a timestamp (in unix seconds since the epoch) to a
    human-readable string in UTC.

    format_string should be in a format that time.strftime understands.
    """
    if unix_seconds is None:
        return None
    utc_struct = time.gmtime(unix_seconds)
    return time.strftime(format_string, utc_struct)
012cc3d3ebde0ca16a9bd2b95bde1618ad2ffe52
94,699
def curve_to_string(q, t, k, r, D):
    """
    Description:
        Returns a string representation of the curve (q,t,r,k,D)

    Input:
        q - size of prime field
        t - trace of Frobenius
        r - size of prime order subgroup
        k - embedding degree
        D - (negative) fundamental discriminant

    Output:
        s - string representation of (q,t,r,k,D)
    """
    # Any zero parameter marks a failed curve search.
    if 0 in (q, t, r, k, D):
        return 'Failed to find an elliptic curve'
    return ('Elliptic curve over a field of size ' + str(q)
            + ' with trace ' + str(t)
            + ', a subgroup of order ' + str(r)
            + ' with embedding degree ' + str(k)
            + ', and fundamental discriminant ' + str(D))
075afff79982aa74e35f591828535a1ad8233a73
94,703
import logging


def getLogger(module_name=None):
    """
    Returns a logger appropriate for use in the ocp_cd_tools module.

    Modules should request a logger using their __name__
    """
    base = 'ocp_cd_tools'
    # Child loggers are named "ocp_cd_tools.<module>".
    full_name = '{}.{}'.format(base, module_name) if module_name else base
    return logging.getLogger(full_name)
ff1f9e859332c7f0f7d9f734d63eaefdd766b491
94,704
def get_int_from_user(message, min_, max_) -> int:
    """
    Return an integer in range [a, b] (both included) input by user.

    Prompts repeatedly on stdin until a valid integer within
    [min_, max_] is entered.

    :param message: prompt text printed before each attempt
    :param min_: smallest accepted value (inclusive)
    :param max_: largest accepted value (inclusive)
    """
    error_msg = 'invalid input, try again'
    while True:
        print(message)
        inp = input(f' ({min_}-{max_})>')
        try:
            inp = int(inp)
        except ValueError:
            # Non-numeric input: report and re-prompt.
            print(error_msg)
            continue
        if min_ <= inp <= max_:
            return inp
        else:
            # Numeric but out of range: report and re-prompt.
            print(error_msg)
513215bbfcd3bd6e6bb5686949ba65c668e87b28
94,705
def _bias_correction(X, beta, t): """Performs bias correction.""" bc = 1 - beta ** t return X / bc
da3860c76fe327bf09de5ec03aaebea52c0090f8
94,706
def lineno(tree):
    """
    Return the line number of the AST

    :param tree ast:
    :rtype: int
    """
    attributes = tree['attributes']
    return attributes['startLine']
9786932056c21e4dcc707df14bc692e74891dd35
94,710
def average(sum, tem):
    """
    Return the average temperature as a float.

    :param sum: int, the temperature sum
    :param tem: int, divisor (presumably the count of readings entered —
        confirm with caller)
    """
    # The parameter name shadows the builtin sum(); kept for compatibility.
    return float(sum / tem)
79f53328b4b55b6e0358cfac20c92050a56c4269
94,713
import pprint def _format_errors(errors, params): """ Make human-readable error messages from a cerberus validation instance """ err_str = "" # Create a bulleted list of each cerberus error message for key in errors: for err_msg in errors[key]: err_str += " * " + key + ": " + err_msg + "\n" return "".join([ "KBaseReport parameter validation errors:\n", err_str, "You parameters were:\n", pprint.pformat(params) # "\n(View the type spec here: ", # Can point to a KIDL type spec for user reference: # "https://github.com/jayrbolton/KBaseReportPy/blob/master/KBaseReportPy.spec)\n", ])
b1a80b4b377c21f7f8023f5460baa325f9ebee08
94,714
def check_calls_with(call_mock, param, value):
    """
    Given a mocked object, check if it was called with a given parameter
    and/or value.

    :param call_mock: the mock object to check
    :type call_mock: mock.call
    :param param: check if it was called with this given 'param'
    :type param: string
    :param value: check if it was called with this given value for the
        'param'. If the value is not important, call this with mock.ANY
    """
    for recorded_call in call_mock.call_args_list:
        kwargs = recorded_call[1]
        # Only keyword-argument calls are inspected.
        if param in kwargs and kwargs[param] == value:
            return True
    return False
b6186322c2902b15146c823c57a881276f8c2408
94,718
def gnome_sort(arr):
    """
    Gnome Sort (also called Stupid sort), after a garden gnome sorting
    flower pots: compare the current pot with the previous one; if in
    order, step forward, otherwise swap and step backward. At the left end
    always step forward; past the right end, done. This is an in-place
    sort.

    :param arr: the array of values to sort
    :return: the sorted array, which is the same reference as arr
    """
    pos = 0
    while pos < len(arr):
        # Advance while in order (or at the start); otherwise swap back.
        if pos == 0 or arr[pos] >= arr[pos - 1]:
            pos += 1
        else:
            arr[pos - 1], arr[pos] = arr[pos], arr[pos - 1]
            pos -= 1
    return arr
7212415f0c133e03dfdeb6d9b30d34c0edcdaff6
94,721
def import_word_list(file="../sgb-words.txt"):
    """
    Import the dictionary of 5-letter words.
    Returns a list
    """
    with open(file) as handle:
        # One word per line; drop the trailing newline/whitespace.
        return [line.rstrip() for line in handle]
480491836b0d034017a8d65e16e89e07e9055599
94,723
def merge_dicts(*dicts):
    """Shallow copy and merge any number of input dicts.

    Precedence goes to key value pairs in latter dicts.

    Parameters
    ----------
    dicts : dict1, dict2, ...
        Any sequence of dict objects to merge.

    Examples
    --------
    >>> d1 = {'attr1': 100, 'attr2': 200}
    >>> d2 = {'attr3': 300, 'attr1': 999}  # Redefining `attr1`
    >>> sorted(merge_dicts(d1, d2).items())
    [('attr1', 999), ('attr2', 200), ('attr3', 300)]
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
832128310400f45f5c8cb3ae8511d0f1d226367f
94,724
def from_task_key(key):
    """
    Retrieves the coordinates of a tasks given its corresponding key

    :param key: the key to use
    :return: x and y coordinates of the task
    """
    x_coord, y_coord = key[0], key[1]
    return x_coord, y_coord
19ddd818ee90934fa0c2a7f2e2dee949cd971766
94,725
def d_poly_f(x, c):
    """Derivative function of poly_f.

    For coefficients c where c[i] multiplies x**i, evaluates
    sum(i * c[i] * x**(i - 1)) at x.
    """
    total = 0
    # Skip the constant term (index 0): its derivative is zero.
    for power, coeff in enumerate(c):
        if power == 0:
            continue
        total += x ** (power - 1) * coeff * power
    return total
6ed8cf2ba19ae85c65547f9af1e00d25cf42bd63
94,727
from typing import List
from pathlib import Path
import re


def parse_files(text: str) -> List[Path]:
    """Convert comma-delimited filenames to a list of Path objects.

    Splits on commas and/or spaces; blank input yields an empty list.
    """
    if not text or not text.strip():
        return []
    names = re.split("[ ,]+", text)
    return [Path(name.strip()) for name in names]
5823a9e444cadd25d013a278db501211b244cb19
94,728
def ensure_leading_slash(path: str) -> str:
    """
    Ensures that the given path has a leading slash.

    This is needed as mock paths are stored in the database with a leading
    slash, but flask passes the path parameter without one.

    An empty string becomes "/" (the original indexed path[0]
    unconditionally, which raised IndexError on empty input).
    """
    if not path.startswith("/"):
        return "/" + path
    return path
8f059b4d420ba7409993c2c324a302e993d2259a
94,730
def euro_string(value: int) -> str:
    """
    Convert cents to string formatted euro
    """
    euros = round(value / 100, 2)
    return "€ {:.2f}".format(euros)
c8c04772b2d8910d04c8c03583f4324f09c081f7
94,732
def CreateBFTFixture(class_name, params):
    """Initializes a BFT fixture instance.

    Imports a BFT fixture module based on class_name and initializes the
    instance using params.

    Args:
      class_name: fixture's import path + module name. For example,
          "cros.factory.test.fixture.dummy_bft_fixture.DummyBFTFixture".
      params: a dict of params for Init().

    Returns:
      An instance of the specified BFT fixture implementation.
    """
    # Split "package.module.ClassName" into module path and class name.
    module, cls = class_name.rsplit('.', 1)
    # __import__ with a fromlist returns the leaf module rather than the
    # top-level package, so getattr finds the class directly.
    fixture = getattr(__import__(module, fromlist=[cls]), cls)()
    fixture.Init(**params)
    return fixture
e047d5f16fc797176992f13a5df42a3e990692b4
94,733
def strip_prefix(string, prefix):
    """
    Strip the prefix from the given string and return it. If the prefix
    is not present the original string will be returned unaltered.

    :param string: the string from which to remove the prefix
    :param prefix: the prefix to remove
    :return: the string with prefix removed
    """
    # Slice off exactly len(prefix) characters. The original used
    # string.rsplit(prefix)[1], which is wrong when the prefix occurs more
    # than once (strip_prefix("abab", "ab") returned "" instead of "ab")
    # and raises for an empty prefix.
    if string.startswith(prefix):
        return string[len(prefix):]
    return string
82ee44343c2297359481be3bb29260f6828b8b3c
94,735
from typing import Callable
from typing import Any
import inspect
import functools


def partial_kwargs(function: Callable[..., Any], **kwargs: Any) -> Callable[..., Any]:
    """Return a partial function application by overriding default keywords.

    This function is equivalent to `functools.partial(function, **kwargs)`
    but raises a `ValueError` if either the given keyword arguments are not
    defined by `function` or if they do not have defaults. This is useful
    as a way to define a factory function with default parameters and then
    to override them in a safe way.

    Args:
        function: the base function before partial application.
        **kwargs: keyword argument overrides.

    Returns:
        A function.
    """
    argspec = inspect.getfullargspec(function)

    # Positional-or-keyword parameters with defaults are the last
    # len(defaults) entries of argspec.args.
    if argspec.defaults is None:
        defaults = []
    else:
        defaults = list(argspec.args[-len(argspec.defaults):])

    # Bug fix: keyword-only parameters with defaults (def f(*, x=1)) were
    # previously rejected even though functools.partial handles them.
    if argspec.kwonlydefaults:
        defaults.extend(argspec.kwonlydefaults)

    # Find any keys not given as defaults by the function.
    unknown_kwargs = set(kwargs.keys()).difference(defaults)
    if unknown_kwargs:
        error_string = "Cannot override unknown or non-default kwargs: {}"
        raise ValueError(error_string.format(", ".join(unknown_kwargs)))

    return functools.partial(function, **kwargs)
4543e0b4dcf9838bcbd201e76715f66b6f62825d
94,740
def get_function_name(s):
    """
    Get the function name from a C-style function declaration string.

    :param str s: A C-style function declaration string.
    :return: The function name.
    :rtype: str
    """
    s = s.strip()
    if s.startswith("__attribute__"):
        # Remove "__attribute__ ((foobar))"
        if "))" not in s:
            raise ValueError("__attribute__ is present, but I cannot find double-right parenthesis in the function "
                             "declaration string.")
        s = s[s.index("))") + 2:].strip()
    if '(' not in s:
        raise ValueError("Cannot find any left parenthesis in the function declaration string.")
    # Everything before the argument list, e.g. "static int foo".
    declarator = s[:s.index('(')].strip()
    # The function name is the token after the last space.
    last_space = declarator.rfind(' ')
    if last_space == -1:
        raise ValueError('Cannot find any space in the function declaration string.')
    return declarator[last_space + 1:]
7a6b66665e59526db59d8fd8ac02ec5c25a1d4c9
94,741
def has_field(field_name):
    """Returns a function that returns True if the obj has the field_name."""
    # Close over field_name; each call produces an independent checker.
    return lambda obj: hasattr(obj, field_name)
4242d9e08a351b5d7d92e0746320d4747ae4a7f1
94,742
def name_for_image(image):
    """Translates an image's filename to a title.

    >>> name_for_image('business_cat.jpg')
    'Business Cat'
    """
    stem = image.split('.')[0]
    words = stem.replace('_', ' ')
    return words.title()
7b74458a5baf549292304409e91a14ec959fc9a3
94,745
def setBit(value, bit, bitval):
    """Returns value with a specific bit position set to bitval."""
    mask = 1 << bit
    # Truthiness of bitval decides whether the bit is set or cleared.
    return (value | mask) if bitval else (value & ~mask)
113eb76a692b0b42e8d2c31b159c31ec4cfa96df
94,752
def get_jinja_variables(pipeline):
    """Gets a dictionary of variables from a SpinnakerPipeline that can be
    exposed to Jinja templates"""
    return {
        "trigger_job": pipeline.trigger_job,
        "group_name": pipeline.group_name,
        "app_name": pipeline.app_name,
        "repo_name": pipeline.repo_name,
    }
3acde730f9126ebb6ba1ec76b6149bf5259594b4
94,753
import pathlib


def is_file_exist(name: str):
    """
    Indicates whether file name exists or not

    Parameters
    ----------
    name : str
        String representing filename

    Returns
    -------
    bool
        True when filename indicated by name exists (as a regular file),
        False otherwise
    """
    return pathlib.Path(name).is_file()
df8257b281eec415fa78372dea334864875a2f9f
94,756
def calc_dF_F(Ft, Fo):
    """Return the relative change dF/F = (Ft - Fo) / Fo."""
    delta = Ft - Fo
    return delta / Fo
41bf9e0b9afea762b0d7f815999a893024f2e221
94,761
import random


def random_hash(hash_length, sequence_length, seed=None):
    """Create a hash function for randomized partitioning.

    Args:
        hash_length: integer number of bases from each sequence to use in
            the hash key.
        sequence_length: integer length of all sequences in this partition.
        seed: optional hashable random seed to guarantee reproducible
            results.

    Returns:
        Hash function suitable for partitioning sequences with an
        ApproximateStrategy.
    """
    rng = random.Random(seed)
    # Fix a sorted set of positions once; every hashed sequence is sampled
    # at the same indices.
    positions = sorted(rng.sample(range(sequence_length), hash_length))

    def hash_func(sequence):
        return ''.join(sequence[pos] for pos in positions)

    return hash_func
29b05ebf9e11f3c99283a65a67d76a4df7ab8376
94,765
def _get_neighbor_ipaddress_list_by_hostname(config_db, hostname): """Returns list of strings, each containing an IP address of neighbor with hostname <hostname>. Returns empty list if <hostname> not a neighbor """ addrs = [] bgp_sessions = config_db.get_table('BGP_NEIGHBOR') for addr, session in bgp_sessions.items(): if 'name' in session and session['name'] == hostname: addrs.append(addr) return addrs
00bf74fa53e5ef1e12a1fb7d8f501f8fbe8b0efb
94,767
def unescape_path_key(key):
    """Unescape path key.

    Replaces escaped backslashes first, then escaped dots — the order
    matters and is preserved from the original implementation.
    """
    unescaped = key.replace('\\\\', '\\')
    return unescaped.replace('\\.', '.')
ae06c60ebc35807b49e31d22eeea8459a780955a
94,768
import json


def load_configs_from_json(file):
    """
    Load Librarian service configuration from local json config file

    See documentation for expected contents of the config file

    Args:
        file (str): json file containing the config values

    Raises:
        ValueError: if file given is not defined
        TypeError: if data in environment variable is not valid json
    """
    # Binary mode lets the json module handle the encoding itself.
    with open(file, 'rb') as handle:
        return json.load(handle)
41d6f7c30fb02e0c1b1967959f75a3c897541aff
94,772
def collectDict(**kwargs):
    """
    return a dict initialized with kwargs. Essentially a syntactic helper,
    allows to say "collectDict(a=1, b=b)" instead of: {'a':1, 'b':b}
    """
    return dict(kwargs)
92d96cedcc09d23efe120f7a56412e7108b705f8
94,773
def get_type(g):
    """
    Returns the rotation type of each quaternion present in the input
    quaternion array.

    Parameters
    ----------
    g: numpy.array
        a quaternion array of size (5 x n)

    Returns
    -------
    integer values (either +1 or -1) stored in a 1-D numpy array of size n.
    +1 is returned for proper rotations, -1 for improper rotations
    """
    # 1-D input is a single quaternion; otherwise take row 4 across all
    # columns.
    return g[4] if g.ndim == 1 else g[4, :]
743328fa1c5ecd4856c5e47801708424f8c51d96
94,774
import hashlib


def md5_from_filelike(filelike, block_size_factor=128):
    """Create the hexdigested md5 checksum of the contents from a filelike
    object.

    :param filelike: the filelike object for whose contents to generate the
        md5 checksum
    :param block_size_factor: the file is read at chunks of size
        ``block_size_factor * md5.block_size``, where ``md5.block_size`` is
        the block_size used internally by the hashlib module.
    :returns: a string with the hexdigest md5.
    :raises: no checks are done on the filelike object, so it may raise
        IOError if it cannot be read from.
    """
    digest = hashlib.md5()
    chunk_size = block_size_factor * digest.block_size
    while True:
        chunk = filelike.read(chunk_size)
        # Stop on the exact empty-bytes sentinel, matching iter(..., b'').
        if chunk == b'':
            break
        digest.update(chunk)
    return digest.hexdigest()
3639448f1ec2e0f8b4100116283075e7fd890d94
94,776
def get_user_profile_url(user):
    """Return project user's github profile url."""
    return f'https://github.com/{user}'
41e8f7c7cfdb479622766224c827f7611f906484
94,780
def palindrome(word):
    """
    Verify if a word is a palindrome. The palindrome are words or phrases
    that you can read equally in both sides. Whether is palindrome returns
    True else False

    >>> palindrome("radar")
    True
    >>> palindrome("taco cat")
    True
    >>> palindrome("holah")
    False
    >>> palindrome("Ana")
    True
    >>> palindrome("Atar a la rata")
    True
    """
    # Compare case-insensitively, ignoring spaces.
    normalized = word.lower().replace(" ", "")
    return normalized == normalized[::-1]
5fa97cf41ddc750b1fb1c1c808b829984ed29bbe
94,783
import math


def db(val):
    """Just return the 10log10 applied to val"""
    level = math.log10(val)
    return 10 * level
376dbd0ac6531d6439ec4e82bf4828e3da15c702
94,786
def sbxor(a: bytearray, k: int) -> bytearray:
    """Calculate the single byte xor a ^ k

    Arguments:
        a {bytearray} -- Bytearray to be xor'd with
        k {int} -- single byte key to test

    Raises:
        ValueError -- if k is not a single byte (0 <= k < 0x100). The
            original used `assert`, which is silently stripped when Python
            runs with -O, so validation is now an explicit raise.

    Returns:
        bytearray -- a ^ k
    """
    if not 0x0 <= k < 0x100:
        raise ValueError("k must be a single byte")
    n = bytearray(len(a))
    for i, byte in enumerate(a):
        n[i] = byte ^ k
    return n
cf3911d9a0113177e5dea5f2d1197a6e4ffb226d
94,795
def find_frame(buffer):
    """
    Finds the next MP3 frame header.

    @param bytearray buffer Bytes from an MP3 file.
    @return int The index in the buffer where the frame was found, or -1
        if not found.
    """
    # Search for both sync byte patterns and keep the earliest hit.
    hits = [pos
            for pos in (buffer.find(b'\xFF\xFA'), buffer.find(b'\xFF\xFB'))
            if pos > -1]
    if not hits:
        return -1
    return min(hits)
fc9e4d90e4cc2662c33a67b4b7d536a3dbfd6c2f
94,796
def read_genes(file):
    """Read in gene list from file"""
    with open(file) as handle:
        # One gene per line; normalize to lowercase without the newline.
        return [line.rstrip().lower() for line in handle]
eb145df9aa009b7ed947fd57d479abc0781bce60
94,799
def http_verifier(url: str):
    """
    verifies if the url starts with http:// or https://. If not, http:// is
    put in the start of url

    :param url: url to be verified
    :return: url with http://
    """
    # Use a real prefix check: the original substring test over url[:10]
    # wrongly accepted strings like "xhttp://host" as already-schemed.
    if url.startswith(('http://', 'https://')):
        return url
    return 'http://' + url
33a1c4c629ee3ceb8d476bd28200014fb949337c
94,802
def uint64_tag(name, value):
    """Create a DMAP tag with uint64 data."""
    tag_name = name.encode('utf-8')
    length_prefix = b'\x00\x00\x00\x08'  # payload is always 8 bytes
    payload = value.to_bytes(8, byteorder='big')
    return tag_name + length_prefix + payload
dc5ea3d37f9864f318b48e179f201388614d7c33
94,803
def get_ord(c):
    """Get Unicode ord."""
    # Thin wrapper around ord(), kept for API symmetry.
    code_point = ord(c)
    return code_point
122e432dda5a485ac7167d3a835876494d40c26c
94,804
import pickle


def save_pkl(object, path):
    """save python object to a pickle file

    Example:
    ::

        # save a dict to file
        dic = dict(a=1, b=2)
        save_pkl(dic, "dic.pkl")
        print(load_pkl("dic.pkl"))

    :param object: the python object to be saved (the name shadows the
        builtin; kept for backward compatibility).
    :param path: target path.
    :return: the path written to.
    """
    # Use a context manager so the file is closed even if dump() raises;
    # the original left the handle open on error.
    with open(path, "wb") as f:
        pickle.dump(object, f)
    return path
5817206087378c232ad710eb3103c418a13f30b6
94,806
def excess_returns(returns, bm=0):
    """
    Return the excess amount of returns above the given benchmark bm
    """
    excess = returns - bm
    return excess
762d51dff6e2f8cb369e129f97c2268394341363
94,809
def get_coordinates(filename):
    """Extract information about atoms and their coordinates from .xyz
    file."""
    with open(filename, "r") as handle:
        rows = [line.split() for line in handle.readlines()]
    # Row 0 holds the atom count; row 1 is the comment line.
    atom_count = int(rows[0][0])
    body = rows[2:]
    atom_types = []
    coords = []
    for idx in range(atom_count):
        row = body[idx]
        atom_types.append(row[0])
        coords.append([float(row[1]), float(row[2]), float(row[3])])
    return atom_types, coords
3139328855612b8f98443ddda736c5960e0ea92d
94,810
def is_some_keyword_in_text(text, keywords):
    """Return True if any of *keywords* occurs in *text*, case-insensitively.

    >>> test_text = "What is the current price of Ethereum"
    >>> is_some_keyword_in_text(test_text, keywords=["price", "buy"])
    True
    >>> is_some_keyword_in_text(test_text, keywords=["potato"])
    False
    """
    lowered_text = text.lower()
    # any() short-circuits on the first hit, exactly like the old
    # explicit loop with a break.
    return any(keyword.lower() in lowered_text for keyword in keywords)
98449ec3d2225dd2075c445efb502eabef3c14c5
94,811
import re


def preprocess_mako(text):
    """Protect Markdown headings from Mako comment stripping.

    Mako treats line-initial ``#``-runs as comments, but Markdown needs
    them. Each such run is wrapped in a Mako substitution ``${"##"}``
    so it survives rendering.
    """
    line_initial_hashes = re.compile(
        r'(^|(?<=\n))(?P<lead>\s*)(?P<hash>#+)(?P<trail>\s*)'
    )
    return line_initial_hashes.sub(r'\g<lead>${"\g<hash>"}\g<trail>', text)
c1ae85676b4556207688803162fe326f1207e5ec
94,818
def velocity(avg_slice_df, u_keys, norm_u_keys, u_reference):
    """Normalize the flow velocity vector by a reference velocity.

    Args:
        avg_slice_df (DataFrame): Averaged slice dataframe.
        u_keys (list): Keys of the raw velocity components to normalize.
        norm_u_keys (list): Keys to assign to normalized velocity components.
        u_reference (float): Reference velocity used for normalization.

    Returns:
        DataFrame: Copy of the input with the normalized columns added.
    """
    normalized = avg_slice_df.copy()
    normalized[norm_u_keys] = normalized[u_keys] / u_reference
    return normalized
7a32acd45a28321cec6aa010c7a4e5846f0959b1
94,819
import torch def _compute_ece(prob, bin_mean_prob): """Compute the expected calibration error (ECE). Args: prob: Tensor, shape (2,num_bins), containing the probabilities over the {incorrect,correct}x{0,1,..,num_bins-1} events. bin_mean_prob: Tensor, shape (1,num_bins), containing the average probability within each bin. Returns: ece: Tensor, scalar, the expected calibration error. """ pz_given_b = prob / torch.unsqueeze(torch.sum(prob, dim=0), 0) prob_correct = prob[1, :] / torch.sum(prob[1, :]) ece = torch.sum(prob_correct * torch.abs(pz_given_b[1, :] - bin_mean_prob)) return ece
d03b0c63c61489233107aa4600a34077b39c56ab
94,820
def merge_floats(values):
    """Return merged values as an average of floats (or None).

    Args:
        values: [str] of column values in sample equivalence class

    Returns:
        float: average of numeric values or None if no numeric values
    """
    numeric = []
    for raw in values:
        try:
            numeric.append(float(raw))
        except ValueError:
            # Entries that are not parseable as floats are skipped.
            continue
    if not numeric:
        return None
    return sum(numeric) / len(numeric)
a4a3869662ea1f1da326956b7cc6dfcdb58603d4
94,823
import time


def start_timecount() -> float:
    """Capture and return the current wall-clock time in seconds."""
    now = time.time()
    return now
9ff1b013e047d2a9046124dd5e6d9fe8f6b6c21b
94,826
def create_SNN_hparams(hidden_layers=[30], learning_rate=0.001, epochs=100, batch_size=64, pair_size=32,
                       activation='relu', optimizer='Adam', singlenet_l1=0, singlenet_l2=0, reg_term=0,
                       feature_vector_dim=10, dropout=0,
                       fp_length=100, fp_number=3, conv_width=8, conv_number=2, conv_activation='relu',
                       conv_l1=0, conv_l2=0,
                       verbose=1):
    """Build the hyper-parameter dict consumed by create_DNN_model and
    similar model builders.

    :return: dict mapping hyper-parameter name to value
    """
    # Explicit dict literal: no risk of the name/value lists drifting
    # out of sync.
    return {
        'hidden_layers': hidden_layers,
        'learning_rate': learning_rate,
        'epochs': epochs,
        'batch_size': batch_size,
        'pair_size': pair_size,
        'activation': activation,
        'optimizer': optimizer,
        'singlenet_l1': singlenet_l1,
        'singlenet_l2': singlenet_l2,
        'reg_term': reg_term,
        'dropout': dropout,
        'feature_vector_dim': feature_vector_dim,
        'fp_length': fp_length,
        'fp_number': fp_number,
        'conv_width': conv_width,
        'conv_number': conv_number,
        'conv_activation': conv_activation,
        'conv_l1': conv_l1,
        'conv_l2': conv_l2,
        'verbose': verbose,
    }
01ce71aef3104d112e9d1c4df0194faf9f738c28
94,830
import re


def get_version(version_file):
    """Grab the version from the version_file str path.

    Expected to be formatted as ``__version_info__ = (#, #, #)``.

    PARAMETERS
    ----------
    version_file
        The path to the file the version is located in.

    RETURNS
    -------
    - The version as a period separated string. For example, '1.2.3'.

    RAISES
    ------
    RuntimeError
        If no version tuple can be found in the file.
    """
    # Inspired by https://stackoverflow.com/a/7071358
    # 'with' closes the handle; the original `open(...).read()` leaked it.
    with open(version_file, 'rt') as handle:
        ver_str_line = handle.read()
    vsre = r'__version_info__ *= *\( *(\d+) *, *(\d+) *, *(\d+) *\)'
    mo = re.search(vsre, ver_str_line, re.M)
    if not mo:
        raise RuntimeError('Unable to find version string in %s.' % (version_file,))
    return '.'.join(mo.group(1, 2, 3))
81b6e56f13f7a4dc346f0bf941d473116fc7add6
94,833
def get_maximum_fitness_instance(merged_population):
    """Return the population member with the highest fitness.

    Returns None for an empty population; members whose fitness is not
    strictly greater than 0 can never be selected (matching the original
    zero-initialised threshold).
    """
    best, best_fitness = None, 0
    for candidate in merged_population:
        if candidate.fitness > best_fitness:
            best, best_fitness = candidate, candidate.fitness
    return best
0c773b50e9996544c47519cfb6144653272c40f3
94,840
def replace_oov_words_by_unk(tokenized_sentences, vocabulary, unknown_token="<unk>"):
    """Replace words not in the given vocabulary with the '<unk>' token.

    Args:
        tokenized_sentences: List of lists of strings
        vocabulary: List of strings that we will use
        unknown_token: A string representing unknown (out-of-vocabulary) words

    Returns:
        List of lists of strings, with words not in the vocabulary replaced
    """
    # A set gives O(1) membership tests.
    known = set(vocabulary)
    return [
        [token if token in known else unknown_token for token in sentence]
        for sentence in tokenized_sentences
    ]
b4d7e6b3b5568f6c2e752e779a5f3763b416dd7f
94,842
def clean_names(function):
    """Normalise a (possibly dotted) function name.

    Args:
        function (str): a function name, or None.

    Returns:
        str: the final dot-separated component, lower-cased; None when
        the input is None.
    """
    if function is None:
        return None
    return function.lower().rsplit('.', 1)[-1]
560c10c27c67b988c85862e749732b7a70e3f24d
94,854
import click


def validate_scanner_list(ctx, param, value):
    """Validate a comma-separated scanner list into group names / numeric IDs.

    Returns None for an empty value, ['all'] when 'all' is requested, and
    otherwise the flattened list of scanner IDs resolved from digits and
    group names. Raises click.BadParameter for anything else.
    """
    if not value:
        return None
    requested = [token.strip() for token in value.split(',')]
    if 'all' in requested:
        return ['all']
    groups = ctx.obj.scanner_groups
    ids = []
    for token in requested:
        if token.isdigit():
            ids.append(token)
        elif token in groups:
            ids += ctx.obj.scanner_group_map[token]
        else:
            raise click.BadParameter('Invalid scanner "{0}" provided. Must be a valid group or numeric ID.'
                                     .format(token))
    return ids
4a3ad0b9fd4ccbf9a242d7a71102a1a77d9f2bf7
94,855
def solution(number: int) -> int:
    """Sum all natural numbers below *number* that are multiples of 3 or 5.

    For number=10 the multiples are 3, 5, 6 and 9, whose sum is 23.

    :param number: exclusive upper bound
    :return: sum of the qualifying multiples
    """
    return sum(k for k in range(1, number) if k % 3 == 0 or k % 5 == 0)
df68601720e6bde1289cf49caad469c6e60bd31e
94,857
import re


def to_seconds(value: str) -> int:
    """Convert time unit given as '<int> <s|m|h|d>` to seconds.

    :param value: input str
    :type value: str
    :raises ValueError: value must be given as '<int> <s|m|h|d>
    :raises ValueError: value must be > 0s
    :return: value given in seconds
    :rtype: int
    """
    value = value.strip()
    # Capture groups replace the original manual string slicing.
    match = re.fullmatch(r"(\d+) *([sSmMhHdD])", value)
    if match is None:
        raise ValueError(
            "invalid interval: {}, interval must be given as '<int> <s|m|h|d>'".format(
                value
            )
        )
    amount = int(match.group(1))
    unit_seconds = {"s": 1, "m": 60, "h": 60 * 60, "d": 60 * 60 * 24}
    result = unit_seconds[match.group(2).lower()] * amount
    if result <= 0:
        raise ValueError(
            "invalid interval: {}, interval must be greater than 0 seconds".format(
                result
            )
        )
    return result
c9b7e953feb56740a4f11b6c571c73226730283b
94,868
def unpack_traces(traces):
    """Split traces into states, actions, rewards, end states and a mask.

    Each trace is a sequence of transition records with .state, .action,
    .reward and .next_state. The mask holds 1 for transitions whose
    next_state exists and 0 at episode boundaries.
    """
    states, actions, rewards, end_states, not_done_mask = [], [], [], [], []
    for trace in traces:
        states.append(trace[0].state)
        actions.append(trace[0].action)
        rewards.append([step.reward for step in trace])
        end_states.append(trace[-1].next_state)
        not_done_mask.append([0 if step.next_state is None else 1 for step in trace])
    return states, actions, rewards, end_states, not_done_mask
c099abd75cd6de680936b0972330f80d62194666
94,870
def get_folder_name_for_project(project_name):
    """Derive a folder name from *project_name*.

    Lower-cases the name and swaps spaces for underscores:
    My Project -> my_project
    """
    lowered = project_name.lower()
    return lowered.replace(' ', '_')
02389cfecb7d9db586d05a52766665a2f5aac40e
94,872
def is_valid_description(description):
    """Return True when *description* is a string, False otherwise."""
    if isinstance(description, str):
        return True
    return False
eb04eee74189cfe8b5e78e30d52576ac616ccea3
94,873
def device_permission(user, device):
    """Internal check: may *user* edit *device*?

    Staff users may edit everything; an authenticated user may edit
    devices named '<username>/...'. Anonymous users and everyone else
    are denied.

    Returns a bool in every branch.
    """
    if not user.is_authenticated:  # anon
        return False
    if user.is_staff:  # admin/staff
        return True
    if device.startswith(f"{user.username}/"):  # owner
        return True
    # Explicit denial: the original fell through and implicitly
    # returned None for non-owners.
    return False
1894dd65f6a99340beb20569cacfb307b0093067
94,876
def photorespiration_rate(gross_canopy_photosynthesis_rate, stomata_co2_concentration, co2_compensation_point):
    """Equation 9.13: photorespiration rate.

    photorespiration_rate = gross_canopy_photosynthesis_rate
                            * CO2_compensation_point / stomata_CO2_concentration

    Args:
        gross_canopy_photosynthesis_rate: gross canopy photosynthesis rate
        stomata_co2_concentration: CO2 concentration at the stomata
        co2_compensation_point: CO2 compensation point

    Returns:
        photorespiration rate [µmol {CO2} m^-2 s^-1]
    """
    # Same left-to-right association as the original so float results
    # match bit-for-bit.
    numerator = gross_canopy_photosynthesis_rate * co2_compensation_point
    return numerator / stomata_co2_concentration
d54ff5c2bb791bfd7395ecaa8e08bfea927cc2a8
94,877
from collections import OrderedDict


def seconds_to_human(seconds):
    """Convert seconds to human readable format like '1 day 2 hours'.

    From Thomas Sileo's blog post "How to convert seconds to human
    readable interval back and forth with Python": <https://myl.be/d4>.

    :param seconds: Seconds to convert
    :type seconds: int
    :rtype: str
    :return: Human readable string ('' for 0 seconds)
    """
    # OrderedDict now comes from collections — the typing alias was a
    # misuse (typing.OrderedDict is meant for annotations, not calls).
    interval_dict = OrderedDict([("years", 365 * 86400),    # 1 year
                                 ("months", 30 * 86400),    # 1 month
                                 ("weeks", 7 * 86400),      # 1 week
                                 ("days", 86400),           # 1 day
                                 ("hours", 3600),           # 1 hour
                                 ("minutes", 60),           # 1 minute
                                 ("seconds", 1)])           # 1 second
    seconds = int(seconds)
    parts = []
    for unit, unit_seconds in interval_dict.items():
        # divmod with integer division: the original used '/', which
        # under Python 3 yields floats, breaking both the zero test and
        # the rendered counts (e.g. '1.5 days').
        count, seconds = divmod(seconds, unit_seconds)
        if count:
            # Singular form drops the trailing 's'.
            parts.append("{0} {1}".format(count, unit.rstrip('s') if count == 1 else unit))
    return " ".join(parts)
3d38c61bf9fb2d80c2c7f326a7035c6dffde7f94
94,879
import torch


def preds_per_cat(targs, bin_preds, cat):
    """Column-wise sum of predictions over images whose true labels include *cat*.

    Compares model predictions with ground-truth labels for multi-label
    classification; in the resulting table the true category is the row
    and the predicted categories are the columns.

    targs: targets (tensor, n_images x n_categories), ground-truth labels
    bin_preds: binary predictions (tensor, same shape as targets)
    cat: index of the true category to condition on
    """
    rows_with_cat = targs[:, cat] > 0  # restrict to rows where cat is present
    return bin_preds[rows_with_cat].sum(dim=0)
03c9d3dc1bc8fa9ee9b52705fb84857f91ce83d6
94,887
def get_id_lookup_from_demographics_file(demographics_df):
    """Extract a lookup (Redcap ID -> NCANDA SID) from a demographics DataFrame.

    Expects a demographics_df outputted by `process_demographics_file`.
    Returns a dict mapping each 'study_id' to its 'mri_xnat_sid' value
    (None if that column is absent).
    """
    reindexed = demographics_df.reset_index().set_index('study_id')
    return reindexed.to_dict().get('mri_xnat_sid')
84bb0ad97693c00624534fcd91a1360b3163d70a
94,890