content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def parabole(x, a, b, c):
    """Evaluate the quadratic y = a*x**2 + b*x + c.

    Parameters:
        x (float or numpy array): abscissa(s).
        a (float): quadratic coefficient.
        b (float): linear coefficient.
        c (float): constant term.

    Returns:
        Function value (float or numpy array).
    """
    return a*x**2+b*x+c
38f8f9c76d65e582ddd7b96198ec81bb63818446
410,691
import re def _find_special_id(txt: str, pattern: str, split_str: str) -> list: """Creates an accession ID from starting characters in `pattern` and digits following `split_str` in `txt`. Args: txt (str): Text to search for ID pattern (str): Pattern containing at the start the character prefix and at the end the remaining digits of the accession ID split_str (str): String separating the digit part of the ID Returns: list: List with accession ID. """ match = re.findall(f'({pattern})', txt) ids = [] if len(match) != 0: for match in match: split_match = match.split(split_str) prefix = re.findall("[A-Z]+", split_match[0])[0] number = split_match[-1].strip() ids += [prefix + number] return ids
5492c8200d197bf93fc7b13b3b59120df91c3f80
551,860
def is_cyclic(x, y):
    """Return True if y's first two digits equal x's last two digits."""
    # Truncating to int is safe: x and y come from the polygonal funcs.
    tail = str(int(x))[2:]
    head = str(int(y))[:2]
    return tail == head
247a15a931cd2689de17f8c5d4bdbeb1a923e5d1
354,313
def binomial(n, k):
    """Compute the binomial coefficient "n over k" (n choose k).

    Returns 0 when k > n, matching the combinatorial convention.
    """
    # math.comb (Python 3.8+) already handles k > n (returns 0), k == 0 and
    # k == n (returns 1), and raises ValueError for negative arguments just
    # like the original factorial-based version — without computing three
    # full factorials.
    return math.comb(n, k)
1f7a6e7709cb6adaf178a49fced2f1961f70ef87
665,520
def allints(s: str) -> Iterator[int]:
    """Yield every (optionally negative) integer found in *s*."""
    return (int(found.group()) for found in re.finditer(r"-?\d+", s))
0820af0942192eaebbba27c5feb5b57f06bb8628
420,597
import re def _read_ssim_values(content): """Parse FFMpeg output to find SSIM statistics""" result = {} stats_re = re.compile(r"^\[.*\] SSIM (?P<stats>.*)$") for line in content.splitlines(): match = stats_re.match(line) if match: for stat in match.group("stats").split(): if stat.startswith('('): continue name, value = stat.split(":", 1) result[name] = float(value.split()[0]) return result
28c504536261f25561af552fab62470e13a74008
225,241
import pickle def save_pickle(obj, filename): """ Serializes a given object Parameters: ----------- obj : object filename : string """ return pickle.dump(obj, open(filename, 'wb'))
ac658f4b32f4a9bf036c28e06e849eaacc312284
292,636
def basic_stats(db):
    """Collect basic statistics to be fed to output functions.

    Expects *db* to have dict-valued 'rp' and 'users' entries and a
    'logins' entry; returns counts of the first two plus logins verbatim.
    """
    # len() works directly on the mappings; materializing key lists with
    # len(list(d.keys())) was an O(n) allocation for no benefit.
    return {
        "rps": len(db['rp']),
        "users": len(db['users']),
        "logins": db['logins'],
    }
bdb4d93807d078789dfb44f1b47158a67f28165c
669,065
def readable_timedelta(days):
    """Describe *days* as whole weeks plus leftover days."""
    # divmod yields the quotient and remainder in one step.
    weeks, remainder = divmod(days, 7)
    return "{} week(s) and {} day(s).".format(weeks, remainder)
120f517939842b4e0686a57a3117221e3db63004
7,681
def get_dimension(geometry):
    """Gets the dimension of a Fiona-like geometry element.

    Drills down through the nesting appropriate to the geometry type and
    returns the length of the innermost coordinate tuple.
    """
    nesting_depth = {
        'Point': 0,
        'LineString': 1,
        'MultiPoint': 1,
        'Polygon': 2,
        'MultiLineString': 2,
        'MultiPolygon': 3,
    }
    geom_type = geometry["type"]
    if geom_type not in nesting_depth:
        raise ValueError("Invalid type '{}'".format(geom_type))
    coords = geometry["coordinates"]
    for _ in range(nesting_depth[geom_type]):
        coords = coords[0]
    return len(coords)
763ad23df99fa67981aa38a001053b38196eb9fb
442,739
import unicodedata def is_unicode_letter(character): """Return True if character is a Unicode letter""" # NOTE(review): ASCII characters (ord <= 127) always return False here, so # plain 'a'-'z'/'A'-'Z' are NOT reported as letters — this appears to # target non-ASCII letters only; confirm callers expect that. # Unicode general categories beginning with 'L' (Lu, Ll, Lt, Lm, Lo) are # the letter categories. if ord(character) > 127: return unicodedata.category(character)[0] == 'L' return False
74cdd6deae1fc4e81d585bbad8b39b41c5cf8455
191,635
def wrap_slice(
    start: Optional[int],
    stop: Optional[int],
    step: Optional[int],
    length: Optional[int],
) -> Tuple[int, int, int]:
    """Wraps slice indices into a window.

    ``None`` parameters take defaults (start=0, stop=length, step=1); any
    index outside ``[0, length)`` is wrapped modulo ``length``. A falsy
    *length* collapses the range to ``(0, 0, step)``.

    Arguments:
        start (int): :attr:`slice.start` index, or ``None``.
        stop (int): :attr:`slice.stop` index, or ``None``.
        step (int): :attr:`slice.step` value, or ``None``.
        length (int): Exclusive end of the virtual range to wrap, or ``None``.

    Returns:
        tuple of int: Wrapped slice parameters.
    """
    if step is None:
        step = 1
    if not length:
        # No window to wrap into: empty range.
        return 0, 0, step
    if start is None:
        start = 0
    elif not 0 <= start < length:
        start %= length
    if stop is None:
        stop = length
    elif not 0 <= stop < length:
        stop %= length
    return start, stop, step
78242a65dde43208e36b9aaa38a7912310747a3f
90,023
def _extract_open_mpi(version_buffer_str): """ Parses the typical OpenMPI library version message, eg: Open MPI v4.0.1, package: Open MPI Distribution, ident: 4.0.1, repo rev: v4.0.1, Mar 26, 2019 """ return version_buffer_str.split("v", 1)[1].split(",", 1)[0]
a20d1ed65f9b40c50ec5c28a17fd73f11a470c9e
668,209
def zfsr32(val, n):
    """Zero-fill (logical) shift right for 32-bit integers."""
    if val < 0:
        # Reinterpret the negative value as its unsigned 32-bit
        # equivalent so the vacated high bits fill with zeros.
        val += 4294967296
    return val >> n
4b890caa0b7b086e923e7b229e5551fd66d24016
4,672
def Bbytes(b):
    """Return the single-byte struct packing of boolean *b*."""
    return struct.pack("?", b)
93ceb40f02227a20094a918b2daa0ecb1447abc1
368,494
def is_alnum(a_string, okchars=None):
    """
    Checks if *a_string* contains only alphanumeric characters, optionally
    extended with the characters in *okchars*.

    Returns False if any character in *a_string* is not alphanumeric and
    not one of *okchars*; True otherwise (including for an empty string).
    """
    # Use a set for O(1) membership tests instead of scanning a list for
    # every character of the input.
    allowed = set(string.ascii_letters + string.digits)
    if okchars:
        allowed.update(okchars)
    return all(ch in allowed for ch in a_string)
10cd7dbce1ff3a0bc66fe9b8c393753198a121cc
578,124
def filter_query_by_project(q, project_safe, context):
    """Filters a query to the context's project.

    Adds a filter limiting results to the context's project for non-admin
    users; for admin users (or when project_safe is falsy) the query is
    returned unmodified.

    :param q: query to apply filters to
    :param project_safe: boolean indicating if project restriction filter
        should be applied
    :param context: context of the query
    """
    # Short-circuit keeps context untouched when project_safe is falsy.
    needs_restriction = project_safe and not context.is_admin
    if not needs_restriction:
        return q
    return q.filter_by(project=context.project_id)
21e39921168a05d1c2012dfcadcd3f41bd41a6d6
194,339
def binary_search(lst, item):
    """
    Perform binary search on a sorted list.
    Return the index of the element if it is in the list, otherwise return -1.
    """
    low = 0
    high = len(lst) - 1
    # Fixes vs. original: '/' produced a float, which is not a valid list
    # index (TypeError); the 'low < high' loop plus trailing lst[low] check
    # raised IndexError on an empty list. Standard inclusive-bounds search.
    while low <= high:
        middle = (high + low) // 2
        current = lst[middle]
        if current == item:
            return middle
        if current < item:
            low = middle + 1
        else:
            high = middle - 1
    return -1
014ff8f943a04d66499affac1320c738c7ddd292
204,414
def xy(v0x=2.0, v0y=5.0, t=0.6):
    """Computes horizontal and vertical positions at time t
    for projectile motion with initial velocities (v0x, v0y)."""
    g = 9.81  # acceleration of gravity
    horizontal = v0x*t
    vertical = v0y*t - 0.5*g*t**2
    return horizontal, vertical
87abba7e5414bb0966c1e423a04f5216126013b8
75,181
def maxIpaddr(ipaddr, netmask):
    """
    Takes a quad dot format IP address string and makes it the largest
    assignable address still in the same subnet (one below the broadcast
    address).

    Returns: Max quad dot IP string or None if error
    """
    try:
        addr = struct.unpack("!I", socket.inet_aton(ipaddr))[0]
        mask = struct.unpack("!I", socket.inet_aton(netmask))[0]
        # Highest usable host = broadcast - 1 = all host bits set except
        # the lowest. The original OR'd a hard-coded 0.0.0.254, which is
        # only correct for /24 netmasks.
        host_max = (~mask & 0xFFFFFFFF) - 1
        result = (addr & mask) | host_max
        return socket.inet_ntoa(struct.pack('!I', result))
    except Exception:
        # Preserve the original best-effort contract: any error -> None.
        return None
4ac9599b57e52881481f094d9d614462fe31a065
559,497
def as_tuple(range_): """Returns a Python tuple (reference_name, start, end).""" return range_.reference_name, range_.start, range_.end
591f51a6e51fdd5c528775a9fa18bad16675369b
295,306
import hashlib def computeFileChecksum(algo, filePath): """Compute digest of ``filePath`` using ``algo``. Supported hashing algorithms are SHA256, SHA512, and MD5. It internally reads the file by chunk of 8192 bytes. :raises ValueError: if algo is unknown. :raises IOError: if filePath does not exist. """ if algo not in ['SHA256', 'SHA512', 'MD5']: raise ValueError("unsupported hashing algorithm %s" % algo) with open(filePath, 'rb') as content: hash = hashlib.new(algo) while True: chunk = content.read(8192) if not chunk: break hash.update(chunk) return hash.hexdigest()
5e93b79ec6f008133e2ce436c91be9452d912c63
692,914
def get_util_info(service, curr_util, config): """ Fetches the current utilization along with high and low thresholds defined in config file. Args: service: name of the service curr_util: the current utilization data of all services config: config file Returns: Dictionary of data: result = { "service" : xx, "curr_cpu_util" : xx, "curr_mem_util" : xx, "high_cpu_threshold" : xx, "low_cpu_threshold" : xx, "high_mem_threshold" : xx, "low_mem_threshold" : xx } """ result = {} result["service"] = service result["curr_cpu_util"] = "%.3f " % curr_util[service]['cpu'] # Round it to 3 decimal digits result["curr_mem_util"] = "%.3f " % curr_util[service]['memory'] # Round it to 3 decimal digits result["curr_netRx_util"] = "%.3f " % curr_util[service]['netRx'] # Round it to 3 decimal digits result["curr_netTx_util"] = "%.3f " % curr_util[service]['netTx'] # Round it to 3 decimal digits result["high_cpu_threshold"] = config.get(service, 'cpu_up_lim') # Ex second argument should be: cpu_up_lim result["low_cpu_threshold"] = config.get(service, 'cpu_down_lim') # Ex second argument should be: cpu_low_lim result["high_mem_threshold"] = config.get(service, 'mem_up_lim') # Ex second argument should be: cpu_up_lim result["low_mem_threshold"] = config.get(service, 'mem_down_lim') # Ex second argument should be: cpu_low_lim return result
6852ca60aecdc6f67703f730f75ee9e978c12dd2
246,710
def evaluate_apartment(area: float, distance_to_underground: int) -> float:
    """Estimate price of an apartment from its area and metro distance."""
    base = 200000 * area
    discount = 1000 * distance_to_underground
    return base - discount
c212a0ddffdfa2fd9f14c9410c7cd88418cc505f
108,473
from typing import Counter def predict_license_expression(license_matches): """ Return the best-effort predicted license expression given a list of LicenseMatch objects. """ unknown_expressions = ['unknown', 'warranty-disclaimer'] license_expressions = ( license_match.license_expression for license_match in license_matches ) known_expressions = [ le for le in license_expressions if le not in unknown_expressions ] if not known_expressions: return "unknown" license_expressions_counts = dict(Counter(known_expressions).most_common()) highest_count = list(license_expressions_counts.values())[0] top_license_expressions = [ expression for expression, count in license_expressions_counts.items() if count == highest_count ] if len(top_license_expressions) == 1: return top_license_expressions[0] top_license_matches = [ license_match for license_match in license_matches if license_match.license_expression in top_license_expressions ] max_match_length = max([ license_match.matched_length for license_match in top_license_matches ]) license_expression_prediction = next( license_match.license_expression for license_match in top_license_matches if license_match.matched_length is max_match_length ) return license_expression_prediction
a36adb49397060d6848051b7cb6acf535969db33
261,428
def set(bit): """Set the specifeid bit (1-indexed) eg. set(8) == 0x80""" return 1 << (bit - 1)
84a43bdd5ef4d7ee2d5a85a5fea6cb71dda85300
251,755
def strcmp(s1, s2):
    """Compare two strings respecting german umlauts.

    Returns a negative/zero/positive number like C's strcmp, using a
    collation order that places umlauts next to their base letters.
    """
    chars = "AaÄäBbCcDdEeFfGgHhIiJjKkLlMmNnOoÖöPpQqRrSsßTtUuÜüVvWwXxYyZz"
    # zip stops at the shorter string, matching range(min(len, len)).
    for c1, c2 in zip(s1, s2):
        delta = chars.find(c1) - chars.find(c2)
        if delta:
            return delta
    return len(s1) - len(s2)
092934118b539d0af148fe3c3685387f43ab8250
220,411
import torch from typing import Union from typing import Iterable def clamp_norm( x: torch.Tensor, maxnorm: float, p: Union[str, int] = 'fro', dim: Union[None, int, Iterable[int]] = None, eps: float = 1.0e-08, ) -> torch.Tensor: """Ensure that a tensor's norm does not exceeds some threshold. :param x: The vector. :param maxnorm: The maximum norm (>0). :param p: The norm type. :param dim: The dimension(s). :param eps: A small value to avoid division by zero. :return: A vector with |x| <= max_norm. """ norm = x.norm(p=p, dim=dim, keepdim=True) mask = (norm < maxnorm).type_as(x) return mask * x + (1 - mask) * (x / norm.clamp_min(eps) * maxnorm)
cb8968124eedcaa695f2e20916f0d6efef386fb7
254,207
def extract(regularE : str, init : str, stop : str, string : str):
    """
    regularE: RE to catch the substring
    init: first delimiter string to strip
    stop: last delimiter string to strip

    Finds the first match of *regularE* in *string*, then removes the
    *init* and *stop* delimiters from it and returns the result.
    """
    first_hit = re.findall(regularE, string)[0]
    return first_hit.replace(init, "").replace(stop, "")
4466f2a0a7377d634554a231b5b79c2778bc3f30
572,444
def resolver(parameters: List[str], defaults: Optional[Mapping]=None):
    """
    Creates a function that resolves its positional and keyword arguments
    against a list of parameters, returning a mapping from parameter to
    argument value.

    Keyword arguments win over defaults; a parameter with neither raises
    KeyError.

    :param parameters: parameters to resolve against
    :param defaults: default values for parameters
    """
    defaults = defaults or {}

    def resolve(*args, **kwargs):
        bound = dict(zip(parameters, args))  # positionals first
        for name in set(parameters) - set(bound):
            if name in defaults:
                bound[name] = kwargs.get(name, defaults[name])
            else:
                bound[name] = kwargs[name]  # KeyError if truly missing
        return bound

    return resolve
b372c36967d1929e357688b7f30d2c1397db46cb
347,874
def str_xor(a, b):
    """
    (string, string) -> string

    XOR two strings character-by-character. Output length is the length
    of the shorter input (zip stops at the shorter iterable) — so e.g.
    str_xor("AI", " ") is 'a', a single character, since b has length 1.

    :param a: string a
    :param b: string b
    :return: the XORed string
    """
    # zip already truncates to the shorter string, so the original's
    # len-comparison branch with explicit slicing was redundant. (The
    # original docstring's '>>> str_xor("AI", " ")' example claiming 'ai'
    # was wrong for the same reason.)
    return "".join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
1fcef738dfe37be4dc9141acf0913d5a589505dc
606,828
def _LoadPathmap(pathmap_path): """Load the pathmap of obfuscated resource paths. Returns: A dict mapping from obfuscated paths to original paths or an empty dict if passed a None |pathmap_path|. """ if pathmap_path is None: return {} pathmap = {} with open(pathmap_path, 'r') as f: for line in f: line = line.strip() if line.startswith('--') or line == '': continue original, renamed = line.split(' -> ') pathmap[renamed] = original return pathmap
48b9b73926b77081587e898dd89744b68478ba0b
586,968
def conta_palavras(frase):
    """
    Return a dict mapping each lowercase word in *frase* to the number of
    times that word occurs in the phrase.
    """
    # dict.get with a default replaces the explicit membership branch.
    counts = {}
    for palavra in frase.lower().split():
        counts[palavra] = counts.get(palavra, 0) + 1
    return counts
7539e171a8636393fc6128aec259b1bf95f8e477
289,122
def groupID_callback(x):
    """
    image_name = 1_1-1256_1264_2461_2455-12003110450161(_1).jpg
    big_image_name = 12003110450161

    Extracts the big-image name from the annotation's image path.
    """
    path = x['info']['image_path']
    stem = path.split('.')[-2]        # drop the file extension
    last_part = stem.split('-')[-1]   # text after the final '-'
    return last_part.split('_')[0]    # strip any trailing '_N' suffix
ef7e1ef4b7f5caad4bfa9e91adac7ba8420341d9
115,491
def get_at_content(sequence):
    """Return content of AT in sequence, as float between 0 and 1,
    inclusive, rounded to two decimal places.
    """
    seq = sequence.upper()
    at_total = seq.count('A') + seq.count('T')
    return round(at_total / len(seq), 2)
6316d29cdb9d7129f225f2f79a50485fb6919e32
3,570
async def amount_to_secs(amount: tuple) -> int: """Resolves one unit to total seconds. Args: amount (``int``, ``str``): Tuple where str is the unit. Returns: ``int``: Total seconds of the unit on success. Example: >>> await amount_to_secs(("1", "m")) 60 """ num, unit = amount num = int(num) if not unit: unit = 's' if unit == 's': return num elif unit == 'm': return num * 60 elif unit == 'h': return num * 60 * 60 elif unit == 'd': return num * 60 * 60 * 24 elif unit == 'w': return num * 60 * 60 * 24 * 7 else: return 0
4804cab093503f8f23bb3a7f50c305feb130a8b6
287,131
def normalize_loss_dict(losses, weight=1, inplace=True):
    """Normalize all losses in a dict.

    Parameters
    ----------
    losses : dict
        Accumulated dictionary of losses/metrics
    weight : float, default=1
        Sum of weights across all batches
    inplace : bool, default=True
        Modify the dictionary in-place

    Returns
    -------
    losses : dict
        Normalized dictionary of losses/metrics
    """
    target = losses if inplace else dict(losses)
    for key in target:
        target[key] /= weight
    return target
bca4e133f920b968a8e08ee8d0f10b38ff2d8251
170,951
def _xpath_eval(xmlschema, xpath, namespaces): """ Wrapper around the xpath calls in this module. Used for caching the results :param xmlschema: xmltree representing the schema :param xpath: str, xpath expression to evaluate :param namespaces: dictionary with the defined namespaces :returns: whatever ``xmlschema.xpath`` returns for the expression """ return xmlschema.xpath(xpath, namespaces=namespaces)
4fc0b83f0d88432f845a17e516c14494a1c275cb
234,811
def max_contig_sum(L):
    """
    L, a list of integers, at least one positive
    Returns the maximum sum of a contiguous subsequence in L
    """
    # Kadane's algorithm: keep the best sum ending at the current element
    # and reset the running sum whenever it drops below zero.
    # (The original carried a large commented-out O(2^n) powerset version,
    # removed here as dead code.)
    max_value = 0
    value = 0
    for item in L:
        value += item
        if value < 0:
            value = 0
        if max_value < value:
            max_value = value
    return max_value
33e73e4a98943adadfda75af103588e7caa2115f
692,656
def _varied_parameters(parameters, varied, names): """ Help function for identifying parameters that are varied (or fixed) in experiments. (Not used anymore in this module.) ========== =========================================================== names names of parameters. parameters values of parameters. varied subset of names (the parameters that are varied elsewhere). return a list of the indices in parameters corresponding varied. ========== =========================================================== An example may help to show the idea. Assume we have three parametes named 'a', 'b', and 'c'. Their values are 1, 5, and 3, i.e., 'a' is 1, 'b' is 5, and 'c' is 3. In a loop elsewhere we assume that 'a' and 'c' are varied while 'b' is fixed. This function returns a list of the parameter values that correspond to varied parameters, i.e., [1,3] in this case, corresponding to the names 'a' and 'c': >>> parameters = [1,5,3] >>> names = ['a','b','c'] >>> varied = ['a','c'] >>> varied_parameteres(parameters, varied, names) [1,3] """ indices_varied = [names.index(i) for i in varied] varied_parameters = [parameters[i] for i in indices_varied] return varied_parameters
ca8609c45500d762bda929b5b3d85179a1f8aa63
192,480
def gc_at_decompress(gcat): """ Decompress GT/AC content - inverse function of gc_at_compress(gc, at). Parameters ---------- gcat : numpy.ndarray Array contains compressed GC/AT content. Returns ------- gc : list of int Binned GC content (100bp bins). at : list of int Binned AT content (100bp bins). """ return list(gcat[:gcat.size // 2]), list(100 - gcat[:gcat.size // 2] - gcat[gcat.size // 2:])
be8d7009ee2465ec66a4c8df8beee95cd3f274e1
408,966
def combine_duplicate_businesses(businesses): """ Averages ratings of the same business from different sources :param businesses: Full list of businesses :returns: Filtered list with combined sources """ seen_addresses = set() filtered_list = [] for business in businesses: if business.address not in seen_addresses: filtered_list.append(business) seen_addresses.add(business.address) else: # Find duplicate in list for b in filtered_list: if b.address == business.address: # Average bayesian ratings and update source count new_rating = (b.bayesian + business.bayesian) / 2.0 b.bayesian = new_rating b.source_count = b.source_count + 1 return filtered_list
1ffc4ba92c206233525e843622d44f831ee7c138
145,459
def binary_search(array: list[int], item: int) -> int:
    """
    Search for *item* in a sorted *array* by binary search.

    Parameters
    ----------
    array: list[int]
        a sorted array/list of integers
    item: int
        an integer value

    Returns
    -------
    int
        Index of 'item' in the array, or -1 if it is not present.
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2  # floor division keeps the index an int
        probe = array[mid]
        if probe == item:
            return mid
        if probe < item:
            lo = mid + 1   # guess was too low
        else:
            hi = mid - 1   # guess was too high
    return -1
7c9686c8771b9c62c87f49da09c8e606e705641b
159,514
def prepare_source_valid_lengths(F, valid_length, query_data, num_heads: int):
    """
    Returns an int32 valid length tensor of shape (batch * num_heads,
    query_length) to be used in the softmax operation in DotAttentionCell
    with the length argument. Due to broadcast_like, dtypes of
    valid_length and query_data must be the same.

    :param valid_length: Valid length information. Shape: (batch,).
    :param query_data: Tensor from which the query_length dimension is
        derived. Expected shape: (X, query_length, ...).
    :param num_heads: Number of attention heads.
    :return: int32 tensor of shape (batch * num_heads, query_length).
    """
    # Duplicate each batch entry once per attention head: (batch * heads,)
    repeated = F.repeat(valid_length, repeats=num_heads, axis=0)
    # Add a query_length axis and broadcast it to match query_data's dim 1.
    expanded = F.expand_dims(repeated, axis=1)
    broadcast = F.broadcast_like(expanded, query_data,
                                 lhs_axes=(1,), rhs_axes=(1,))
    return F.cast(broadcast, dtype='int32')
a67d728928272b6fafe7d2ae52d3d6044b5ee129
613,392
def get_hand(indices, game_lines):
    """Return the inclusive slice of game_lines between the supplied
    (start, end) index pair."""
    start, end = indices
    return game_lines[start:end + 1]
ef9d9239da6737c79e334722643a25123c63a8c8
91,042
def tamper(payload, **kwargs):
    """
    Prepends (inline) comment before parentheses (e.g. ( -> /**/()

    Tested against:
        * Microsoft SQL Server
        * MySQL
        * Oracle
        * PostgreSQL

    Notes:
        * Useful to bypass web application firewalls that block usage of
          function calls

    >>> tamper('SELECT ABS(1)')
    'SELECT ABS/**/(1)'
    """
    if not payload:
        # None / empty payloads are returned unchanged.
        return payload
    return re.sub(r"\b(\w+)\(", r"\g<1>/**/(", payload)
bd1698e146f4b683fea07f15b456256a093bf0d8
533,421
def configparser_config(filename):
    """Return a configparser.ConfigParser populated from *filename*.

    Missing files are silently ignored (ConfigParser.read semantics),
    yielding an empty configuration.
    """
    parser = configparser.ConfigParser()
    parser.read(filename)
    return parser
7ff9dc47e1970b1417800300635b484d43793bc2
144,010
def prediction_output_partial(output):
    """
    Given predicted labels, correct answer spans that start with a 2 so
    they start with a 1 instead (a span must open with label 1).

    Input: array of labels
    Output: array of corrected labels
    """
    corrected_output = []
    prev_label = 0
    # The enumerate index in the original was never used; plain iteration
    # suffices.
    for label in output:
        if label == 2 and prev_label == 0:
            # Span opening mislabelled as continuation: rewrite to 1.
            corrected_output.append(1)
            prev_label = 1
        else:
            corrected_output.append(label)
            prev_label = label
    return corrected_output
1ff41b5ba514de9fc39360f823653fdd069e98de
263,887
def create_np_dtype(py_obj, h_group, name, **kwargs):
    """Dump a numpy dtype object into an h5py file.

    Parameters
    ----------
    py_obj (numpy.dtype):
        python object to dump; should be a numpy dtype, e.g. numpy.float16
    h_group (h5.File.group):
        group to dump data into.
    name (str):
        the name of the resulting dataset
    kwargs (dict):
        keyword arguments to be passed to create_dataset function

    Returns
    -------
    tuple containing h5py.Dataset and empty tuple of subitems
    """
    # Store the dtype's string descriptor (e.g. '<f2') as ASCII bytes.
    encoded = bytearray(py_obj.str, "ascii")
    dataset = h_group.create_dataset(name, data=encoded, **kwargs)
    return dataset, ()
0bc2e2791c38e98a1c65484d42972332740e61a9
573,408
def stringify(value):
    """
    Escapes a string to be usable as cell content of a CSV formatted data.
    """
    text = '' if value is None else str(value)
    # RFC 4180: fields containing a comma, a double quote, or a line break
    # must be quoted, with embedded quotes doubled. The original only
    # handled commas, emitting broken CSV for quoted or multi-line values.
    if any(ch in text for ch in (',', '"', '\n', '\r')):
        text = '"{}"'.format(text.replace('"', '""'))
    return text
74d5683a79e7efab48ec24767d1c912b66c0e65b
23,450
def sql_comment(comment: str) -> str:
    """
    Transforms a single- or multi-line string into an ANSI SQL comment:
    ``--`` (the ANSI SQL comment marker) is prefixed to every line.
    Returns an empty string for empty input.
    """
    if not comment:
        return ""
    prefixed = [f"-- {line}" for line in comment.splitlines()]
    return "\n".join(prefixed)
57ff5ce80ac0f9e37bfe7259d8d3a6d576ffdee7
425,121
def requires_2fa(response): """Determine whether a response requires us to prompt the user for 2FA.""" if ( response.status_code == 401 and "X-GitHub-OTP" in response.headers and "required" in response.headers["X-GitHub-OTP"] ): return True return False
53a1ddfd0b4deaf49155881e9909cc0e6be4f2ac
112,223
import torch def lossFunctionKLD(mu, logvar): """Compute KL divergence loss. Parameters ---------- mu: Tensor Latent space mean of encoder distribution. logvar: Tensor Latent space log variance of encoder distribution. """ kl_error = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return kl_error
2a9cc400d9a357f69f3f9964304c4cbc0ac4331e
675,721
def vector_sub(u: Sequence[int], v: Sequence[int]) -> Sequence[int]:
    """Returns the difference (u - v) of equal-length vectors u and v."""
    # zip pairs components directly; no index arithmetic needed. (Per the
    # documented contract the inputs are equal-length.)
    return [a - b for a, b in zip(u, v)]
dd3f4aa878037bf975ccd032bab0a93e808c0743
215,879
def read_fasta(filename): """ Returns the sequences from a fasta file. :param filename: filename of the fasta file :type filename: str :return: list """ sequences = [] with open(filename) as file: for i, line in enumerate(file): if i % 2 == 0: pass else: sequences.append(line.rstrip()) return sequences
b476eb4a4572e195e85eb6da7fb31da628733468
531,566
def scrape_console_capturing(html):
    """
    Scrapes system capturing percentage from Console page HTML.

    Capturing percentage examples:
      <span class="status"><label>Capturing</label>66.7%</span>
      <span class="status"><label>Capturing</label>100%</span>

    Returns the rounded integer percentage, or '' when not found.
    """
    found = re.search(r'Capturing\D+(\d+\.?\d*)%', html)
    if not found:
        return ''
    return round(float(found.group(1)))
a960bcf6cc75db1948e04c4f617e261c820b045a
361,050
def lcm(a: int, b: int) -> int:
    """Return the LCM (Lowest Common Multiple) of two integers.

    >>> lcm(3, 10)
    30
    >>> lcm(42, 63)
    126
    >>> lcm(40, 80)
    80
    >>> lcm(-20, 30)
    60
    """
    import math
    # math.gcd replaces the original O(min(|a|, |b|)) countdown search,
    # and integer '//' avoids the float-division precision loss of the
    # original abs(int((a * b) / g)) for large inputs.
    g = math.gcd(a, b)
    if g == 0:  # lcm(0, 0) is conventionally 0
        return 0
    return abs(a * b) // g
7039abbcb72edcd01fda66e1d04444821e1a50d6
576,564
def set_environ(env_holder: dict, overwrite: bool):
    """Return a setter function for the environment mapping.

    With overwrite the setter always assigns; otherwise it uses
    setdefault so existing keys are kept. Values are coerced to str.
    """
    if overwrite:
        def setter(k, v):
            return env_holder.update({k: str(v)})
    else:
        def setter(k, v):
            return env_holder.setdefault(k, str(v))
    return setter
a4add31810d5fce63a086135f64093f17fdb5042
533,997
def countRotated(text):
    """Count occurrences of a word character immediately followed by a
    newline in *text*.

    Args:
        text (string): text that is going to be processed.

    Returns:
        int: number of occurrences
    """
    return sum(1 for _ in re.finditer(r'\w\n', text))
5a6e5559e577f04b27dc4da9c29d58dcd5df73dd
325,177
import json def LoadNotebook(filename): """Read a IPython notebook (.ipynb). Reads the file and parses it as JSON. Args: filename: The path to the .ipynb file. Returns: A parsed JSON object (dictionary). """ with open(filename) as f: return json.load(f)
d8fbefd2b9891bfc8d6e5d2876c4c400b8219990
619,325
def are_features_consistent(train_df, test_df, dependent_variables=None): """Verifies that features in training and test sets are consistent Training set and test set should have the same features/columns, except for the dependent variables train_dr: pd.DataFrame training dataset test_df: pd.DataFrame test dataset dependent_variables: list list of column names for the dependent variables """ if dependent_variables is None: features_training_set = train_df.columns else: features_training_set = train_df.drop(dependent_variables, axis=1).columns features_test_set = test_df.columns features_diff = set(features_training_set).symmetric_difference(features_test_set) assert features_diff == set(), f"Discrepancy between training and test feature set: {features_diff}" return True
cc9f0f42519a77bb01ff1ceb31c89b64f0af0832
650,377
def distances_from_average(test_list):
    """Return each value's signed distance from the list average, rounded
    to two decimals (positive when the value is below average)."""
    avg = truediv(sum(test_list), len(test_list))
    return [round(float(-(value - avg)), 2) for value in test_list]
e59e404e824e35d99dbf8fb8f788e38b7a19cc03
30,611
def get_confidence(s) -> int:
    """
    Using the input element's 'title' attribute (hOCR x_wconf), parse out
    the confidence value and return (100 - confidence); 100 when absent.
    """
    found = re.search(r'''(?<=x_wconf )([0-9]+)''', s.get('title'))
    if found:
        return 100 - int(found.group(0))
    return 100
776f260c3b079ac8a5fc3b773ee87e6568ba88d8
143,543
def format_test_id(test_id) -> str:
    """Format numeric to a 5-wide 0-padded string."""
    return "{:0>5}".format(test_id)
91d6acefdda63300a32c832320662d9b5403d9fd
667,948
def format_datestamp(datestamp): """Format datestamp to an OAI-PMH compliant format. Parameters ---------- datestamp: datetime.datetime A datestamp. Return ------ str: Formatted datestamp. """ return datestamp.strftime('%Y-%m-%dT%H:%M:%SZ')
f050dd4f18691034c0414a4d9fa51629b0208d6a
704,023
import torch def sgn(x): """Element-wise sign of *x* via ``torch.sign`` (-1, 0, or +1 per element).""" return torch.sign(x)
26edea505941a6ede1786d8a3d634c759742a64c
313,065
def diff_renorm(image): """Maps image back into [0,1]. Useful for visualising differences""" scale = 0.5/image.abs().max() image = image*scale image += 0.5 return image
18a2295f9a9497a25123195544296115cbdf5413
623,584
def _is_ethernet(port_data): """Return whether ifIndex port_data belongs to an Ethernet port. Args: port_data: Data dict related to the port Returns: valid: True if valid ethernet port """ # Initialize key variables valid = False # Process ifType if 'ifType' in port_data: # Get port name name = port_data['ifName'].lower() # Process ethernet ports if port_data['ifType'] == 6: # VLAN L2 VLAN interfaces passing as Ethernet if name.startswith('vl') is False: valid = True # Return return valid
5c944f324eac43c03cb63b24a2eb840e5c00e01e
221,271
async def resolve_delete_user(_root, info, id): """Resolver function for deleting a user object""" user = await info.context["registry"].get(id) await info.context["registry"].delete(user.id) return True
ca1a7ffc0c3684f8e21301073fc512a630a1e0f3
108,653
def count_null(column=None, index=None): """ Get total number of null values in a DplyFrame, one or more rows of DplyFrame, or one or more columns of DplyFrame :param column: one column name or one list of column names of a DplyFrame :param index: one row name or one list of row names of a DplyFrame :return: a nonnegative integer """ def _count_null(d1, column=None, index=None): if column is not None: return d1.pandas_df[column].isna().sum().sum() if index is not None: return d1.pandas_df.loc[index].isna().sum().sum() return d1.pandas_df.isnull().sum().sum() return lambda d1: _count_null(d1, column, index)
e8ce284adf73f765b44cf333e64a32e3aed85b98
228,247
def find_span_binsearch(degree, knot_vector, num_ctrlpts, knot, **kwargs): """ Finds the span of the knot over the input knot vector using binary search. Implementation of Algorithm A2.1 from The NURBS Book by Piegl & Tiller. The NURBS Book states that the knot span index always starts from zero, i.e. for a knot vector [0, 0, 1, 1]; if FindSpan returns 1, then the knot is between the half-open interval [0, 1). :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param num_ctrlpts: number of control points, :math:`n + 1` :type num_ctrlpts: int :param knot: knot or parameter, :math:`u` :type knot: float :return: knot span :rtype: int """ # Get tolerance value tol = kwargs.get('tol', 10e-6) # In The NURBS Book; number of knots = m + 1, number of control points = n + 1, p = degree # All knot vectors should follow the rule: m = p + n + 1 n = num_ctrlpts - 1 if abs(knot_vector[n + 1] - knot) <= tol: return n # Set max and min positions of the array to be searched low = degree high = num_ctrlpts # The division could return a float value which makes it impossible to use as an array index mid = (low + high) / 2 # Direct int casting would cause numerical errors due to discarding the significand figures (digits after the dot) # The round function could return unexpected results, so we add the floating point with some small number # This addition would solve the issues caused by the division operation and how Python stores float numbers. # E.g. round(13/2) = 6 (expected to see 7) mid = int(round(mid + tol)) # Search for the span while (knot < knot_vector[mid]) or (knot >= knot_vector[mid + 1]): if knot < knot_vector[mid]: high = mid else: low = mid mid = int((low + high) / 2) return mid
6050e7c09f290caa54bb6fb2f75082824c0faa85
667,760
def slide_elements(conn, item_id, group_list=None):
    """Retrieve a list of elements from the HTK annotation response.

    Each element in this list corresponds to a polygon. Optionally, ask for
    elements that belong to a specific group or list of groups.

    Returns ``(elements, tile_metadata)``; ``([], [])`` when the item has no
    annotations.
    """
    annotations_resp = conn.get('annotation/item/' + item_id)
    img_metadata = conn.get('item/' + item_id + '/tiles')

    if len(annotations_resp) == 0:
        print('The annotation response had a length of Zero')
        return [], []

    target_elements = []
    for annotation in annotations_resp:
        for element in annotation['annotation']['elements']:
            # Only elements carrying a group (i.e. a class) are candidates
            if 'group' not in element:
                continue
            if group_list is None or element['group'] in group_list:
                target_elements.append(element)

    return target_elements, img_metadata
3f645916e0abfd762a284da78827c34a7e12eaff
399,114
def fitness_func(individual):
    """Evaluate an individual's fitness as 1 minus the normalized Hamming
    distance to the all-ones vector [1, 1, ..., 1].

    Returns a value within [0, 1]; 1 means the individual is all ones.
    """
    length = float(len(individual))
    # Total deviation from the ideal all-ones vector
    mismatch = sum(abs(gene - 1) for gene in individual)
    return 1 - mismatch / length
f4c131d8aa7c0707997ff8ad6b250f7e9ea57d7c
445,109
def matrix(rows, columns, val):
    """Build a ``rows`` x ``columns`` matrix with ``val`` in every cell.

    NOTE: Does not check negative sizes; as with ``range()``, negative
    dimensions simply yield empty rows/columns.

    Parameters
    ----------
    rows(int) : The number of rows of the matrix
    columns(int) : The number of columns of the matrix
    val(int) : The value in every cell of the matrix

    Returns
    -------
    list : The matrix as a list of row lists
    """
    # Nested comprehension replaces the manual append loops; as in the
    # original, every cell holds the same ``val`` reference.
    return [[val for _ in range(columns)] for _ in range(rows)]
ec9ac171f83178d17b1dbfbf9529846f98dd48cd
515,021
import requests
import time


def elements_json_overpass(overpass_query):
    """
    This function gets the response of the Overpass API according to a query.
    It returns the ``elements`` list from the JSON response. It must be used
    when overpy can not get specific elements from the API response.
    Example: bound limits of a way from Open Street Map.

    Retries forever (sleeping 30 s between attempts) on network or
    JSON-decoding failures.

    :param overpass_query: String
    :return: list
    """
    data = {}
    overpass_url = "http://overpass-api.de/api/interpreter"
    while True:
        try:
            response = requests.get(overpass_url, params={'data': overpass_query})
            data = response.json()
        except (requests.RequestException, ValueError):
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # are not swallowed; retry only on network / JSON-decode errors
            # (response.json() raises a ValueError subclass on bad JSON).
            time.sleep(30)
            continue
        break
    elements = data.get("elements")
    return elements
2409e9371bf9db7ceb801b5910dbab9697154809
351,660
import torch


def complex_mult(data1, data2, dim=-1):
    """
    Element-wise complex multiplication of two tensors whose ``dim`` axis
    holds (real, imaginary) pairs.

    :param data1 -> torch.Tensor:
    :param data2 -> torch.Tensor:
    :param dim -> int: dimension that represents the complex values
    """
    assert data1.size(dim) == 2
    assert data2.size(dim) == 2
    a, b = torch.unbind(data1, dim=dim)
    c, d = torch.unbind(data2, dim=dim)
    # (a + bi)(c + di) = (ac - bd) + (bc + ad)i
    real_part = a * c - b * d
    imag_part = b * c + a * d
    return torch.stack([real_part, imag_part], dim=dim)
629dca58f4f1706b1263d835e8d3cf1628ea4ebb
523,365
def hide_axis(ax, axis='x', axislabel=True, ticklabels=True, ticks=False,
              hide_everything=False):
    """
    Hide axis features on an axes instance, including axis label, tick
    labels, tick marks themselves and everything.
    Careful: hiding the ticks will also hide grid lines if a grid is on!

    Parameters
    ----------
    axis : 'x', 'y' or 'both'
    axislabel : bool, default True
        Hide the axis label.
    ticklabels : bool, default True
        Hide the tick labels.
    ticks : bool, default False
        Hide the tick markers.  (The original docstring wrongly documented
        the default as True.)
    hide_everything : bool, default False
        Hides labels, tick labels and axis labels.
    """
    if axis not in ['x', 'y', 'both']:
        raise AttributeError('axis must be "x" or "y" or both')

    if hide_everything:
        # Override the individual switches
        ticks = True
        axislabel = True
        ticklabels = True

    axis_to_modify = []
    # Collect the axis objects to act on; clear labels as we go
    if axis == 'x' or axis == 'both':
        axis_to_modify.append(ax.get_xaxis())
        if axislabel:
            ax.set_xlabel('')
    if axis == 'y' or axis == 'both':  # not elif because of the "both" option
        axis_to_modify.append(ax.get_yaxis())
        if axislabel:
            ax.set_ylabel('')

    for an_axis in axis_to_modify:
        if ticklabels:
            an_axis.set_ticklabels([])
        if ticks:
            an_axis.set_ticks([])

    return ax
11160c6f928a8b5befd4a20cbd04f97a5f6dfae0
677,715
def convert_time(timestamp):
    """Convert a timestamp in seconds to hour/minute/second strings.

    Parameters
    ----------
    timestamp : int or float
        Elapsed time in seconds.

    Returns
    -------
    tuple of str
        (hours, minutes, seconds), each zero-padded to at least two digits.
        Fix: the original returned hours as an ``int`` when >= 10 but as a
        ``str`` otherwise; every field is now consistently a string.
    """
    hrs = "{0:02d}".format(int(timestamp // 3600))
    mins = "{0:02d}".format(int(timestamp % 3600) // 60)
    secs = "{0:02d}".format(int(timestamp % 3600) % 60)
    return hrs, mins, secs
57e9696cbdd8b28cac8011da97b4d5f13190c909
497,127
def redirect(url: str, perm: bool = True) -> bytes:
    """Send a 3x (redirect) response.

    :param url: The URL to redirect to.
    :param perm: Whether this is a permanent redirect.
    :return: Bytes to be sent to the client.
    """
    status = 31 if perm else 30
    response = f'{status} {url}\r\n'
    return response.encode()
b931f28e34666004e0e65773a98474a4eb580ab3
210,920
import re def _slugify_resource_name(name): """Slugify resource name """ return re.sub(r'[^a-zA-Z0-9_]', '_', name)
730d9f198d3f4ad20dfe8c3608a85fde6b01cc60
409,310
import torch


def distance_to_reference_trajectory(pred_centroid: torch.Tensor, ref_traj: torch.Tensor) -> torch.Tensor:
    """ Computes the distance from the predicted centroid to the closest waypoint in the reference trajectory.

    :param pred_centroid: predicted centroid tensor, size: [batch_size, 2]
    :type pred_centroid: torch.Tensor, float
    :param ref_traj: reference trajectory tensor, size: [batch_size, num_timestamps, 2]
    :type ref_traj: torch.Tensor, float
    :return: closest distance between the predicted centroid and the reference trajectory, size: [batch_size,]
    :rtype: torch.Tensor, float
    """
    assert pred_centroid.dim() == 2   # [batch_size, 2]
    assert ref_traj.dim() == 3        # [batch_size, num_timestamps, 2]

    # Broadcast the centroid over the timestamp axis, then take the
    # per-batch minimum of the Euclidean distances.
    offsets = pred_centroid.unsqueeze(1) - ref_traj
    distances = torch.linalg.norm(offsets, ord=2, dim=-1)
    return torch.amin(distances, dim=1)
d123e601a96c8db7d6a947e23a4ab308352cf778
571,119
def _divide_bundles(bundles): """Take each subsegment inside a bundle and put it in its own bundle, copying the bundle metadata.""" divided = [] for bund in bundles: for t in bund['times']: new_bund = bund.copy() new_bund['times'] = [t] divided.append(new_bund) return divided
a95524c0845b278c282ef24510be5c9734d34344
348,205
def calculate_buy_and_hold_perc(df):
    """Return the percentage gain/loss of buying at the first closing price
    and holding until the last closing price.

    Bug fix: the original computed ``(1 - first/last) * 100``, which is the
    change relative to the *final* price. A buy-and-hold return must be
    measured relative to the *initial* (purchase) price:
    ``(last - first) / first * 100``.

    :param df: DataFrame-like with a ``close`` column, ordered by time.
    :return: return percentage rounded to 3 decimal places.
    """
    first_close = df.iloc[0].close
    last_close = df.iloc[-1].close
    buy_and_hold_perc = (last_close / first_close - 1) * 100
    return round(buy_and_hold_perc, 3)
bb9d651de5d0af8c7ccbd9db37ccaf645ba87127
339,595
from typing import Any from typing import Type def _is_measurement_device(instrument_handle: Any, class_type: Type) -> bool: """ Returns True if the instrument handle is of the given type, else False. This function checks whether the given handle is of the correct instrument type. All error's are catched related to importing of not installed drivers or instruments which are not connected. Args: instrument_handle: An measurement device instance. class_type: The type of the measurement class. Returns: True if of the given class_type, else False. """ try: is_present = isinstance(instrument_handle, class_type) except Exception: is_present = False return is_present
7a0d9ba51a36df8c800f35e20b4a3ae690522502
118,029
def equals_auto_tol(x: float, y: float, precision: float = 1e-6) -> bool:
    """
    Returns true if two numbers are equal within a relative tolerance of
    ``precision`` times the smaller magnitude of the two values.

    Bug fixes versus the original:
    - ``min(x, y)`` gave a *negative* tolerance for negative inputs, so
      equal negative numbers compared unequal; use ``min(abs(x), abs(y))``.
    - Exact equality (including ``0 == 0``) is now handled explicitly,
      since a zero tolerance would otherwise reject it.
    """
    if x == y:
        return True
    return abs(x - y) < min(abs(x), abs(y)) * precision
7bfabdb43ebd6e662e82b9590e8bf0975b2897d4
84,846
def add_lists(*lists):
    """Element-wise sum of several lists without numpy.

    For example, given lists:
        [1, 2]
        [3, 4]
    The result is:
        [4, 6]

    Lists are sliced to prevent mutation.
    """
    defensive_copies = [each[:] for each in lists]
    return [sum(column) for column in zip(*defensive_copies)]
48d7d41d9466d95e5db705268c469d502478496d
125,807
def author(entry):
    """Convert the 'author' field of ``entry`` (if present) from a single
    ' and '-separated string into a list of names, dropping blanks.
    Returns the (mutated) entry."""
    if 'author' in entry:
        flattened = entry['author'].replace('\n', ' ')
        entry['author'] = [part for part in flattened.split(' and ') if part.strip()]
    return entry
55eaef836925f0249828f18ac5c01cf24d51b339
387,984
import math


def poisson(lam, n, highest_k):
    """Probability mass of a Poisson(lam) distribution summed over
    k in [n, highest_k).

    Args:
        lam: The mean of the Poisson
        n: true cognates (lower bound of the summed range)
        highest_k: exclusive upper bound of the summed range

    Returns:
        Sum of Poisson probabilities for n <= k < highest_k, or 0 when
        lam is not a valid (positive) mean.
    """
    try:
        log_lam = math.log(lam)
        total = 0
        for k in range(n, highest_k):
            # pmf(k) = exp(k*log(lam) - lam - log(k!))
            total += math.e ** (log_lam * k - lam - math.log(math.factorial(k)))
        return total
    except ValueError:
        # math.log raises ValueError for lam <= 0
        return 0
64785a429cbba7e7b6a6ffb60d52c0981799f344
381,418
def normalize_whitespace(text):
    """Collapse runs of whitespace in ``text`` to single spaces and strip
    leading/trailing whitespace.

    Note: this also removes all double and single quote characters, which
    the original docstring did not mention.
    """
    text = text.replace('"', '').replace("'", '')
    return ' '.join(text.split())
b4444169b25550aca691c44e35ba7a1c52342067
304,631
def dump_param(val):
    """Serialize a query-parameter value to its string form."""
    serialized = str(val)
    return serialized
8d46aac8fa3f6fd7ae29395e19b4f86fbc1019c7
456,648
def bypass_csrf_protection(f):
    """
    Decorator that allows a route to bypass the need for a CSRF nonce on
    POST requests. This should be considered beta and may change in future
    versions.

    :param f: A function that needs to bypass CSRF protection
    :return: The same function with the _bypass_csrf attribute set, which
        tells CTFd to not require CSRF protection.
    """
    setattr(f, '_bypass_csrf', True)
    return f
bb19c8ed38b11b7766f11c4f4a818b80fa01539a
254,378
def find_sort_symbol(list_names):
    """Return the first character of a name that occurs least often as a
    first character across ``list_names``.

    :param list_names: list of names (non-empty strings)
    :return: the least frequent first character; ties are broken by the
        order of first appearance (matching the original stable sort)
    """
    counts = {}
    for name in list_names:
        first_char = name[0]
        counts[first_char] = counts.get(first_char, 0) + 1
    # min() scans keys in insertion order, so the earliest-seen character
    # wins ties — same result as the original sort-then-index approach.
    return min(counts, key=counts.get)
e779cbb4198dccdc46e0fc151566b7559f335f22
111,737
def misclassification_percentage(y_true, y_pred):
    """
    Returns misclassification percentage
    ( misclassified_examples / total_examples * 100.0)
    """
    elementwise_match = list(y_true == y_pred)
    wrong = elementwise_match.count(False) * 1.
    total = y_true.shape[0]
    return wrong / total * 100.0
262057b506ea3e3edbefc9bc019f36b67b485ee1
686,687
def genericGetContent(path): """ Retrieve the data from a given file path. """ # We are not currently willing to use a with-statement here, for backwards # compatibility. fp = path.open() try: return fp.read() finally: fp.close()
d2404ef7eff427fa22e4ff9e6970bd684d9f4983
70,053
import configparser


def get_details(field, detail):
    """
    Config parser.

    Reads the config.ini file and returns the value stored under section
    ``field``, option ``detail``.
    """
    parser = configparser.ConfigParser()
    parser.read("config.ini")
    return parser[field][detail]
a9242fe3294130c44af495d90cfa2fb6d506118e
463,394
def unique_chains(buss_obj, location):
    """Count the stores of each unique chain in the given location.

    Duplicate businesses (same ``_id``) are counted once. Returns a list of
    ``[chain_name, store_count]`` pairs sorted by count descending, then
    name ascending.
    """
    visited_ids = set()
    store_counts = {}
    for business in buss_obj:
        if business.location != location:
            continue
        # Skip duplicated store records by ID
        if business._id in visited_ids:
            continue
        visited_ids.add(business._id)
        store_counts[business.name] = store_counts.get(business.name, 0) + 1

    pairs = [[chain, count] for chain, count in store_counts.items()]
    return sorted(pairs, key=lambda pair: (-pair[1], pair[0]))
d74e2df1cfdf53a6a260ce6a171f5ca22f4fb3fe
547,502
def parse_join_type(join_type: str) -> str:
    """Parse and normalize join type string.

    The normalization will lower the string, remove all space and ``_``,
    and then map to the limited options. Here are the options after
    normalization: ``inner``, ``cross``, ``left_semi``, ``left_anti``,
    ``left_outer``, ``right_outer``, ``full_outer``.

    :param join_type: the raw join type string
    :raises NotImplementedError: if not supported
    :return: the normalized join type string

    Note: the original chained membership tests; the second one
    (``["inner", "join"]``) could never match "inner" because the first
    already returned it. A single lookup table removes the redundancy.
    """
    join_type = join_type.replace(" ", "").replace("_", "").lower()
    normalized = {
        "inner": "inner",
        "join": "inner",
        "cross": "cross",
        "semi": "left_semi",
        "leftsemi": "left_semi",
        "anti": "left_anti",
        "leftanti": "left_anti",
        "left": "left_outer",
        "leftouter": "left_outer",
        "right": "right_outer",
        "rightouter": "right_outer",
        "outer": "full_outer",
        "full": "full_outer",
        "fullouter": "full_outer",
    }
    try:
        return normalized[join_type]
    except KeyError:
        raise NotImplementedError(join_type) from None
7dcfbb1efe992b9d23463f3f811d560a60580105
309,058
from datetime import datetime


def pretty_time(x: float) -> datetime:
    """Return a datetime object built from a unix timestamp.

    NOTE(review): ``fromtimestamp`` without a tz argument interprets the
    stamp in the platform's local timezone — confirm callers expect that.
    """
    as_datetime = datetime.fromtimestamp(x)
    return as_datetime
3c6514864aaca4a6ba06584522d3f27b14404902
293,102
def get_driver_readiness(config: dict) -> str:
    """Get the code_readiness config setting, defaulting to "Release"."""
    readiness = config.get("code_readiness", "Release")
    return readiness
43728632dde9e9235019dd06ffbc62ce9cf8679c
632,656
from typing import Optional


def safe_language_tag(name: Optional[str]) -> str:
    """Convert language names to tags that are safe to use for identifiers
    and file names.

    Args:
        name: Name to convert to a safe name. Can be `None`.

    Returns:
        A safe string to use for identifiers and file names.
    """
    if name is None:
        return ""
    lowered = name.lower()
    special_cases = {"c++": "cpp", "objective-c": "objc"}
    return special_cases.get(lowered, lowered)
ef128910a8b17d41f165147e5ac7eea82677a1d5
36,855