def get_total() -> str:
    """Gets the total amount to try to calculate a match for."""
    return input("\nWhat is the total amount?: ")
c79a8f1e7c52ee0257db44eb9cc6892abc0d5a9f
77,183
def create_java_stop_cmd(port):
    """
    This creates the stop command for an application which is uniquely
    identified by a port number. Additional portions of the start command
    are included to prevent the termination of other processes.

    Args:
        port: The port which the application server is running
    Returns:
        A string of the stop command.
    """
    stop_cmd = "/usr/bin/python /root/appscale/stop_service.py " \
               "java {0}".format(port)
    return stop_cmd
90e7f07c9f6d96d81c49836114f0310e8c358968
77,187
def isIncreasing(seq):
    """Test if sequence is increasing."""
    for i in range(1, len(seq)):
        if not (seq[i-1] <= seq[i]):
            return False
    return True
2954cc14e1e5e74f6f4dc9b04db72d8e45076f24
77,189
def build_info_section(apidoc_conf):
    """Converts apidoc.json to swagger configuration

    Arguments:
        apidoc_conf {dict} -- apidoc.json converted to a dictionary

    Returns:
        dict -- dictionary matching the required information section for swagger
    """
    info_section = {}
    info_section["title"] = apidoc_conf.get("title") or apidoc_conf.get("name")
    info_section["version"] = apidoc_conf.get("version", "0.0.0")
    info_section["description"] = apidoc_conf.get("description")
    return info_section
121036896eb6a68307debb8e760fd458182107ad
77,190
def _gen_neighbor_keys(result_prefix="") -> tuple:
    """Generate neighbor keys for other functions to store/access info in adata.

    Parameters
    ----------
    result_prefix : str, optional
        generate keys based on this prefix, by default ""

    Returns
    -------
    tuple:
        A tuple consisting of (conn_key, dist_key, neighbor_key)
    """
    if result_prefix is None:
        result_prefix = ""
    if result_prefix:
        result_prefix = result_prefix if result_prefix.endswith("_") else result_prefix + "_"
    conn_key, dist_key, neighbor_key = (
        result_prefix + "connectivities",
        result_prefix + "distances",
        result_prefix + "neighbors",
    )
    return conn_key, dist_key, neighbor_key
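A quick usage check (illustrative, not part of the original record) of the two prefix cases:

# An underscore is appended to a non-empty prefix; an empty prefix is left alone:
assert _gen_neighbor_keys("X") == ("X_connectivities", "X_distances", "X_neighbors")
assert _gen_neighbor_keys() == ("connectivities", "distances", "neighbors")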
a2ece550bb1921d2d1fb34a23d6b33f53a93965d
77,193
def suffixed_file_name(file_path, suffix_string):
    """
    Returns file path with appended string (preserving file type)
    :param file_path: (string) either relative or absolute path of file
    :param suffix_string: (string) string to append to the original file name
    :return: (string) suffixed file path

    example:
        suffixed_file_name("foo.html.bar.html", "_BAZ")
        >>> "foo.html.bar_BAZ.html"
    """
    # reverse split, with max of 1 split:
    string_parts = file_path.rsplit('.', 1)
    return string_parts[0] + suffix_string + '.' + string_parts[1]
d6f1cbb1e241c8486b55cbc307e7908815416a63
77,196
def datetime_to_UNIXtime(date):
    """
    Converts a ``datetime.datetime`` object to its correspondent UNIXtime

    :param date: the ``datetime.datetime`` object
    :type date: ``datetime.datetime``
    :returns: an int UNIXtime
    :raises: *TypeError* when bad argument types are provided
    """
    return int(date.timestamp())
7057c0581da462b4de1bf6ed64f3341a1888cb25
77,197
def strip_decorator(text, pre_decor='"', post_decor='"'):
    """
    Strip initial and final character sequences (decorators) from a string.

    Args:
        text (str): The text input string.
        pre_decor (str): initial string decorator.
        post_decor (str): final string decorator.

    Returns:
        text (str): the text without the specified decorators.

    Examples:
        >>> strip_decorator('"test"')
        'test'
        >>> strip_decorator('"test')
        'test'
        >>> strip_decorator('<test>', '<', '>')
        'test'
    """
    begin = len(pre_decor) if text.startswith(pre_decor) else None
    end = -len(post_decor) if text.endswith(post_decor) else None
    return text[begin:end]
ffec5aebadb578c085a649baaae472d0aee3d1dc
77,198
def make_date_filter(start_date, end_date):
    """
    Create a function to filter dates in the specified date range (inclusive).

    Arguments:
        - start_date: datetime date object
        - end_date: datetime date object

    Returns:
        - Custom function object
    """
    # Convert dates from datetime format to int.
    start_code = int(start_date.strftime("%Y%m%d"))
    end_code = int(end_date.strftime("%Y%m%d"))

    def custom_date_filter(match):
        """
        Determine if a single filename is in the date range.

        Arguments:
            - match: regex match object based on FILENAME_REGEX applied
              to a filename str

        Returns:
            - boolean
        """
        # If regex match doesn't exist, current filename is not an
        # appropriately formatted source data file.
        if not match:
            return False
        # Convert date found in CSV name to int.
        code = int(match.groupdict()['date'])
        # Return True if current file date "code" is within the defined range.
        return start_code <= code <= end_code

    return custom_date_filter
03b6b1c43753caf095e4175c78cc0f96540eb1f2
77,207
def levenshtein_distance(a, b):
    """Return the Levenshtein distance between two strings."""
    # Taken from Rosetta Code
    # <http://rosettacode.org/wiki/Levenshtein_distance#Python>
    if len(a) > len(b):
        a, b = b, a
    ds = list(range(len(a) + 1))
    for ib, cb in enumerate(b):
        ds2 = [ib + 1]
        for ia, ca in enumerate(a):
            if ca == cb:
                ds2.append(ds[ia])
            else:
                ds2.append(min((ds[ia], ds[ia + 1], ds2[-1])) + 1)
        ds = ds2
    return ds[-1]
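A classic sanity check (illustrative, not part of the original record):

# "kitten" -> "sitting" needs three edits (two substitutions, one insertion);
# the distance to an empty string is just the other string's length:
assert levenshtein_distance("kitten", "sitting") == 3
assert levenshtein_distance("", "abc") == 3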
e66405333b8efa78d9b961d40b1b3424077d83a1
77,210
def hello_world(name: str) -> str:
    """
    Returns a hello-name greeting.

    Args
    ----------
    name: string
        name to say hello to

    Returns
    ----------
    text: string
        hello name text
    """
    text = f"hello {name}"
    print(text)
    return text
ccd20984837a14be843a7898433b7a643ebf11db
77,215
def JsonComplexEncoder(obj):
    """Extends json serialization (e.g. as a ``json.dumps`` hook) by
    converting bytes to string."""
    if isinstance(obj, bytes):
        return str(obj)
    else:
        return obj
e84fb26a284ca37aef2e419174a438a61dbc9139
77,218
import torch


def correlation(x, y):
    """A function to compute the correlation coefficient."""
    # computing the covariance
    cov = torch.sum((x - x.mean()) * (y - y.mean()))
    # computing the standard deviations
    std_x = torch.sqrt(torch.sum((x - torch.mean(x)) ** 2))
    std_y = torch.sqrt(torch.sum((y - torch.mean(y)) ** 2))
    # computing r
    r = cov / (std_x * std_y)
    return r
6551b7f88bf38bc67690a14a29a82e629bfc7b05
77,220
def _DiscardUnusedTemplateLabelPrefixes(labels):
    """Drop any labels that end in '-?'.

    Args:
        labels: a list of label strings.

    Returns:
        A list of the same labels, but without any that end with '-?'.
        Those label prefixes in the new issue templates are intended to
        prompt the user to enter some label with that prefix, but if
        nothing is entered there, we do not store anything.
    """
    return [lab for lab in labels if not lab.endswith('-?')]
1530706beece2eb53cfbeb7475541d7e27223109
77,222
def legacy_position_transform(positions):
    """
    Transforms positions in the tree sequence into VCF coordinates under
    the pre 0.2.0 legacy rule.
    """
    last_pos = 0
    transformed = []
    for pos in positions:
        pos = int(round(pos))
        if pos <= last_pos:
            pos = last_pos + 1
        transformed.append(pos)
        last_pos = pos
    return transformed
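A small worked example (illustrative, not part of the original record) showing how rounded positions that collide get bumped so the output stays strictly increasing:

# 0.0 rounds to 0 (<= last_pos of 0) and is bumped to 1; 1.1 and 1.2 both
# round to 1 and are bumped past earlier coordinates; 5.0 passes through:
assert legacy_position_transform([0.0, 1.1, 1.2, 5.0]) == [1, 2, 3, 5]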
5c4e753e3480c5313b4d82f2e44cb78e6b216fc5
77,225
def rowlst_to_rowdict(rowLst, keyName):
    """Convert a list of dictionaries into a dictionary of dictionaries."""
    rowDict = {}
    for e in rowLst:
        key = e.pop(keyName)
        rowDict[key] = e
    return rowDict
18e7a116da4f3092ad5bfc806e90985503a1cdf6
77,227
def fitter(g_model, pars, wavelength, flux, weights=None, method='leastsq',
           verbose=True):
    """
    Fits the model to the data using a Levenberg-Marquardt least squares
    method by default (lmfit generally uses the scipy.optimize or
    scipy.optimize.minimize methods). Prints the fit report, containing fit
    statistics and best-fit values with uncertainties and correlations.

    Parameters
    ----------
    g_model :
        the model to fit to the data
    pars :
        Parameters object containing the variables and their constraints
    wavelength : :obj:'~numpy.ndarray'
        wavelength vector
    flux : :obj:'~numpy.ndarray'
        the flux of the spectrum
    weights : :obj:'~numpy.ndarray' or None
        the inverse of the variance, or None. Default is None.
    method : str
        the fitting method to use, for example:
        - 'leastsq' - Levenberg-Marquardt least squares method (default)
        - 'emcee' - Maximum likelihood via Monte-Carlo Markov Chain
        - 'dual_annealing'
        - see https://lmfit.github.io/lmfit-py/fitting.html for more options
    verbose : boolean
        whether to print out results and statistics. Default is True.

    Returns
    -------
    best_fit : class
        the best fitting model
    """
    # fit the data
    best_fit = g_model.fit(flux, pars, x=wavelength, weights=weights, method=method)
    # print out the fit report
    if verbose:
        print(best_fit.fit_report())
    return best_fit
fa85e96ba2d7bfceeb5270ba08eb2e33c9d2cccc
77,228
def lactate_norm(series):
    """Calculates lactate norm of a series of data.

    Unlike a simple average, the lactate norm emphasizes higher values by
    raising all values to the 4th power, averaging them, then taking the
    4th root. Emphasizing higher values is rooted in the fact that best-fit
    curves between VO2 (oxygen consumption) and lactate tend to follow a
    4th-order relationship.
    """
    return (series ** 4).mean() ** 0.25
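A worked example (illustrative, assuming a pandas Series input as the `.mean()` call suggests) showing how the 4th-power mean weights a spike:

import pandas as pd

series = pd.Series([200.0, 200.0, 200.0, 400.0])
# ((3 * 200**4 + 400**4) / 4) ** 0.25 is about 295.26, noticeably above
# the arithmetic mean of 250, because the single spike dominates:
assert abs(lactate_norm(series) - 295.26) < 0.05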
6a6b475dbf4c4506b4da5216b25982f776405500
77,233
import math


def untemper(interval, div=12):
    """
    Converts a tempered interval into a frequency ratio.

    Parameters
    ----------
    interval : int | float
        The interval to convert. The value can be an integer, float, or a
        list of the same.
    div : int
        The divisions per octave. 12 will convert from semitones and 1200
        will convert from cents.

    Returns
    -------
    The untempered frequency ratio
    """
    func = lambda i, d: math.pow(2.0, i / d)
    if type(interval) is list:
        return [func(i, div) for i in interval]
    return func(interval, div)
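Two quick checks (illustrative, not part of the original record):

# An octave (12 semitones) is a 2:1 ratio; 700 cents is very close to a
# just perfect fifth (3:2):
assert untemper(12) == 2.0
assert abs(untemper(700, div=1200) - 1.498) < 0.001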
116abdc40aec21605b6758d58577e2cb2371f97f
77,240
def get_normalized_data(data, column_name):
    """
    Min-max normalize the data in a DataFrame column.

    Args:
        data (Pandas.DataFrame): The DataFrame with the column to normalize
        column_name (string): The name of the column to normalize

    Returns:
        Pandas.DataFrame: The DataFrame with a column added for the
        normalized column data.
    """
    # Avoid shadowing the min/max builtins.
    col_min = data[column_name].min()
    col_max = data[column_name].max()
    data['%s_norm' % column_name] = (data[column_name] - col_min) / (col_max - col_min)
    return data
5168bbc486a05524e93cbaebdcd09a512b9c0531
77,241
import torch


def l2n(x: torch.Tensor, eps: float = 1e-6, dim: int = 1):
    """
    L2 normalize the input tensor along the specified dimension

    Args:
        x (torch.Tensor): the tensor to normalize
        eps (float): epsilon to use to normalize to avoid the inf output
        dim (int): along which dimension to L2 normalize

    Returns:
        x (torch.Tensor): L2 normalized tensor
    """
    x = x / (torch.norm(x, p=2, dim=dim, keepdim=True) + eps).expand_as(x)
    return x
08ee657ea5e29d2e9b8c789fd39154cfe2c4ced8
77,247
def recursive_linear_search(array: list, target: int, index: int = 0) -> int:
    """
    A Python implementation of the recursive linear search algorithm,
    i.e. linear search applied with recursion.

    Parameters:
        1> array : the searching space (accepts both sorted and unsorted
           sequences).
        2> target : the element to be searched for in the array
           (argument 1).
        3> index : default parameter with value 0; the starting position
           of the search.

    Output:
        This function returns the index (starting from zero) of the
        target (2nd argument) if found in the given array (1st argument),
        else it returns -1.

    Exceptions:
        TypeError : raised when the 1st argument (array) is any type
            other than list, tuple or str.
        ValueError : raised when the 1st argument is of type str and the
            2nd argument (target) is of a non-str type, as you cannot
            search for a non-str element in a str.

    Examples:
        >>> recursive_linear_search([1, 2, 3, 4, 5], 5)
        4
        >>> recursive_linear_search([5, 4, 3, 2, 1], 3)
        2
    """
    if type(array) not in (list, tuple, str):
        raise TypeError("Invalid input(argument 1) only list,tuple,str allowed")
    if type(array) == str and type(target) != str:
        raise ValueError("Invalid input(argument 2) cannot search non-str in str")
    if index == len(array):
        return -1
    if array[index] == target:
        return index
    return recursive_linear_search(array, target, index + 1)
e2cb7331813aa11bef8fd19247ff3667b3f1e9ae
77,249
def docbuild(**params):
    """Insert argument text into decorated function's docstring.

    This function easily builds function docstrings and is intended to be
    used before wrapping the function with :meth:`docsub`.

    Args:
        **params: The strings which would be used to format the docstring
            templates.
    """
    def decorator(decorated):
        decorated.__doc__ = decorated.__doc__.format(**params)
        return decorated
    return decorator
76b9f7a8228de52bee3ec1611fca573f32378bc0
77,250
from typing import Sequence
from typing import List


def _choose_chunk_shape(write_shape: Sequence[int],
                        target_elements: int) -> List[int]:
    """Chooses a chunk shape that evenly divides write_shape.

    The chunk shape is chosen such that the total number of elements is
    less than or equal to `target_elements`, but is otherwise as large
    as possible.

    This uses a greedy algorithm that attempts to split the largest
    dimensions first.

    Args:
        write_shape: Write shape for which to choose a chunk shape.
        target_elements: Desired number of elements in chosen chunk shape.
            Must be >= 1.

    Returns:
        List of length `len(write_shape)` specifying the chosen chunk shape.
    """
    assert target_elements >= 1
    rank = len(write_shape)

    # `dim_factors[i]` is the list of divisors of `write_shape[i]`
    dim_factors = [
        [i for i in range(1, size + 1) if size % i == 0] for size in write_shape
    ]

    # The current chunk shape is:
    #   [dim_factors[i][-1] for i in range(rank)]

    def get_total_elements():
        """Returns the number of elements in the current chunk shape."""
        total_elements = 1
        for i in range(rank):
            total_elements *= dim_factors[i][-1]
        return total_elements

    # Reduce the current chunk shape until the desired number of elements
    # is reached.
    while get_total_elements() > target_elements:
        # Greedily reduce the largest dimension. This is not guaranteed to
        # bring us the closest to `target_elements`, but is simple to
        # implement and should work well enough.
        dim_to_reduce = -1
        dim_to_reduce_size = 1
        for i in range(rank):
            size = dim_factors[i][-1]
            if size > dim_to_reduce_size:
                dim_to_reduce_size = size
                dim_to_reduce = i
        # Can only fail to choose `dim_to_reduce` if all dimensions have
        # size of 1. But that cannot happen since `target_elements >= 1`.
        assert dim_to_reduce_size > 1
        dim_factors[dim_to_reduce].pop()

    return [dim_factors[i][-1] for i in range(rank)]
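A small trace of the greedy reduction (illustrative, not part of the original record):

# Shape (6, 4) has 24 elements. The loop first pops the largest dimension
# (6 -> 3, giving 12 elements), then the next largest (4 -> 2), landing on
# the largest divisor shape with at most 8 elements:
assert _choose_chunk_shape([6, 4], target_elements=8) == [3, 2]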
d2131f78d4e035d54c5275bb19a4596875cbbca4
77,252
def get_points_data(dryspells, var1, var2, weights, points):
    """
    Extract dryspells, full time-series of var1 and var2, and averaging
    weights for a list of land points from input data on the full domain.

    The variables are typically near-surface air temperature and land
    surface temperature, and the weights are 1 km MODIS pixel counts. The
    full domain is typically global.

    Parameters
    ----------
    dryspells : list of list of <event.Event> instances.
        Descriptions of all dry spells for each land point on the full
        domain.
    var1 : MaskedArray, shape(time, land)
        First input variable.
    var2 : MaskedArray, shape(time, land)
        Second input variable.
    weights : ndarray, shape(time, land)
        Weights used in calculating the composite means.
    points : list of ints
        Zero-based list of land points to be extracted.

    Returns
    -------
    dryspells_p : list of list of <event.Event> instances.
        Descriptions of all dry spells for each requested land point.
    var1_p : list of MaskedArrays
        Full time series of variable 1 for each requested land point.
    var2_p : list of MaskedArrays
        Full time series of variable 2 for each requested land point.
    weights_p : list of ndarrays
        Full time series of weights for each requested land point.
    """
    dryspells_p = []
    var1_p = []
    var2_p = []
    weights_p = []
    for p in points:
        dryspells_p.append(dryspells[p])
        var1_p.append(var1[:, p])
        var2_p.append(var2[:, p])
        weights_p.append(weights[:, p])
    return dryspells_p, var1_p, var2_p, weights_p
cb1dec9fc0c77158c711a2b01d8ed33af81694a6
77,254
def bdev_nvme_opal_init(client, nvme_ctrlr_name, password):
    """Init nvme opal. Take ownership and activate.

    Args:
        nvme_ctrlr_name: name of nvme ctrlr
        password: password to init opal
    """
    params = {
        'nvme_ctrlr_name': nvme_ctrlr_name,
        'password': password,
    }
    return client.call('bdev_nvme_opal_init', params)
e2b9ce162ebe85e72972ce5856f6c2fa412d0284
77,256
def hsv_to_rgb(hue, sat, val):  # pylint: disable=too-many-return-statements
    """
    Convert HSV colour to RGB

    :param hue: hue; 0.0-1.0
    :param sat: saturation; 0.0-1.0
    :param val: value; 0.0-1.0
    """
    if sat == 0.0:
        return val, val, val

    i = int(hue * 6.0)
    p = val * (1.0 - sat)
    f = (hue * 6.0) - i
    q = val * (1.0 - sat * f)
    t = val * (1.0 - sat * (1.0 - f))
    i %= 6

    if i == 0:
        return val, t, p
    if i == 1:
        return q, val, p
    if i == 2:
        return p, val, t
    if i == 3:
        return p, q, val
    if i == 4:
        return t, p, val
    if i == 5:
        return val, p, q
    # Will never reach here but it keeps pylint happier
    return val, val, val
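Quick checks of the sextant logic (illustrative, not part of the original record):

# Fully saturated hues at exact sextant boundaries hit exact corners of
# the RGB cube, and zero saturation always yields a grey:
assert hsv_to_rgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0)       # red
assert hsv_to_rgb(0.5, 1.0, 1.0) == (0.0, 1.0, 1.0)       # cyan
assert hsv_to_rgb(0.5, 0.0, 0.25) == (0.25, 0.25, 0.25)   # grey (sat == 0)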
5dbf2e1a11027c3cdf9806ee60b21f83863e8013
77,261
import random


def random_list(num_vals):
    """num_vals is how many elements in your list.

    Generates a list starting at -10 to 10 with a step of 1 - 5.
    """
    start = random.randrange(-10, 11)
    step = random.randrange(1, 6)
    # Wrap in list() so the function returns a list, as its name and
    # docstring promise, rather than a lazy range object.
    return list(range(start, step * num_vals + start, step))
75f4bd6707b63ed673ab235d2f147223327cbe8a
77,262
def isclassdesc(desc):
    """Tests if a description is a class-type description."""
    return desc is not None and 'parents' in desc
a8281f5a44fa06610a4986900d49a263a60f841f
77,266
import math


def phi2b(d, u, phi, flow, x=1, eta_vol=1):
    """Calculate impeller width for a given flow number.

    :param d (float): diameter [m]
    :param u (float): blade velocity [m/s]
    :param phi (float): flow number
    :param flow (float): flow rate [m^3/s]
    :param x (float): blade blockage
    :param eta_vol (float): volumetric efficiency
    :return b (float): impeller width [m]
    """
    b = flow / (math.pi * d * u * phi * x * eta_vol)
    return b
21bcfa5240e0ea31155443d21b1f1bfe9d68b4b9
77,269
def get_index(array, value, reverse):
    """
    Function to get the indices of the two list items between which the
    value lies.

    Args:
        array (list): containing numerical values.
        value (float/int): value to be searched.
        reverse (bool): whether or not the range values are in reverse
            order.

    Returns:
        tuple of int: the two indices between which value lies.
    """
    if reverse:
        # loop over the array/list (descending order)
        for i in range(0, len(array) - 1):
            if array[i] >= value >= array[i + 1]:
                return i, i + 1
    # loop over the array/list (ascending order)
    for i in range(0, len(array) - 1):
        if array[i] <= value <= array[i + 1]:
            return i, i + 1
a112bb49118a01554f1d0983328771df0e8230ab
77,271
def igetattr(obj, attr, *args):
    """Case-insensitive getattr"""
    for a in dir(obj):
        if a.lower() == attr.lower():
            return getattr(obj, a)
    if args:
        return args[0]
    raise AttributeError("type object '%s' has no attribute '%s'" % (type(obj), attr))
8f18ceb65824e8780157764d86c50a7a99a4092c
77,273
def _CheckForStringViewFromNullableIppApi(input_api, output_api):
    """
    Looks for all affected lines in CL where one constructs either
    base::StringPiece or std::string_view from any ipp*() CUPS API call.
    Assumes over-broadly that all ipp*() calls can return NULL. Returns
    affected lines as a list of presubmit errors.
    """
    # Attempts to detect source lines like:
    #   * base::StringPiece foo = ippDoBar();
    #   * base::StringPiece foo(ippDoBar());
    # and the same for std::string_view.
    string_view_re = input_api.re.compile(
        r"^.+(base::StringPiece|std::string_view)\s+\w+( = |\()ipp[A-Z].+$")
    violations = input_api.canned_checks._FindNewViolationsOfRule(
        lambda extension, line: not (extension in ("cc", "h") and
                                     string_view_re.search(line)),
        input_api, None)
    bulleted_violations = ["  * {}".format(entry) for entry in violations]
    if bulleted_violations:
        return [output_api.PresubmitError(
            ("Possible construction of base::StringPiece or std::string_view "
             "from CUPS IPP API (that can probably return NULL):\n{}").format(
                 "\n".join(bulleted_violations))), ]
    return []
7d2710d94de8d3325f637b4ec238ee6947488569
77,274
import re


def extract_classname_vision(header_content):
    """
    Use regex to find class names in header files of vision ops.

    :param header_content: string containing header of a vision op IR file
    :return: list of vision ops found
    """
    return re.findall(r"(?<=class )[\w\d_]+(?=Operation : )", header_content)
08387716ff889c09714443b8c8bd61d496b91acb
77,279
def get_queues(app):
    """Get all active queues and workers for a celery application.

    Unlike get_running_queues, this goes through the application's server.
    Also returns a dictionary with entries for each worker attached to
    the given queues.

    :param `celery.Celery` app: the celery application

    :return: queues dictionary with connected workers, all workers
    :rtype: (dict of lists of strings, list of strings)

    :example:

    >>> from merlin.celery import app
    >>> queues, workers = get_queues(app)
    >>> queue_names = [*queues]
    >>> workers_on_q0 = queues[queue_names[0]]
    >>> workers_not_on_q0 = [w for w in workers if w not in workers_on_q0]
    """
    i = app.control.inspect()
    active_workers = i.active_queues()
    if active_workers is None:
        active_workers = {}
    queues = {}
    for worker in active_workers:
        for my_queue in active_workers[worker]:
            try:
                queues[my_queue["name"]].append(worker)
            except KeyError:
                queues[my_queue["name"]] = [worker]
    return queues, [*active_workers]
c83432ee49df4976f8a212434c7f69f697825005
77,284
from datetime import datetime

import pytz


def set_timezone(time: datetime) -> datetime:
    """Take a datetime input, convert it to the Europe/Paris timezone and
    return a proper datetime object."""
    europe_paris = pytz.timezone('Europe/Paris')
    return time.astimezone(europe_paris)
e1d2e8997b16648a326f2cc18927e8a1ff9f54c1
77,286
def _screen_file_extensions(preferred_extension):
    """
    Return a prioritized list of screen file extensions.

    Include .ui & .py files (also .adl files if adl2pydm installed).
    Prefer extension as described by fname.
    """
    extensions = [".py", ".ui"]  # search for screens with these extensions

    # Only offer .adl files when the optional converter is importable;
    # without the import attempt, the except clause could never trigger.
    try:
        import adl2pydm  # noqa: F401
        extensions.append(".adl")
    except ImportError:
        pass

    # don't search twice for preferred extension
    if preferred_extension in extensions:
        extensions.remove(preferred_extension)

    # search first for preferred extension
    extensions.insert(0, preferred_extension)

    return extensions
34cc1546acdb01bd1333970ad4ad361b560f4d71
77,288
def m32gal(m3):
    """m^3 -> gal"""
    return 264.17205 * m3
c7c997e8443b39f4bd0c990958c861a1f4f16f92
77,293
def BFS_dist(sink, distList):
    """
    Returns the distance to sink given distList. Assumes source is
    distList[0]. Returns 1 more than the length of distList if sink is
    not found in distList.
    """
    for i in range(len(distList)):
        if sink in distList[i]:
            return i
    # else: sink was not found in distList
    return len(distList) + 1
139220147499ce79174e2d4a3284470b91026b9c
77,297
from typing import Dict
from typing import Optional


def get_query_from_dict(data: Dict[str, Optional[str]]) -> str:
    """
    Prepare a query to be passed to ES based on incoming key-value pairs
    in a dict.

    :param data: key-value pairs to use for searching in ES
    :return: query string
    """
    return " AND ".join(
        [f"{key}:{value}" for key, value in data.items() if value is not None]
    )
b4959b643ed7fb29a84a4374e081a3db1e8de653
77,299
def remove_options(args: dict) -> dict:
    """
    Strips options part of doc which gets parsed by docopts

    - args: the dictionary of arguments produced by docopts
    - returns: a dictionary with just the useful arguments in it
    """
    new_args = dict()
    for arg in args.keys():
        if arg == "Options:":
            break
        new_args[arg] = args[arg]
    return new_args
6d8f5409911715b5ab01ee8f78876637fb75b5e5
77,301
import re


def matchline(line):
    """
    >>> matchline("{{teet}}")
    'teet'
    >>> matchline("teet")
    """
    rg = re.compile(r"{{(\S*)}}")
    m = rg.match(line.strip())
    if m:
        return m.group(1)
    else:
        return
7db884f164197213497b27050dacd0ff90c1b29e
77,305
def intdiv(p, q):
    """
    Integer division which rounds toward zero

    Examples
    --------
    >>> intdiv(3, 2)
    1
    >>> intdiv(-3, 2)
    -1
    >>> -3 // 2
    -2
    """
    r = p // q
    if r < 0 and q * r != p:
        r += 1
    return r
dc1b507f4a5e71f93a3145bbffe8076ab43cf117
77,306
def render_srm(color_srm):
    """Convert the SRM to a valid HTML string (if known)"""
    if not color_srm:
        return '#ffffff'

    # round the color to an int and put it in the inclusive range [1, 30]
    int_color = min([int(color_srm), 30])
    if int_color < 1:
        return '#ffffff'

    # source:
    # https://www.homebrewtalk.com/forum/threads/ebc-or-srm-to-color-rgb.78018/#post-820969
    color_map = {
        1: '#F3F993', 2: '#F5F75C', 3: '#F6F513', 4: '#EAE615', 5: '#E0D01B',
        6: '#D5BC26', 7: '#CDAA37', 8: '#C1963C', 9: '#BE8C3A', 10: '#BE823A',
        11: '#C17A37', 12: '#BF7138', 13: '#BC6733', 14: '#B26033', 15: '#A85839',
        16: '#985336', 17: '#8D4C32', 18: '#7C452D', 19: '#6B3A1E', 20: '#5D341A',
        21: '#4E2A0C', 22: '#4A2727', 23: '#361F1B', 24: '#261716', 25: '#231716',
        26: '#19100F', 27: '#16100F', 28: '#120D0C', 29: '#100B0A', 30: '#050B0A',
    }
    return color_map[int_color]
ae4f81ad1de85abf4509fad1a7281b03146b72e9
77,313
def self_affine_psd(q, pref, hurst, onedim=False):
    """Ideal self-affine power spectrum, dependent only on prefactor and
    Hurst exponent."""
    exp = -2 * (hurst + 1)
    if onedim:
        exp = -1 - 2 * hurst
    return pref * q**exp
c8208ca4510085fab0c10eac5a63e885c5f1834a
77,314
def get_session_duration(user_session):
    """Get session duration."""
    return user_session.events[-1].ts - user_session.events[0].ts
fcc67595954541e6fafb158808522cab23d3458b
77,315
def string_from_prompts_array(arr):
    """
    Concatenates a list of prompts into a string. Used in development to
    separate prompts with a bar.

    Args
        arr: An array of prompts.

    Returns
        A concatenated string.
    """
    prompts_string = '|'.join(arr)
    return prompts_string
4e79e4d7917d6894f60dcc1a5e3d4a6cb8e222b3
77,320
def _length_of_batch(batch):
    """Get the number of samples in the mini-batch `batch`.

    `batch` can be:
    - a NumPy array, in which case `len(batch)` (size of first axis) will
      be returned
    - a tuple, in which case `_length_of_batch` will be invoked
      (recursively) on the first element

    As a consequence, mini-batches can be structured; lists and tuples can
    be nested arbitrarily deep.

    Parameters
    ----------
    batch: tuple or NumPy array
        a mini-batch

    Returns
    -------
    int: the number of samples in the mini-batch
    """
    if isinstance(batch, tuple):
        return _length_of_batch(batch[0])
    else:
        return len(batch)
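A small example (illustrative, not part of the original record) with a nested batch structure:

import numpy as np

# A structured batch: (inputs, (targets, masks)). The sample count is read
# off the leading axis of the first leaf, reached by recursing into tuples:
batch = (np.zeros((32, 3)), (np.zeros((32,)), np.zeros((32, 5))))
assert _length_of_batch(batch) == 32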
56f1eee80700be4441aa76068a4851002256b53b
77,326
def targetMatches(targetPathParts, system, craft, target, camera):
    """
    Does the given target path match the given values? Where target path
    part is None, don't filter that value. E.g. if
    targetPathParts=[None, None, None, None], will always return True.
    """
    # targetPathParts = [s.title() for s in targetPathParts if s]
    matches = True
    if targetPathParts:
        pathSystem, pathCraft, pathTarget, pathCamera = targetPathParts
        if pathSystem and pathSystem.title() != system:
            matches = False
        if pathCraft and pathCraft.title() != craft:
            matches = False
        if pathTarget and pathTarget.title() != target:
            matches = False
        if pathCamera and pathCamera.title() != camera:
            matches = False
    return matches
e16977575494ef421331cd4432dd4b5d7cfba0c0
77,330
import pickle


def load(path):
    """
    Unpickle the file located at path.

    :param path: The path to the pickled file.
    :return: Returns the object hierarchy stored in the file.
    """
    # Use a context manager so the file handle is closed deterministically.
    with open(path, 'rb') as f:
        return pickle.load(f)
6d2550b882a67b9f256587c7fd2403ace786e540
77,339
from typing import Iterable


def stringify(symbols='•꞉⋮'):
    """
    Construct a function to convert a set (of sets [of sets {...}]) into
    a string.

    Parameters
    ----------
    symbols : str
        The symbols to utilize to separate elements.

    Returns
    -------
    stringifier : func
        A function which stringifies.
    """
    def stringifier(things):
        """
        Convert a set (of sets [of sets {...}]) into a string.

        Parameters
        ----------
        things : (frozen)set
            The set (of sets [of sets {...}]) to stringify.

        Returns
        -------
        string : str
            The string representation of `things`.
        """
        try:
            things = list(things)
        except TypeError:  # pragma: no cover
            return str(things)
        try:
            if isinstance(things[0], Iterable) and not isinstance(things[0], str):
                stringer = stringify(symbols[1:])
                string = symbols[0].join(
                    sorted((stringer(thing) for thing in things),
                           key=lambda t: (-len(t), t)))
            else:
                raise IndexError
        except IndexError:
            string = ''.join(map(str, sorted(things)))
        return string if string else '∅'

    return stringifier
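A sketch of the nested separators at work (illustrative, not part of the original record):

stringifier = stringify()
# Inner sets are flattened with no separator; outer elements are joined by
# '•' with longer strings first; an empty set renders as '∅':
assert stringifier({frozenset({1, 2}), frozenset({3})}) == '12•3'
assert stringifier(set()) == '∅'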
79d077206b3f7b1b38309687439758ff287eed84
77,342
def sortNeighbors(neighbors, similarities, useTuples=True, verbose=True):
    """
    Sorts each list of neighbors in decreasing order according to
    similarities between users.

    :param neighbors: List of lists indicating the neighbors of each user,
        as returned by buildNeighbors()
    :param similarities: Similarity matrix between users, as returned by
        buildUsersGraph()
    :param useTuples: Boolean parameter, indicating if the function should
        return a list of list of tuples.
    :param verbose: If set to True, status prints are made during the
        execution of the code
    :return: A list of lists of tuples, with for each user its neighbors
        in decreasing order of similarity, in the form of
        (neighbor index, similarity) tuples. Only neighbors with positive
        similarity are kept. If useTuples is set to False, the output is a
        simple list of lists, containing only neighbor indices instead of
        tuples to save memory.

    Note: can be used in the same way for the films point-of-view based
    model.
    """
    sorted_neighbors = []
    nU = similarities.shape[0]
    for ui in range(nU):
        if verbose and ui % 100 == 0:
            print(ui)
        n = [(uj, similarities[ui, uj]) for uj in neighbors[ui]
             if similarities[ui, uj] > 0]
        n = sorted(n, key=lambda x: x[1], reverse=True)
        if not useTuples:
            n1 = []
            for i, j in n:
                n1.append(i)
            sorted_neighbors.append(n1)  # To save memory
        else:
            sorted_neighbors.append(n)
    return sorted_neighbors
60566eb05959d40446da383750a717c988a8010e
77,344
def intersect_using_spatial_index(source_gdf, source_index, tile):
    """Conduct spatial intersection using spatial index for candidates
    GeoDataFrame to make queries faster.

    Parameters
    ----------
    source_gdf : GeoDataFrame
        [description]
    source_index : geopandas.sindex.SpatialIndex
        spatial index of the source
    tile : Shapely geometry
        geometry for which intersections with the sources are found

    Returns
    -------
    GeoDataFrame
        intersections between source_gdf and tile geometry
    """
    possible_matches_index = []
    bounds = tile.bounds
    c = list(source_index.intersection(bounds))
    possible_matches_index += c

    # Get unique candidates
    unique_candidate_matches = list(set(possible_matches_index))
    possible_matches = source_gdf.iloc[unique_candidate_matches]
    result = possible_matches.loc[possible_matches.intersects(tile)
                                  ^ possible_matches.touches(tile)]
    return result
2a45fff6fbf57f1c7d4b32dcc3028b4745b22b9e
77,347
def kap(n):
    """
    Execute and return result of a Kaprekar's routine step for number n.

    >>> kap(6174)
    6174
    >>> kap(2341)
    3087
    >>> kap(3087)
    8352
    >>> kap(8352)
    6174
    """
    n = format(n, '04d')
    small = int(''.join(sorted(n)))
    big = int(''.join(sorted(n, reverse=True)))
    return big - small
67519104d4b620587b6a5a55c45524ceb31b7f9f
77,351
def f_sma(close_prices, window):
    """Calculates standard moving average (SMA).

    This function takes historical data and a moving window to calculate
    the SMA. As the moving average takes previous closing prices into
    account, its length will be len(candles) - window.

    Args:
        close_prices (list of float): A list of close prices for each period.
        window (int): The moving window to take averages over.

    Returns:
        list of float: A list of SMAs
    """
    sma = []
    n = len(close_prices) - window + 1
    for i in range(n):
        sma.append(sum(close_prices[i:i + window]) / window)
    return sma
daf935a36207fa94a2192a3289bee231b2c2e175
77,356
from datetime import datetime
from typing import Optional


def as_aware_datetime(value: datetime | str) -> Optional[datetime]:
    """Convert the value from a string to a timezone aware datetime."""
    if isinstance(value, str):
        # fromisoformat() does not handle the "Z" suffix
        if value.upper().endswith('Z'):
            value = value[:-1] + '+00:00'
        value = datetime.fromisoformat(value)
    return value
1f42287971d74a556d3f2f695b2793531d2f1755
77,357
def get_naive_features(data):
    """Calculate the mean, min, max, std, quartiles, and range of the data.

    Args:
        data: pandas DataFrame
            data to calculate statistics of. Each data point is a row.

    Returns:
        pandas DataFrame
            Columns are 'min', 'max', 'range', 'mean', '25%', '50%', and '75%'
    """
    result = data.transpose().describe().transpose()
    result = result.drop('count', axis=1)
    result['range'] = result['max'] - result['min']
    return result
8bfc19978bc6e018335704a1b1f9bed054ee79ae
77,363
def chr_split_list(split_headers):
    """
    Gets chromosome info from split_headers.

    Args:
        split_headers (list): header list split into parts

    Returns:
        chr_list (list): list of chromosome values
    """
    chr_list = []
    for item in split_headers:
        if item[0]:
            new_item = item[0].split(":")
            chr_list.append(new_item)
    return chr_list
726b36e92654addeb326449aa2f817a22be1af70
77,367
import hashlib


def digest_id(wordage):
    """
    Return a deterministic digest of input.

    The 'b' is an experiment forcing the first char to be non-numeric but
    valid hex, which is in no way required for RDF but may help when using
    the identifier in other contexts which do not allow identifiers to
    begin with a digit.

    :param wordage: the string to hash
    :returns: 20 hex char digest
    """
    return 'b' + hashlib.sha1(wordage.encode('utf-8')).hexdigest()[1:20]
50d06013402a8549b064a70a3559d154f76a538c
77,370
def rsqrt_schedule(
    init_value: float,
    shift: int = 0,
):
    """Applies a reverse square-root schedule.

    The reverse square root schedule is simply `lr = init_value / sqrt(step)`.

    Args:
        init_value: Base learning rate (before applying the rsqrt schedule).
        shift: How many steps the rsqrt should be shifted. Shifting the
            rsqrt schedule makes it less steep in the beginning (close to 0).

    Returns:
        A schedule `count -> learning_rate`.
    """
    def schedule(count):
        return init_value * (count + shift)**-.5 * shift**.5

    return schedule
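A short numeric check (illustrative, not part of the original record). The `shift**.5` factor normalizes the curve so the schedule starts exactly at `init_value` when `shift > 0`:

schedule = rsqrt_schedule(1e-3, shift=1000)
# At step 0 the rate equals the base value; 3000 steps later the effective
# step count has quadrupled (1000 -> 4000), so the rate has halved:
assert abs(schedule(0) - 1e-3) < 1e-12
assert abs(schedule(3000) - 5e-4) < 1e-12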
385c654cf7ba0e4d6bc2a2720ef408f93732ce5c
77,373
from typing import List
import collections


def remove_duplicates(list_to_prune: List) -> List:
    """Removes duplicates from a list while preserving order of the items.

    :param list_to_prune: the list being pruned of duplicates
    :return: The pruned list
    """
    temp_dict = collections.OrderedDict()
    for item in list_to_prune:
        temp_dict[item] = None
    return list(temp_dict.keys())
bd6dac12e3d38b912fc64c7d369c9b716bab915c
77,374
def make_nofdel(name):
    """Make a deleter for `property()`'s `fdel` param that raises
    `AttributeError`.

    Args:
        name (str): property name

    Raises:
        AttributeError: always raises exception

    Returns:
        function: A deleter function
    """
    def fdel(self):
        raise AttributeError("Property '%s' is not deletable by %r" % (name, self,))
    return fdel
2e44f43f48a6a22211bc7e268fafa7ff84cc3d66
77,375
def is_folder(item):
    """Check the item's 'type' to determine whether it's a folder."""
    return item['type'] == "folder"
79f24b606358d41f51b1a03e39d60ffca1add8f6
77,380
import re


def is_version_base_valid(dotted_ver_no_dev):
    """
    Checks if the base version is a valid format. This should be roughly
    semver per the notes in `version.py`.

    Args:
        dotted_ver_no_dev (str): The version string to check, such as
            `v1.2.3-4`. This should exclude anything after the `+` sign
            if applicable.

    Returns:
        (bool): True if version string is a valid format; False otherwise.
    """
    # The dots between version parts must be escaped; an unescaped '.'
    # would match any character.
    return re.match(r'^v[0-9x]+\.[0-9x]+\.[0-9x]+(-[0-9a-z]+)?$',
                    dotted_ver_no_dev) is not None
1e5cf4943c1dd84cdf75a4dbabbc867d77ddeeb3
77,389
import hashlib


def get_file_hash(path):
    """
    Return the cryptographic hash of the contents of the file located at
    the provided `path`.
    """
    # Named file_hash to avoid shadowing the hash() builtin.
    file_hash = hashlib.blake2b()
    with open(path, "rb") as file:
        while chunk := file.read(8192):
            file_hash.update(chunk)
    return file_hash
83d208efd87bd58a554343c8b0e3d54b038b1a62
77,390
def tracks_of_epoch(df, column: str, year_1: int, year_2: int):
    """
    Count the tracks released between two years.

    :param df: DataFrame to analyze
    :param column: column to select
    :param year_1: first year to search, inclusive
    :param year_2: last year to search, inclusive
    :return: int, the number of tracks between those years
    """
    # between() yields a boolean Series flagging rows whose value lies
    # between the two bounds; sum() counts the True values (True == 1).
    return sum(df[column].between(year_1, year_2))
3c7dd30f42a3125f6f24e7a6bce7669b6fe228b7
77,392
def Checksum(payload, payload_size):
    """Calculates the checksum of the payload.

    Args:
        payload: (string) The payload string for which checksum needs to
            be calculated.
        payload_size: (int) The number of bytes in the payload.

    Returns:
        The checksum of the payload.
    """
    checksum = 0
    length = min(payload_size, len(payload))
    for i in range(0, length):
        checksum += ord(payload[i])
    return checksum
60e26e0c68ba82e26fb484897fa7df9992611f96
77,394
def load_file(filename: str) -> list:
    """Load the list of display entries from filename.

    Each line is split on '|' into (signal patterns, output values).

    :param filename: Location of the input file
    :return: List of display entries
    """
    entries = list()
    with open(filename) as f:
        for line in f:
            entries.append(
                (line.split('|')[0].split(), line.split('|')[1].split())
            )
    return entries
785c681f53426ad071240e1f6cfd847970dc2190
77,399
def dependency_sort(assignments):
    """
    We define a dependency sort as a Tarjan strongly-connected components
    resolution. Tarjan's algorithm happens to topologically sort as a
    by-product of finding strongly-connected components.

    Use this in code generators to sort assignments in dependency order,
    if there are circular dependencies. It is slower than
    ``topological_sort``.

    In the sema model, each node depends on types mentioned in its
    ``descendants``. The model is nominally a tree, except ``descendants``
    can contain node references forming a cycle.

    Returns a list of tuples, where each item represents a component in
    the graph. Ideally they're one-tuples, but if the graph has cycles
    items can have any number of elements constituting a cycle. This is
    nice, because the output is in perfect dependency order, except for
    the cycle components, where there is no order. They can be detected on
    the basis of their plurality and handled separately.
    """
    # Build reverse-lookup table from name -> node.
    assignments_by_name = {a.reference_name(): a for a in assignments}

    # Build the dependency graph.
    graph = {}
    for assignment in assignments:
        references = assignment.references()
        graph[assignment] = [assignments_by_name[r] for r in references
                             if r in assignments_by_name]

    # Now let Tarjan do its work! Adapted from here:
    # http://www.logarithmic.net/pfh-files/blog/01208083168/tarjan.py
    index_counter = [0]
    stack = []
    lowlinks = {}
    index = {}
    result = []

    def strongconnect(node):
        # Set the depth index for this node to the smallest unused index
        index[node] = index_counter[0]
        lowlinks[node] = index_counter[0]
        index_counter[0] += 1
        stack.append(node)

        # Consider successors of `node`
        successors = graph.get(node, [])
        for successor in successors:
            if successor not in lowlinks:
                # Successor has not yet been visited; recurse on it
                strongconnect(successor)
                lowlinks[node] = min(lowlinks[node], lowlinks[successor])
            elif successor in stack:
                # the successor is in the stack and hence in the current
                # strongly connected component (SCC)
                lowlinks[node] = min(lowlinks[node], index[successor])

        # If `node` is a root node, pop the stack and generate an SCC
        if lowlinks[node] == index[node]:
            connected_component = []
            while True:
                successor = stack.pop()
                connected_component.append(successor)
                if successor == node:
                    break
            component = tuple(connected_component)
            result.append(component)

    for node in sorted(graph.keys(), key=lambda a: a.reference_name()):
        if node not in lowlinks:
            strongconnect(node)

    return result
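A minimal sketch (with a hypothetical `Assignment` stub standing in for the sema-model nodes, not part of the original codebase) showing how a two-node cycle surfaces as a multi-element component:

class Assignment:
    """Hypothetical stand-in exposing the interface dependency_sort expects."""
    def __init__(self, name, refs):
        self.name, self.refs = name, refs

    def reference_name(self):
        return self.name

    def references(self):
        return self.refs


a = Assignment("A", ["B"])
b = Assignment("B", ["A"])  # A and B reference each other: a cycle
c = Assignment("C", ["A"])  # C depends on the cycle

components = dependency_sort([a, b, c])
# The A/B cycle is emitted as one two-element component, before C,
# which depends on it:
assert [tuple(x.name for x in comp) for comp in components] == [("B", "A"), ("C",)]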
d9b0144474733d33ad89deeb215e057b3fa6792f
77,401
def mysql_now(element, compiler, **kw):
    """Implement the MySQL now() using the TIMESTAMP datatype."""
    return "CURRENT_TIMESTAMP()"
5b8e5fd0dbc116770ad603e5a71961ef53af1e1d
77,402
def get_poly_bounds(poly1):
    """
    Find the bounds of the Polygon.

    Parameters
    ----------
    poly1 : Polygon
        contour with shapely polygon format

    Returns
    -------
    min_h : int
        minimum y coordinate of polygon
    min_w : int
        minimum x coordinate of polygon
    max_h : int
        maximum y coordinate of polygon
    max_w : int
        maximum x coordinate of polygon
    """
    min_x, min_y, max_x, max_y = poly1.bounds
    min_h, min_w = min_y, min_x
    max_h, max_w = max_y, max_x
    return min_h, min_w, max_h, max_w
5ed309a0d29569e3874a3b175c5f3b206245c389
77,406
def get_trail_by_arn(cloudtrail_client, trail_arn):
    """Gets trail information based on the trail's ARN"""
    trails = cloudtrail_client.describe_trails()['trailList']
    for trail in trails:
        if trail.get('TrailARN', None) == trail_arn:
            return trail
    raise ValueError('A trail could not be found for %s' % trail_arn)
b246de07561a2c766c480cb3135a3aa3bff0ab39
77,407
def tanh_prime(Y):
    """Define the derivative of the activation function tanh."""
    return 1 - Y ** 2
cc7468345262ce5d7ae8f57e0169696e5729008e
77,408
import re


def remove_duplicated_whitespaces(whitespaced: str) -> str:
    """
    Clean up an input string out of any number of repeated whitespaces.

    :param whitespaced: the string to clean up
    :return: the string with each whitespace run collapsed to one space
    """
    cleaner = re.compile(r"\s+")
    return cleaner.sub(" ", whitespaced)
86b314c2c025db2444174fd3c6bed8cb867b5c76
77,411
import itertools


def bruteforce(charset, minlength, maxlength):
    """Efficient dumb bruteforcer generator.

    Example:
        # Generate every string 3 to 10 characters long,
        # from the ascii_lowercase charset.
        for word in bruteforce(string.ascii_lowercase, 3, 10):
            print(word)
    """
    return (''.join(candidate)
            for candidate in itertools.chain.from_iterable(
                itertools.product(charset, repeat=i)
                for i in range(minlength, maxlength + 1)))
3bd9cef1a7a328c690ee9818b381ecd9ee20be76
77,412
def try_int(n):
    """
    Takes a number *n* and tries to convert it to an integer. When *n* has
    no decimals, an integer is returned with the same value as *n*.
    Otherwise, *n* is returned unchanged.
    """
    n_int = int(n)
    return n_int if n == n_int else n
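For instance (illustrative, not part of the original record):

# A whole-valued float collapses to int; anything else passes through:
assert try_int(3.0) == 3 and isinstance(try_int(3.0), int)
assert try_int(3.5) == 3.5 and isinstance(try_int(3.5), float)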
9a73d8f2e8410c1784b9580afbfb50fc80b2a615
77,416
import inspect


def add_argument_kwargs(params, arg):
    """Build kwargs for parser.add_argument function."""
    valid_hints = [int, float, str, bool]
    fn_default = params[arg].default
    fn_annotation = params[arg].annotation
    # special case, bool with default value becomes a flag
    if fn_annotation is bool and fn_default is not inspect._empty:
        kwargs = {'action': 'store_true' if not fn_default else 'store_false'}
    elif fn_annotation in valid_hints:
        kwargs = {'type': fn_annotation, 'default': fn_default}
    else:
        kwargs = {'default': fn_default}
    return kwargs
280c83e211785521319ddf9c8b876ad7f652fa81
77,417
def extract_info_from_response(response, msg):
    """Extract the interesting information from a HTTPResponse."""
    msg.curi.status_code = response.code
    msg.curi.req_header = response.request.headers
    msg.curi.rep_header = response.headers
    msg.curi.req_time = response.request_time
    msg.curi.queue_time = response.time_info["queue"]
    msg.curi.content_body = response.body
    return msg
73e2ad2ce3420862d92963d0f58b87953fb9d102
77,421
def getitem(value, key, *default):
    """
    Gets an item, like getattr.

    :Example:
    ---------
    >>> a = dict(a = 1)
    >>> assert getitem(a, 'a') == 1
    >>> assert getitem(a, 'b', 2) == 2
    >>> import pytest
    >>> with pytest.raises(KeyError):
    ...     getitem(a, 'b')
    """
    if len(default):
        try:
            return value[key]
        except Exception:
            return default[0]
    else:
        return value[key]
d57a7944ac618a2777f177cc2d63da2f0df9649f
77,424
def encipher_rsa(pt, puk):
    """
    In RSA, a message `m` is encrypted by computing `m^e` (mod `n`),
    where ``puk`` is the public key `(n,e)`.

    Examples
    ========

    >>> from sympy.crypto.crypto import encipher_rsa, rsa_public_key
    >>> p, q, e = 3, 5, 7
    >>> puk = rsa_public_key(p, q, e)
    >>> pt = 12
    >>> encipher_rsa(pt, puk)
    3
    """
    n, e = puk
    return pow(pt, e, n)
01ba60b355cdb97eb1e7528f89ea0bbec53cd0dd
77,427
def create_guess_char_indices_dict(guesses):
    """
    Loops over each guess, and then loops over each character in the
    guess. This method finds the index of the character in the guess and
    then appends it to a list of indices which are set as the value of the
    character (key) in the result map. Each non-underscore character in
    the guess is a key, and each key has a list of indices as its value.

    :param guesses: a string representing a guess. e.g. __c__ represents
        a guess with c in the 2nd index position.
    :return: a dictionary containing chars as keys and a list of indices
        from the guess as the value of each key
    """
    char_indices = dict()
    for guess in guesses:
        for i in range(len(guess)):
            char = guess[i]
            if char != '_':
                if char in char_indices.keys():
                    char_indices[char].append(i)
                else:
                    char_indices[char] = [i]
    return char_indices
54834564d54aaab6e923e64cdc55f689146cb6ed
77,435
def spacify(string):
    """Add 2 spaces to the beginning of each line in a multi-line string."""
    # Two-space indent (the doubled spaces appear collapsed in some views).
    return "  " + "  ".join(string.splitlines(True))
2403f3fc1f193ae59be2dcfe2ac4911d542701cd
77,442
def adjust_fields(prefix, task):
    """
    Prepend the prefix to a task's fields.

    :param prefix: string prepended to task fields
    :type prefix: str
    :param task: a JSON task object from task.json
    :type task: dict
    :return: a modified JSON task object from task.json
    :rtype: dict
    """
    output_task = {}
    for field, content in task.items():
        output_task[prefix + field] = content
    return output_task.copy()
21fefa294ee2c10dd2388bd70be11828753df7c9
77,452
def get_ind_mat_average_uniqueness(ind_mat):
    """
    Snippet 4.4, page 65, Compute Average Uniqueness

    Average uniqueness from indicator matrix

    :param ind_mat: (np.matrix) indicator binary matrix
    :return: (float) average uniqueness
    """
    concurrency = ind_mat.sum(axis=1)
    uniqueness = ind_mat.T / concurrency
    avg_uniqueness = uniqueness[uniqueness > 0].mean()
    return avg_uniqueness
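A worked example (illustrative, using a plain ndarray where rows are time steps and columns are labels; not part of the original record):

import numpy as np

# The first time step is shared by both labels (concurrency 2), the other
# two are exclusive, so the positive uniqueness values are
# {0.5, 1, 0.5, 1} with mean 0.75:
ind_mat = np.array([[1, 1],
                    [1, 0],
                    [0, 1]])
assert get_ind_mat_average_uniqueness(ind_mat) == 0.75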
b80d54cbc0cdd00a677e4f53635a2b9e73556756
77,456
def write_to_text_file(file_path, file_contents):
    """
    Writes the given file_contents to the file_path in text format if the
    current user's permissions grant to do so.

    :param file_path: Absolute path for the file
    :param file_contents: List<str> List of lines
    :return: the file_path that was written
    :rtype: str
    :raises: This method raises OSError if it cannot write to file_path.
    """
    with open(file_path, mode='w', encoding='utf-8') as handler:
        for line in file_contents:
            handler.write("%s\n" % line)
    return file_path
5390092f05c1a449567ae701c2ca08b0ef4405c6
77,457
def _pyval_field_major_to_node_major(keys, values, shape):
    """Regroup each field (k, v) from dict-of-list to list-of-dict.

    Given a "field-major" encoding of the StructuredTensor (which maps
    each key to a single nested list containing the values for all
    structs), return a corresponding "node-major" encoding, consisting of
    a nested list of dicts.

    `shape` is used to determine how far to recurse; and if `keys` is
    empty it is used to determine the sizes for empty lists.

    Args:
        keys: The field names (list of string).
        values: The field values (list of python values). Must have the
            same length as `keys`.
        shape: A tuple specifying the shape of the `StructuredTensor`.

    Returns:
        A nested list of dict.
    """
    if not shape:
        return dict(zip(keys, values))
    elif not keys:
        if shape[0] in (0, None):
            return []
        else:
            return [_pyval_field_major_to_node_major((), (), shape[1:])] * shape[0]
    else:
        nvals = len(values[0])
        assert all(nvals == len(values[i]) for i in range(1, len(values)))
        return [
            _pyval_field_major_to_node_major(keys, value_slice, shape[1:])
            for value_slice in zip(*values)
        ]
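A small illustration of the regrouping (illustrative, not part of the original record):

# Field-major {'a': [1, 2], 'b': [3, 4]} becomes node-major, one dict per node:
result = _pyval_field_major_to_node_major(('a', 'b'), ([1, 2], [3, 4]), (2,))
assert result == [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]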
b0164fa0a60ee051cd045a731efc22e34dc6f18a
77,459
import pkg_resources


def asset_exists(name: str) -> bool:
    """Return True only if the desired asset is known to exist."""
    if name:
        return pkg_resources.resource_exists(__name__, name)
    else:
        return False
96cf5c139d979d4a7376cf186c6e7bacd387f3d9
77,471
def kirchhoff_voltage_law_rule(mod, c, tmp):
    r"""
    **Constraint Name**: TxDcopf_Kirchhoff_Voltage_Law_Constraint

    **Enforced Over**: CYCLES_OPR_TMPS

    The sum of all potential differences across branches around all cycles
    in the network must be zero in each operational timepoint. In DC power
    flow we assume all voltage magnitudes are kept at nominal value and
    the voltage angle differences across branches are small enough that we
    can approximate the sine of the angle with the angle itself, i.e.
    sin(theta) ~ theta. We can therefore write KVL in terms of voltage
    angles as follows:

    .. math:: \sum_{l} C_{l,c} \cdot \theta_{l} = 0
       \quad \forall c = 1,...,L-N+1

    Using the linearized relationship between voltage angle differences
    across branches and the line flow, :math:`\theta_{l} = x_{l} \cdot
    f_{l}`, we can factor out the voltage angles and write KVL purely in
    terms of line flows and line reactances:

    .. math:: \sum_{l} C_{l,c} \cdot x_{l} \cdot f_{l} = 0
       \quad \forall c = 1,...,L-N+1

    The latter equation is enforced in this constraint.

    Note: While most power flow formulations normalize all inputs to per
    unit (p.u.) we can safely avoid that here since the normalization
    factors out in the equation, and it is really just about the relative
    magnitude of the reactance of the lines.

    Source: Horsch et al. (2018). Linear Optimal Power Flow Using Cycle
    Flows
    """
    return sum(
        mod.TxDcopf_Transmit_Power_MW[l, tmp]
        * mod.tx_dcopf_cycle_direction[mod.period[tmp], c, l]
        * mod.tx_dcopf_reactance_ohms[l]
        for l in mod.TX_DCOPF_IN_PRD_CYCLE[mod.period[tmp], c]
    ) == 0
0214daadd18f75272003439c21c6acd0f38030e4
77,473
def _normalise_drive_string(drive):
    """Normalise drive string to a single letter."""
    if not drive:
        raise ValueError("invalid drive letter: %r" % (drive,))
    if len(drive) > 3:
        raise ValueError("invalid drive letter: %r" % (drive,))
    if not drive[0].isalpha():
        raise ValueError("invalid drive letter: %r" % (drive,))
    if not ":\\".startswith(drive[1:]):
        raise ValueError("invalid drive letter: %r" % (drive,))
    return drive[0].upper()
fcb23da88a8a672ec52e568844a8f87baaba9865
77,475
def ensure_service_chains(service_groups, soa_dir, synapse_service_dir):
    """Ensure service chains exist and have the right rules.

    service_groups is a dict {ServiceGroup: set([mac_address..])}

    Returns dictionary {[service chain] => [list of mac addresses]}.
    """
    chains = {}
    for service, macs in service_groups.items():
        service.update_rules(soa_dir, synapse_service_dir)
        chains[service.chain_name] = macs
    return chains
643eb364cf56202fe45e62eae26d37f8fa3fe597
77,480
from typing import Union
import pathlib


def clean_mirror_path(mirror: str) -> Union[str, pathlib.Path]:
    """Convert the user input from command line to an acceptable format
    for the app.

    Note: If the user provided an URL, it will remove any trailing '/'.

    :param mirror: User input
    :return: Either a Path object or a string
    """
    # First, detect if the string is an url
    mirror_path: Union[str, pathlib.Path] = (
        pathlib.Path(mirror) if "http" not in mirror else mirror
    )
    # Remove trailing '/' if needed
    if isinstance(mirror_path, str) and mirror_path.endswith("/"):
        mirror_path = mirror_path[:-1]
    return mirror_path
eb99c6993e26565207a173be4b7d24b8ab9569a1
77,481
def kron_d(i, j):
    """The Kronecker delta."""
    return 1 if i == j else 0
4b4bfe35bc4407ebde917b70d4052259c984c0d4
77,484
def _similar_compound_to_str(sdict, compound_idx):
    """
    inputs:
        sdict: a dict returned from get_similar_compounds()
        compound_idx: index of current compound being considered
    returns:
        string with only non-breaking spaces or '*' if dict represents
        current compound
    """
    if sdict['index'] == compound_idx:
        return '*'
    return (f"{sdict['index']}, {sdict['label']} "
            f"{{RT-{sdict['rt'].rt_peak:.2f}}}").replace(' ', '\xa0')
d9a6ae4e229e0e30e88c9dff6acc14517f8cb148
77,490
from typing import List
import re


def compress(s: str) -> List[str]:
    """Split words, digits and delimiters into one list, keeping the
    delimiters as separate elements."""
    return re.split(r'(\W)', s)
c28cc602a6cd3ea219c0ee940180bdf1b9c9cc28
77,491
import pickle


def load(filepath):
    """Return a pickled object at `filepath`"""
    # Use a context manager so the file handle is closed deterministically.
    with open(filepath, 'rb') as pkl:
        obj = pickle.load(pkl)
    return obj
baac00bedaa92711dfa480eb900f567e9ca9cbe1
77,492
def in_group(user, group_name):
    """Return True if the user is in the group, False otherwise."""
    return user.groups.filter(name=group_name).exists()
e3b17ea56783c5512ddf5217801d695371499ca2
77,495
def _set_filter(name: str):
    """Filter is used to filter loguru.logger by the logger type, allowing
    to create multiple 'virtual' loggers.

    'logger_name' is the key used on log record 'extra' fields to identify
    the type of logger, thus used on logger.bind calls to fetch each
    virtual logger.
    """
    def record_filter(record):
        return record["extra"].get("logger_name") == name

    return record_filter
4949e6fede2ffd2988ccee55a7e9ec5ebbccd8a3
77,501
def prediction2text(prediction):
    """Converts a single prediction of the model to text."""
    # 'Да' / 'Нет' are Russian for 'Yes' / 'No'.
    return 'Да' if prediction > 0.5 else 'Нет'
bc7825525b0e4678a89b429b2769e3fccb31fabd
77,507
import re


def filter_by_category(result, name_filter=None):
    """
    Filter by category name the result returned from ZIA.

    :param result: Dict of categories from ZIA response.
    :param name_filter: A regex string used to filter result.
    :return: Filtered result.
    """
    categories = result.get("categories", [])
    regex = r'{}'.format(name_filter)

    # Add category total and filtered counts to result.
    result.update({
        "category_counts": {
            "total": len(categories),
            "filtered": len(categories)
        }
    })

    if name_filter:
        filtered_categories = [
            c for c in categories
            if re.search(regex, c.get("configuredName", ''), re.I)
        ]
        result["categories"] = filtered_categories
        result["category_counts"]["filtered"] = len(filtered_categories)

    return result
da5e1c716a86907bb953abf8821962e76094de9d
77,509
def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
    """Check termination condition for nonlinear least squares."""
    ftol_satisfied = dF < ftol * F and ratio > 0.25
    xtol_satisfied = dx_norm < xtol * (xtol + x_norm)

    if ftol_satisfied and xtol_satisfied:
        return 4
    elif ftol_satisfied:
        return 2
    elif xtol_satisfied:
        return 3
    else:
        return None
ee078b7df7a19eed98f05860640121ac7fd6c4ad
77,510