Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import copy


def validate_variable_xpaths(sed_variables, model_etree):
    """ Get the names OpenCOR uses to refer to model variables

    Args:
        sed_variables (:obj:`list` of :obj:`Variable`): SED variables
        model_etree (:obj:`lxml.etree._ElementTree`): element tree for model

    Returns:
        :obj:`dict`: dictionary that maps the id of each SED variable to the
            name that OpenCOR uses to reference it
    """
    opencor_variable_names = {}

    for sed_variable in sed_variables:
        if not sed_variable.target:
            msg = 'Symbols are not supported.'
            raise NotImplementedError(msg)

        namespaces = copy.copy(sed_variable.target_namespaces)
        namespaces.pop(None, None)

        obj_target, _, attrib_target = sed_variable.target.partition('/@')
        xml_objs = model_etree.xpath(obj_target, namespaces=namespaces)

        if len(xml_objs) == 0:
            msg = (
                'XPath targets of variables must reference unique observables. '
                'The target `{}` of variable `{}` does not match any model elements.'
            ).format(sed_variable.target, sed_variable.id)
            raise ValueError(msg)

        if len(xml_objs) > 1:
            msg = (
                'XPath targets of variables must reference unique observables. '
                'The target `{}` of variable `{}` matches multiple model elements.'
            ).format(sed_variable.target, sed_variable.id)
            raise ValueError(msg)

        xml_obj = xml_objs[0]

        names = []
        while True:
            name = xml_obj.attrib.get('name', None)
            names.append(name)
            xml_obj = xml_obj.getparent()
            ns, _, tag = xml_obj.tag[1:].partition('}')
            if not name or not ns.startswith('http://www.cellml.org/cellml/'):
                msg = 'Target `{}` of variable `{}` is not a valid observable.'.format(
                    sed_variable.target, sed_variable.id)
                raise ValueError(msg)
            if tag == 'model':
                break

        if attrib_target:
            names.insert(0, attrib_target)

        opencor_variable_names[sed_variable.id] = '/'.join(reversed(names))

    return opencor_variable_names
7e83a4e6ecb7037bb6c5e9f3b9c93afef1a83e5b
513,899
def bezier_point(cps, t):
    """ Cubic Bezier curve interpolation

    B(t) = (1-t)^3 * P0 + 3t(1-t)^2 * P1 + 3t^2(1-t) * P2 + t^3 * P3
    """
    p = ((1 - t) ** 3) * cps[0, :]
    p += 3 * t * ((1 - t) ** 2) * cps[1, :]
    p += 3 * (t ** 2) * (1 - t) * cps[2, :]
    p += (t ** 3) * cps[3, :]
    return p
ca5644539ac0c239ce028f992a07965d92132b8c
655,844
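A minimal usage sketch for bezier_point above (added here, not part of the original snippet), assuming numpy control points of shape (4, 2):

import numpy as np

cps = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
points = [bezier_point(cps, t) for t in (0.0, 0.5, 1.0)]
# points[0] equals cps[0] and points[-1] equals cps[3]:
# the curve starts at P0 and ends at P3.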
def extractScopeLFN(scope_lfn):
    """ Extract the scope and LFN from the scope_lfn string """
    # scope_lfn = scope:lfn -> scope, lfn
    _dummy = scope_lfn.split(':')
    return _dummy[0], _dummy[1]
91d4672624b2b6f5df4f132f4da6d9016747cce5
680,754
def lower_case(doc):
    """Returns the lowercase form of each token in the document"""
    token_list = []
    for token in doc:
        token_list.append(token.lower_)
    return token_list
25cf40a6e3f39b3306667688508d498181a2fb82
69,511
from typing import Tuple
import hashlib


def hashfile(path: str, blocksize: int = 65536) -> Tuple[str, str]:
    """Calculate the MD5 hash of a given file

    Args:
        path (str, os.path): Path to the file to generate a hash for
        blocksize (int, optional): Memory size to read in the file
            Default: 65536

    Returns:
        hash (str): The HEX digest hash of the given file
        path (str): The filepath that generated the hash
    """
    # Instantiate the hashlib module with md5
    hasher = hashlib.md5()

    # Open the file and read it in blocks, updating the hash as we go
    with open(path, "rb") as f:
        buf = f.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)  # Update the hash
            buf = f.read(blocksize)  # Update the buffer

    return hasher.hexdigest(), path
e38e6622534f27bed109a2e2b71373503ca4e7b0
7,483
def _get_valid_indexes(indexes, offset):
    """Restrict to indexes so that i+offset still is a valid index."""
    if offset == 0:
        return indexes
    return indexes[:, :-offset]
628129d5fb20e6a94183f70036e7eb082553d4b8
223,451
from itertools import chain


def get_measures(clazz):
    """
    Gets all the measures of a clazz.

    :param clazz: Clazz.
    :return: List of measures.
    """
    is_property = lambda v: isinstance(v, property)
    is_public = lambda n: not n.startswith('_')
    is_valid = lambda n, v: is_public(n) and is_property(v)

    measures = sorted(list(chain(*[[n for n, v in vars(c).items() if is_valid(n, v)]
                                   for c in clazz.__mro__])))
    return measures
36fbd6849b6c7b52c27f1192237c3c979e9bf286
217,430
import string


def column_number_to_column_id(number):
    """ Takes a one-based index and converts it to a column identifier
    string such as used in Excel.

    Examples:
        1 => A
        26 => Z
        27 => AA
        703 => AAA

    Note that this is similar to converting numbers to base-26, but not
    quite the same: this numbering scheme has no concept of 0. We go from
    "Z" to "AA" which is like going from 9 to 11 with no intervening 10.
    Only works for positive integers.
    """
    if not isinstance(number, int) or number <= 0:
        raise AttributeError(
            "column_number_to_column_id requires a positive int")
    digits = string.ascii_uppercase
    parts = []
    while number > 0:
        number, mod = divmod(number - 1, len(digits))
        parts.insert(0, digits[mod])
    return ''.join(parts)
94effcd908f958ad8e283815d9a0cec3f3a468bc
492,492
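A quick sanity check for column_number_to_column_id above (added here, not from the original snippet), exercising the one-based scheme:

assert column_number_to_column_id(1) == 'A'
assert column_number_to_column_id(26) == 'Z'
assert column_number_to_column_id(27) == 'AA'
assert column_number_to_column_id(703) == 'AAA'  # A..Z (26) plus AA..ZZ (676) = 702, so AAA is 703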
def join_endpoints(endpoints, A, B):
    """ Join B's segment onto the end of A's and return the segments.
    Maintain endpoints dict. """
    Asegment, Bsegment = endpoints[A], endpoints[B]
    # A must be at the end of Asegment
    if Asegment[-1] is not A:
        Asegment.reverse()
    # B must be at the beginning of Bsegment
    if Bsegment[0] is not B:
        Bsegment.reverse()
    Asegment.extend(Bsegment)  # connect
    del endpoints[A], endpoints[B]  # A and B are no longer endpoints
    # register the two endpoints of the joined segment
    endpoints[Asegment[0]] = Asegment
    endpoints[Asegment[-1]] = Asegment
    return Asegment, endpoints
f7e55d72dab73be786c004c88813c2bb0471ba26
126,603
def valid_name(name):
    """
    Replace dashes and underscores by spaces, and lowercase.

    >>> valid_name('TALOS_Metapkg-ros_control_sot')
    'talos metapkg ros control sot'
    """
    return name.replace('_', ' ').replace('-', ' ').lower()
86c30c01a40c9baf70c46bb8d5773d001fbdec7a
494,045
def flatten_dataframe_for_JSON(df):
    """Flatten a pandas dataframe to a plain list, suitable for JSON serialisation"""
    return df.values.flatten().tolist()
ef4ad28904f083f1d86daefab25ce13faf36a344
686,452
def get_valid_value(series, last=True):
    """get the first/last not-nan value of pd.Series with single level index

    Parameters
    ----------
    series : pd.Series
        series should not be empty
    last : bool, optional
        whether to get the last valid value, by default True
        - if last is True, get the last valid value
        - else, get the first valid value

    Returns
    -------
    nan | float
        the first/last valid value
    """
    return series.fillna(method="ffill").iloc[-1] if last else series.fillna(method="bfill").iloc[0]
3cafd7c0cbcfe4bd1f5253e2416fbad129cbbfea
383,120
def lunderize(title):
    """Returns a lowercase, underscored representation of a string.

    Usage:
    >>> print(lunderize('The Cat in the Hat'))
    >>> 'the_cat_in_the_hat'
    """
    title = title.lower()
    title = title.replace(' ', '_')
    title = title.replace('.', '')
    return title
b0e4320390164dbafe6bdfb7661aedbee0e04d1e
241,262
import torch
from typing import Tuple


def compute_dists(X: torch.Tensor, p: int = 1) -> Tuple[torch.Tensor, torch.Tensor]:
    """Computes distances between all pairs of points, keeps the unique ones
    and computes how many times each of them repeats.

    Args:
        X (torch.Tensor): Sampling plan
        p (int, optional): Degree of norm. Defaults to 1.

    Returns:
        J (torch.Tensor): Contains for every distance in unique_dist how many
            times it repeats in the nonunique distances.
        unique_dist (torch.Tensor): Unique distances between points
    """
    dist = torch.nn.functional.pdist(X, p=p)  # use the requested norm rather than a hard-coded p=2
    unique_dist = torch.unique(dist)
    J = torch.zeros_like(unique_dist)
    for i, d in enumerate(unique_dist):
        I = dist == d
        J[i] = torch.sum(I)
    return J, unique_dist
efde1e4e296b6f3fabb1e619e1372faeed6d7f1c
524,640
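A brief usage sketch for compute_dists above (added here, not from the original snippet); note the counts tensor is returned first:

import torch

X = torch.tensor([[0.0], [1.0], [2.0]])
J, unique_dist = compute_dists(X)
# pairwise distances are [1, 2, 1], so
# unique_dist == tensor([1., 2.]) and J == tensor([2., 1.])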
import requests


def download_file(url: str, prefix=""):
    """Download file by streaming and writing to file.

    Args:
        url: download url.
        prefix: prefix for output file.
    """
    local_filename = url.split("/")[-1]
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(prefix + local_filename, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
    return local_filename
7485b6d94415f986c63635bf8de0d71eb4577567
660,348
def add_missing_flow_by_fields(flowby_partial_df, flowbyfields):
    """
    Add in missing fields to have a complete and ordered df
    :param flowby_partial_df: Either flowbyactivity or flowbysector df
    :param flowbyfields: Either flow_by_activity_fields, flow_by_sector_fields,
        or flow_by_sector_collapsed_fields
    :return:
    """
    for k in flowbyfields.keys():
        if k not in flowby_partial_df.columns:
            flowby_partial_df[k] = None
    # convert data types to match those defined in flow_by_activity_fields
    for k, v in flowbyfields.items():
        flowby_partial_df.loc[:, k] = flowby_partial_df[k].astype(v[0]['dtype'])
    # Resort it so order is correct
    flowby_partial_df = flowby_partial_df[flowbyfields.keys()]
    return flowby_partial_df
672576a5fbe5e697e4dfb19c83f7416b6c5de4d2
382,114
import math


def closest_pow(n, base=2):
    """Return the closest (in log space) power of `base` from a number."""
    return base ** round(math.log(n, base))
7e71c6a15cde1895bf84be5ac38e8866ef6a6a4a
434,170
def serialize_for_deletion(elasticsearch_object_id):
    """
    Serialize content for bulk deletion API request

    Args:
        elasticsearch_object_id (string): Elasticsearch object id

    Returns:
        dict: the object deletion data
    """
    return {"_id": elasticsearch_object_id, "_op_type": "delete"}
8b5add9eb0ed1d9a8cd39ecc7896ac963ab7a1e2
112,894
def SetServiceAccountResource(ref, unused_args, request):
    """Add service account name to request name."""
    request.name = ref.RelativeName()
    return request
c586c45edd37b176039fc16a597b9a89fcbd2c40
553,335
import numbers


def is_number(x):
    """Test if `x` is a number."""
    return isinstance(x, numbers.Number)
cb8d6507800dc4a56b939747642c099c8c1de5c0
625,185
def nbest_oracle_eval(nbest, n=None):
    """Return the evaluation object of the best sentence in the nbest list."""
    return nbest.oracle_hyp(n=n).eval_
fe7641f6ccbaae7d85f620f4772e3a8b506880f5
700,034
from typing import List


def possible(matrix: List[List[int]], x: int, y: int, n: int) -> bool:
    """
    Based on sudoku's rules, checks if an integer n can be placed in
    matrix[x][y] without breaking the game.
    Returns True if the test passes, False otherwise.
    """
    # Check for problem in row
    for i in range(0, 9):
        if matrix[x][i] == n:
            return False
    # Check for problem in column
    for j in range(0, 9):
        if matrix[j][y] == n:
            return False
    # Initial indexes for inner square
    x0 = (x // 3) * 3
    y0 = (y // 3) * 3
    # Check for problem in inner square
    for i in range(0, 3):
        for j in range(0, 3):
            if matrix[x0 + i][y0 + j] == n:
                return False
    return True
4a7fe7ffc746dc6799ced18de59da7ddd318d0aa
270,764
def load_file(file_name):
    """
    Load a file into a line list and strip the trailing line-ending character.
    :param file_name: The name of the file to load
    :return: A list of file lines
    """
    with open(file_name, 'r') as data_file:
        data = [line[:-1] for line in data_file]
    return data
94aab195a987354b2330d2218cbb59ea6a2ecdf1
398,019
def _string_token_value(tok):
    """
    Given an ESCAPED_STRING token, unquote and unescape its value,
    to obtain the actual string it represents.
    :param tok: an ESCAPED_STRING token
    :return: The string value
    """
    return tok.value[1:-1].replace('\\"', '"').replace("\\\\", "\\")
96af6420eac2c0dd3c32f74abddbf3290eb9e78a
169,725
def funcline_parts(base, topline):
    """
    Parse the m-file function declaration.

    Returns [base, arguments, outputs] where base is the function
    name without the gsw_ prefix, arguments is a tuple of strings,
    and outputs is a tuple of strings.
    """
    cmd = topline.split(None, 1)[1]
    parts = cmd.split('=')
    if len(parts) == 1:
        out = ''
        func_call = parts[0].strip()
    else:
        out = parts[0].strip()
        if out.startswith('['):
            out = out[1:-1]
        func_call = parts[1].strip()
    out = [a.strip() for a in out.split(',')]
    parts = func_call.split('(')
    if len(parts) == 1:
        argstring = ''
    else:
        argstring = parts[1][:-1]
    args = [a.strip() for a in argstring.split(',')]
    parts = [base[4:], tuple(args), tuple(out)]
    return parts
5654884c12cb8104687fce7ff81760014a9465eb
229,315
import string
import re


def partial_format(s, data, missing="{{{key}}}"):
    """Return string `s` formatted by `data` allowing a partial format

    Arguments:
        s (str): The string that will be formatted
        data (dict): The dictionary used to format with.

    Example:
        >>> partial_format("{d} {a} {b} {c} {d}", {'b': "and", 'd': "left"})
        'left {a} and {c} left'
    """

    class FormatDict(dict):
        """This supports partial formatting.

        Missing keys are replaced with the return value of __missing__.
        """

        def __missing__(self, key):
            return missing.format(key=key)

    formatter = string.Formatter()
    mapping = FormatDict(**data)
    try:
        f = formatter.vformat(s, (), mapping)
    except Exception:
        r_token = re.compile(r"({.*?})")
        matches = re.findall(r_token, s)
        f = s
        for m in matches:
            try:
                f = re.sub(m, m.format(**data), f)
            except KeyError:
                continue
    return f
a16426f524be187e4a08273762d09f23e9e5b0b2
205,848
from typing import List


def _get_docstring_var_description(var_doc: str) -> str:
    """
    Get a description of argument or return value from docstring.

    Parameters
    ----------
    var_doc : str
        Docstring's part of argument or return value.

    Returns
    -------
    description : str
        Description of argument or return value.
    """
    var_doc = var_doc.rstrip()
    splitted_list: List[str] = var_doc.split('\n')
    if len(splitted_list) < 2:
        return ''
    description: str = '\n'.join(splitted_list[1:])
    description = description.rstrip()
    return description
976f7fc2b05f42f21f66fc56191bad98ca905d34
426,515
import math


def pi(max_k):
    """
    Approximate pi using the first max_k+1 terms of Ramanujan's series.
    Works for max_k up to 1228.
    """
    ans = 0
    for k in range(max_k + 1):
        ans += math.factorial(4 * k) * (1103 + 26390 * k) / (
            math.factorial(k) ** 4 * 396 ** (4 * k))
    return 9801 / (2 * math.sqrt(2) * ans)
8b669d27adafe15a60c013238f04fc80c50d5697
346,905
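A sanity check for the pi approximation above (added here, not from the original snippet), comparing against math.pi; the series gains roughly eight digits per term:

import math

assert abs(pi(0) - math.pi) < 1e-6
assert abs(pi(1) - math.pi) < 1e-12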
def stringify(object_name, object_type, object_content):
    """
    Given a stringifiable object, stringify it.
    :param object_name: Name of the object
    :param object_type: Type of the object
    :param object_content: The object itself
    :return: The stringified object
    """
    return '%s#%s#%s' % (object_name, object_type, str(object_content))
733efb1b11de4dc62102f5d85ce12c90119f1b8f
536,380
from datetime import datetime


def validate_timestamp(timestamp):
    """Validates timestamp input for correct format

    Args:
        timestamp: datetime string

    Returns:
        False if the string is not in the expected format,
        otherwise the parsed datetime object
    """
    try:
        datetime_input = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        return False
    return datetime_input
175a235de7f7eb6187fbaf73a75b25924b01197a
585,473
import urllib.request, urllib.error, urllib.parse, socket


def internet_access(timeout=1):
    """Return True if internet is on, else False."""
    try:
        # Set the timeout to 1 sec by default so this check does not take
        # much time (the probed host should respond quickly). Probing a
        # numerical IP address (e.g. http://8.8.8.8) would also avoid the
        # DNS lookup.
        response = urllib.request.urlopen('http://vg.no', timeout=timeout)
        return True
    except (urllib.error.URLError, socket.timeout) as err:
        pass
    return False
6881299f490da708f170f1f659d2f4b6a0ac78df
110,519
def is_droppedaxis(slicer):
    """Return True if the index represents a dropped axis"""
    return isinstance(slicer, int)
549002693fad5c0fb4429f0cf9aafb0b1fa01b51
524,396
def mocked_requests_get(*_, **kwargs):
    """ fake requests.get() operation by providing an api_url and expect
    the exact responses corresponding to a set of urls params below """
    usd_php_params = {'compact': 'ultra', 'q': 'USD_PHP'}
    eur_jpy_params = {'compact': 'ultra', 'q': 'EUR_JPY'}
    gbp_dkk_params = {'compact': 'ultra', 'q': 'GBP_DKK'}
    usd_eur_params = {'compact': 'ultra', 'q': 'USD_EUR'}

    class MockedRequestsModelsResponse():
        """ a fake result from requests.get() """

        def __init__(self, json_result, status_code):
            self.json_result = json_result
            self.status_code = status_code

        def json(self):
            """ a mock of requests.get().json() """
            return self.json_result

    if kwargs['params'] == usd_php_params:
        return MockedRequestsModelsResponse({'USD_PHP': 46.211}, 200)
    elif kwargs['params'] == eur_jpy_params:
        return MockedRequestsModelsResponse({'EUR_JPY': 133.2801}, 200)
    elif kwargs['params'] == gbp_dkk_params:
        return MockedRequestsModelsResponse({'GBP_DKK': 8.377764}, 200)
    elif kwargs['params'] == usd_eur_params:
        return MockedRequestsModelsResponse({'USD_EUR': 0.805001}, 200)
    return MockedRequestsModelsResponse({}, 200)
3473e79e9f775d925500c9a1682d17e8bf4336f6
410,227
def gen_threads_started(df):
    """
    Generate counts of threads started, by user and week.
    :param df: pd.DataFrame of forum post data.
    :return: pd.DataFrame of 'session_user_id', 'week', and threads_started.
    """
    df_starts = (df[df.thread_order == 1]
                 .groupby(['session_user_id', 'week'])
                 .size()
                 .rename('threads_started')
                 .reset_index())
    return df_starts
c75978ffd9490b2ea1d0ce78e68b069a4bc17608
358,592
def shorten(text, length=25, indicator="..."):
    """Returns text or a truncated copy with the indicator added

    text is any string; length is the maximum length of the returned
    string (including any indicator); indicator is the string added at
    the end to indicate that the text has been shortened

    >>> shorten("Second Variety")
    'Second Variety'
    >>> shorten("Voices from the Street", 17)
    'Voices from th...'
    >>> shorten("Radio Free Albemuth", 10, "*")
    'Radio Fre*'
    """
    if len(text) > length:
        text = text[:length - len(indicator)] + indicator
    return text
da8622c8654fd7b2dc1d4060f6ddae62df7bffe7
289,531
import yaml


def get_terget_stocks(file_path):
    """Reads target stocks for long/short straddle strategies."""
    try:
        with open(file_path) as f:
            target_stocks = yaml.load(f, Loader=yaml.FullLoader)
        return target_stocks
    except Exception as e:
        print(e)
98d6b9e52b7793bf0f53d9f39dcf067548635e44
256,988
def GetFuzzExtraEnv(extra_options=None):
    """Gets extra_env for fuzzing.

    Gets environment variables and values for running libFuzzer. Sets
    defaults and allows user to specify extra sanitizer options.

    Args:
        extra_options: A dict containing sanitizer options to set in addition
            to the defaults.

    Returns:
        A dict containing environment variables and their values that can be
        used in the environment libFuzzer runs in.
    """
    if extra_options is None:
        extra_options = {}
    # log_path must be set because Chrome OS's patched compiler changes it.
    options_dict = {'log_path': 'stderr'}
    options_dict.update(extra_options)
    sanitizer_options = ':'.join('%s=%s' % x for x in options_dict.items())
    sanitizers = ('ASAN', 'MSAN', 'UBSAN')
    return {x + '_OPTIONS': sanitizer_options for x in sanitizers}
9d15cecb86a915717229320c5416c56c67def0d3
340,377
def info(fname, expected, actual, flag):
    """
    Convenience function returns nicely formatted error/warning msg.
    """
    # `types` is a tuple of type() results; str(t).split("'") turns
    # "<type 'float'>" into ["<type ", "float", ">"], and we take the
    # middle piece.
    format = lambda types: ', '.join([str(t).split("'")[1] for t in types])
    expected, actual = format(expected), format(actual)
    msg = "'%s' method " % fname \
          + ("accepts", "returns")[flag] + " (%s), but " % expected \
          + ("was given", "result is")[flag] + " (%s)" % actual
    return msg
1ad57feb80a4847c2c2d42851c70b02c21b74a89
223,894
def extract_image_information(image_element):
    """
    Given an image selenium element containing the MLA image link,
    extract the MLA last name and image url
    """
    root_url = 'assembly.ab.ca'
    name = image_element.get_attribute('alt')
    name = name.replace(' ', '_')
    name = name.title()
    url = image_element.get_attribute('src').replace('..', root_url)
    return name, url
5143f938c9f0cb478d468bed5ae88bf1bd39de02
380,806
import math


def identityProbability(gs: int, mr: float = 1e-08) -> float:
    """Return the probability for a sequence to not change over gs
    generations of evolution.

    **Keyword arguments:**
    gs -- number of generations
    mr -- mutation rate (default is 1e-08)
    """
    return math.pow(1 - mr, gs)
c8be5ef9c459e709482b69c0f4d7ace64364b639
70,958
def check_query_type(type):
    """Check the query type.

    Only the following query types are allowed:

    AllowedType:
        AllLabels: Query all labels of an instance.
            Supported scenes: binary classification, multi-class
            classification, multi-label classification, regression
        PartLabels: Query part of the labels of an instance
            (only available in the multi-label setting).
        Features: Query part of the features of an instance.
            Supported scene: missing features

    NotImplementedQueryType:
        Relations: Query relations between two objects.
        Examples: Query examples given constraints.

    Parameters
    ----------
    type: str
        query type.

    Returns
    -------
    result: bool
        True if query type is in ['AllLabels', 'PartLabels', 'Features'].
    """
    assert isinstance(type, str)
    QueryType = ['AllLabels', 'PartLabels', 'Features']
    NotImplementedQueryType = ['Relations', 'Examples']
    if type in QueryType:
        return True
    else:
        return False
ffa2343fc0e7b883bea5e306c0ce0641892aaa77
107,737
def contains_params(path):
    """True if path contains params (';' part)."""
    return path.count(';') != 0
f5534b05a7fde4bd0818b9ad393e0fb2c1bd4d09
559,976
def get_attributes(label_data):
    """Reads all attributes of a label.

    Args:
        label_data (dict): Label data with an 'attributes' mapping

    Returns:
        list: List of all attribute names
    """
    attributes = []
    if label_data['attributes'] is not None:
        for attribute_name in label_data['attributes']:
            if len(label_data['attributes'][attribute_name]) > 0:
                for attribute in label_data['attributes'][attribute_name]:
                    attributes.append(attribute['name'])
    return attributes
d2b233de47f0ca26ec592c87670440a8a213fe5e
303,224
def delta_seconds(before, after):
    """
    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    try:
        return delta.total_seconds()
    except AttributeError:
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))
e26e385c14b1b479b6cff24828dbb9f6c8aaeab7
192,042
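A short usage sketch for delta_seconds above (added here, not from the original snippet), with datetime inputs:

from datetime import datetime

t0 = datetime(2020, 1, 1, 12, 0, 0)
t1 = datetime(2020, 1, 1, 12, 0, 1, 500000)
assert delta_seconds(t0, t1) == 1.5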
def one(*args, **kwargs):
    """ The constant function that always returns 1 """
    return 1
ed28daf877a093ea2e35219a63b939eb141ee383
120,190
def hapax(mydict):
    """Return a list of hapax.

    :param mydict: (dict)
    :returns: list of keys for which value = 1
    """
    return [k for k in mydict.keys() if mydict[k] == 1]
945256d39b3f6d8f8db28392635728aa3bef5dba
190,668
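A one-line check for hapax above (added here, not from the original snippet), assuming Python 3.7+ dict ordering:

assert hapax({'a': 1, 'b': 2, 'c': 1}) == ['a', 'c']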
def get_gamma_function(gamma):
    """
    :param gamma: desired factor gamma
    :return: Returns the lambda function of the gamma adjust operation
    """
    return lambda x: pow(x / 255, gamma) * 255
48175cacc3c41fcac4da9dfdf7bc475c2f813bb6
41,099
def nucleotide(nucleotide_index):
    """ Convert nucleotide index to a character. """
    nucleotides = ['?', 'A', 'C', 'G', 'T']
    if 1 <= nucleotide_index <= 4:
        return nucleotides[nucleotide_index]
    return '?'
392d8c623abada4179d8df5605970edb3568a388
452,069
def debye_spectral_density(omega: float, cutoff_freq: float, reorg_energy: float) -> float:
    """Calculates the Debye spectral density at frequency omega, with a given
    cutoff frequency omega_c and reorganisation energy lam. It is normalised
    so that if omega=omega_c and lam=1, the spectral density evaluates to 1.
    Implements an asymmetric spectral density, evaluating to zero for
    omega <= 0. It is given by:

    .. math::
        J(\\omega) = \\frac{2 \\lambda \\omega_c \\omega}
                          {\\omega_c^2 + \\omega^2}

    Parameters
    ----------
    omega : float
        The frequency at which the spectral density will be evaluated,
        in units of rad ps^-1. If omega <= 0, the spectral density
        evaluates to zero.
    cutoff_freq : float
        The cutoff frequency at which the spectral density evaluates to 1
        (or the reorg_energy value if lam is not equal to 1), in units of
        rad ps^-1. Must be a non-negative float.
    reorg_energy : float
        The factor by which the spectral density should be scaled. Should
        be passed in units of rad ps^-1. Must be a non-negative float.

    Returns
    -------
    float
        The Debye spectral density at frequency omega, in units of rad ps^-1.
    """
    assert cutoff_freq >= 0., (
        'The cutoff freq must be a non-negative float, in units of rad ps^-1')
    assert reorg_energy >= 0., (
        'The scaling factor must be a non-negative float, in units of rad ps^-1')

    if omega <= 0 or cutoff_freq == 0 or reorg_energy == 0:
        # Zero if omega <= 0 as an asymmetric spectral density is used.
        # Zero if omega = 0 or cutoff = 0 to avoid a DivideByZero error.
        return 0.
    # Returned in units of rad ps^-1
    return 2 * reorg_energy * omega * cutoff_freq / (omega**2 + cutoff_freq**2)
5771380e66d43d5eaaacec334fe7fcb8f15bfc0d
420,017
def seq3(seq):
    """Turn a one letter code protein sequence into one with three letter codes.

    The single input argument 'seq' should be a protein sequence using single
    letter codes, either as a python string or as a Seq or MutableSeq object.

    This function returns the amino acid sequence as a string using the three
    letter amino acid codes. Output follows the IUPAC standard (including
    ambiguous characters B for "Asx", J for "Xle" and X for "Xaa", and also U
    for "Sel" and O for "Pyl") plus "Ter" for a terminator given as an
    asterisk. Any unknown character (including possible gap characters), is
    changed into 'Xaa'.

    e.g.
    >>> from Bio.SeqUtils import seq3
    >>> seq3("MAIVMGRWKGAR*")
    'MetAlaIleValMetGlyArgTrpLysGlyAlaArgTer'

    This function was inspired by BioPerl's seq3.
    """
    threecode = {'A': 'Ala', 'B': 'Asx', 'C': 'Cys', 'D': 'Asp',
                 'E': 'Glu', 'F': 'Phe', 'G': 'Gly', 'H': 'His',
                 'I': 'Ile', 'K': 'Lys', 'L': 'Leu', 'M': 'Met',
                 'N': 'Asn', 'P': 'Pro', 'Q': 'Gln', 'R': 'Arg',
                 'S': 'Ser', 'T': 'Thr', 'V': 'Val', 'W': 'Trp',
                 'Y': 'Tyr', 'Z': 'Glx', 'X': 'Xaa', '*': 'Ter',
                 'U': 'Sel', 'O': 'Pyl', 'J': 'Xle'}
    # We use a default of 'Xaa' for undefined letters
    # Note this will map '-' to 'Xaa' which may be undesirable!
    return ''.join([threecode.get(aa, 'Xaa') for aa in seq])
0afe4b6ffe5edba865bd5810ae60fb2aa05320d6
359,168
import unicodedata


def is_chinese_char(cc):
    """
    Check if the character is Chinese.

    Note: this tests for the Unicode category 'Lo' (Letter, other),
    which covers CJK ideographs but also other caseless scripts.

    args:
        cc: char
    output:
        boolean
    """
    return unicodedata.category(cc) == 'Lo'
d376e6097e628ac2f3a7934ba42ee2772177f857
707,802
def okay(resp):
    """Check whether an HTTP response is 200 OK."""
    return resp.status_code == 200
b2b4d1c98f327639160f398ec5dba4e64fc2d4d5
469,582
import networkx as nx


def reachable_from(graph, node):
    """
    Given an nx graph and a node within this graph, return all of the nodes
    in the same connected component as the input node.
    """
    if isinstance(graph, nx.DiGraph):
        conn = nx.all_pairs_node_connectivity(graph)
        # .items() rather than the Python 2 .iteritems()
        return [n1 for (n1, dist) in conn[node].items() if dist > 0]
    elif isinstance(graph, nx.Graph):
        for comp in nx.connected_components(graph):
            if node in comp:
                return comp
    # Shouldn't get here, since the node should appear in some connected
    # component of the graph
    raise Exception("Node {} not in graph".format(node))
8364f61f602fce28798f6bf4c912fcfcc6df78a5
361,147
import torch


def sinkhorn(log_alpha, n_iters=20):  # torch version
    """Performs incomplete Sinkhorn normalization to log_alpha.

    By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved
    matrix with positive entries can be turned into a doubly-stochastic
    matrix (i.e. its rows and columns add up to one) via successive row
    and column normalization.

    - To ensure positivity, the effective input to sinkhorn has to be
      exp(log_alpha) (element wise).
    - However, for stability, sinkhorn works in the log-space. It is only at
      return time that entries are exponentiated.

    [1] Sinkhorn, Richard and Knopp, Paul. Concerning nonnegative matrices
        and doubly stochastic matrices. Pacific Journal of Mathematics, 1967

    Args:
        log_alpha: a 2D tensor of shape [N, N]
        n_iters: number of sinkhorn iterations (in practice, as few as 20
            iterations are needed to achieve decent convergence for N~100)

    Returns:
        A 3D tensor of close-to-doubly-stochastic matrices (2D tensors are
        converted to 3D tensors with batch_size equal to 1)
    """
    n = log_alpha.size()[1]
    log_alpha = log_alpha.view(-1, n, n)
    for i in range(n_iters):
        log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=2, keepdim=True)).view(-1, n, 1)
        log_alpha = log_alpha - (torch.logsumexp(log_alpha, dim=1, keepdim=True)).view(-1, 1, n)
    return torch.exp(log_alpha)
d72f33f4415f0676221d069cddd08c795d458193
71,193
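A usage sketch for the sinkhorn function above (added here, not from the original snippet); after enough iterations both row and column sums approach 1:

import torch

log_alpha = torch.randn(4, 4)
P = sinkhorn(log_alpha, n_iters=50)
print(P.shape)       # torch.Size([1, 4, 4]); 2D input is promoted to a batch of 1
print(P.sum(dim=2))  # row sums, each close to 1.0
print(P.sum(dim=1))  # column sums, each close to 1.0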
from typing import Dict
from typing import List


def subset_nested_dict(
    obj: Dict,
    key_sequence: List,
) -> Dict:
    """Create subset of nested dictionary.

    Args:
        obj: A (nested) dictionary.
        key_sequence: A sequence of keys, to be applied from outside to
            inside, pointing to the key (and descendants) to keep.

    Returns:
        Subset of input dictionary.
    """
    filt = {}
    if len(key_sequence):
        key = key_sequence.pop(0)
        filt[key] = obj[key]
        if len(key_sequence):
            filt[key] = subset_nested_dict(filt[key], key_sequence)
    return filt
2251a28437490c3424f38eb77037468a12f76173
288,269
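A usage sketch for subset_nested_dict above (added here, not from the original snippet); note that key_sequence is consumed in place by pop(0):

d = {'a': {'b': 1, 'c': 2}, 'd': 3}
assert subset_nested_dict(d, ['a', 'b']) == {'a': {'b': 1}}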
import torch


def mixup_data(x, y, l):
    """Returns mixed inputs, pairs of targets, and lambda"""
    indices = torch.randperm(x.shape[0]).to(x.device)
    mixed_x = l * x + (1 - l) * x[indices]
    y_a, y_b = y, y[indices]
    return mixed_x, y_a, y_b
063175fe09267684633b329cd4fa315cc1b9c89d
644,184
def integer_bisect(bound, f):
    """
    Finds a pair of integers (a,b) such that f(a) <= 0 < f(b) and
    |a - b| == 1. On entry, assumes that f(bound[0]) <= 0 < f(bound[1])
    """
    if bound[1] - bound[0] == 1:
        return bound
    else:
        # parenthesize the sum before halving; bound[0] + bound[1] / 2.0
        # would not compute the midpoint
        midpoint = round((bound[0] + bound[1]) / 2.0)
        if f(midpoint) <= 0:
            return integer_bisect((midpoint, bound[1]), f)
        else:
            return integer_bisect((bound[0], midpoint), f)
bfdc9cbdead3b239e0ef7f52b05e173dbf7e7658
617,476
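A usage sketch for integer_bisect above (added here, not from the original snippet), bracketing the integer square root of 10:

a, b = integer_bisect((0, 10), lambda n: n * n - 10)
assert (a, b) == (3, 4)  # 3**2 <= 10 < 4**2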
def _str_to_bool(arg):
    """
    Return True or False depending on input string

    Parses the string 'arg' and returns True if it has the value "true",
    False if it has the value "false" (None is also treated as False),
    and throws an exception otherwise.
    """
    if arg is None:
        return False
    if arg == 'true':
        return True
    elif arg == 'false':
        return False
    else:
        raise ValueError('Only values true and false permitted')
c51aeb47f0dd974c0833ed51ea36bd521c4f2b3d
239,560
def norm3_sqr(x, y, z):
    """
    Squared 2-norm for 3d vectors

    :param x: first component
    :param y: second component
    :param z: third component
    :return: x*x + y*y + z*z
    """
    return (x * x) + (y * y) + (z * z)
f11c3bd6744aa79592a552bdd3bf7db8896c6060
598,165
def get_threshold(data_vals):
    """
    Find a reasonable threshold to use based on the data values
    """
    pend_len, time_vals, sens_vals = data_vals
    max_value = sens_vals.max()
    min_value = sens_vals.min()
    threshold = 0.5 * (max_value + min_value)
    return threshold
edee1a8d32ba7f3af920c26ff576ab234f563b81
557,734
def remove_campaigns(api, assessment_id):
    """Remove all campaigns from an assessment."""
    allCampaigns = api.campaigns.get()
    for campaign in allCampaigns:
        if campaign.name.startswith(assessment_id):
            api.campaigns.delete(campaign.id)
    return True
1b7b0f99798aa7aad115b15aa025ad596f212751
75,969
def get_deadline_delta(target_horizon):
    """Returns number of days between official contest submission deadline
    date and start date of target period (0 for weeks 1-2 target, as it's
    0 days away, 14 for weeks 3-4 target, as it's 14 days away, 28 for
    weeks 5-6 target, as it's 28 days away)

    Args:
        target_horizon: "12w", "34w", or "56w" indicating whether target
            period is weeks 1 & 2, weeks 3 & 4, or weeks 5 & 6
    """
    if target_horizon == "12w":
        deadline_delta = 0
    elif target_horizon == "34w":
        deadline_delta = 14
    elif target_horizon == "56w":
        deadline_delta = 28
    else:
        raise ValueError("Unrecognized target_horizon " + target_horizon)
    return deadline_delta
82f8b54d83c6e4ac969fe9a6044d0a92f3882702
462,814
def emd_function_value(pixel_group, base):
    """Calculate and return the f value of the given pixel group.

    The f value is defined as a weighted sum of the pixel values modulo
    a chosen base.
    """
    f_value = 0
    for i in range(len(pixel_group)):
        f_value = (f_value + pixel_group[i] * (i + 1)) % base
    return f_value
d4ec4819911283a0ffd55c0060c51e8819b8c638
81,281
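A worked example for emd_function_value above (added here, not from the original snippet): for pixels (3, 5, 2) and base 8, f = (3*1 + 5*2 + 2*3) mod 8 = 19 mod 8 = 3:

assert emd_function_value([3, 5, 2], 8) == 3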
import typing
import inspect
from typing import TypeGuard


def is_async_iterable(obj: typing.Any) -> TypeGuard[typing.AsyncIterable[object]]:
    """Determine if the object is an async iterable or not."""
    attr = getattr(obj, "__aiter__", None)
    return inspect.isfunction(attr) or inspect.ismethod(attr)
890f931871455e863441ba5b813d23631c065c9d
571,708
import torch


def get_state(model, quantizer, half=False):
    """Get the state from a model, potentially with quantization applied.

    If `half` is True, weights are stored as half precision, which shouldn't
    impact performance but halves the state size.
    """
    if quantizer is None:
        dtype = torch.half if half else None
        state = {k: p.data.to(device='cpu', dtype=dtype)
                 for k, p in model.state_dict().items()}
    else:
        state = quantizer.get_quantized_state()
        state['__quantized'] = True
    return state
d2f9dcbf0778593c60967de8f7c102946f97e52b
253,730
def normalize_go_id(identifier: str) -> str:
    """If a GO term does not start with the ``GO:`` prefix, add it."""
    if not identifier.startswith('GO:'):
        return f'GO:{identifier}'
    return identifier
01cac614f0a8833edc5ffedfcb9008b21dba2d6e
582,839
def make_title(text, underline="="):
    """
    Turn a string into a Markdown/rst title

    Arguments:
        text (str): text to make into title
        underline (str): underline character (defaults to '=')

    Returns:
        String: title text.
    """
    return "%s\n%s" % (text, underline * len(text))
99bd8825f500bad42c051f9db701b3da108ba61b
476,632
def get_user_quests(user):
    """Get the quests the user is participating in."""
    return user.quest_set.filter(
        questmember__user=user,
        questmember__opt_out=False,
        questmember__completed=False
    )
ae5e0c7fddd79b167e274ee8cde7ced7010ed492
487,246
import pwd


def username_from_uid(uid: int) -> str:
    """Convert a UID to a username."""
    try:
        username = pwd.getpwuid(uid).pw_name.replace(".", "__")
    except KeyError:
        username = f"__unknown_uid_{uid}__"
    return username
ae400888f3c89f8b26c413138c42b430285fe6f9
45,891
def get_method_name_from_frame(frame) -> str:
    """
    Retrieves the method name from the specified frame.

    :param frame: the frame to check
    :return: the method name from the specified frame
    """
    return frame.f_code.co_name
a8e3a19a357482e267fd5cec9054b9a840b1efe2
536,532
import pickle


def load_pickle(filepath: str) -> object:
    """
    Loads a pickle file to memory.
    :param filepath: Path pointing to file
    """
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
12e7cc7825814b4e9e1d2e15ac87e95e22315004
647,914
def str_name_value(name, value, tab=4, ljust=25):
    """
    This will return a str of name and value with uniform spacing
    :param name: str of the name
    :param value: str of the value
    :param tab: int of the number of spaces before the name
    :param ljust: int of the ljust to apply to name
    :return: str of the formatted string
    """
    rep_name = (name.startswith('_') and name[1:]) or name
    try:
        return ' ' * tab + str(rep_name).ljust(ljust) + \
               str(value).replace('\n', '\n' + ' ' * (ljust + tab))
    except Exception:  # avoid a bare except; report the serialization failure
        rep_name = "Exception in serializing %s value" % name
        return ' ' * tab + str(rep_name).ljust(ljust) + \
               str(value).replace('\n', '\n' + ' ' * (ljust + tab))
fdcbba230e6045c3f84bc050cf3774fe0e4c6036
36,509
def random_walk_for_loop(start, states, n):
    """Random walk using a Python for loop"""
    acc = start
    for _ in range(n):
        acc = acc @ states
    return acc
c84a09c6e8b76ffb74ad7db543522adbbcf86b3d
396,055
def current_filename_with_extensions(filename, extensions):
    """
    A private helper method. Returns the filename and its extensions.

    :param filename: a string, the file's name
    :param extensions: a list, the extensions
    :return: a string, a filename with extensions
    """
    filename_with_extensions = filename
    for extension in extensions:
        filename_with_extensions += "." + extension
    return filename_with_extensions
868715ebf3aabe5226ad9107ff1cf77a4f1b6b11
426,893
def embeddedness(target_list, compared_list):
    """
    Measure the embeddedness of one list within another;
    embeddedness of A in B = #(A int B)/#A
    :param target_list: The target list
    :param compared_list: The list to be compared with
    :return: Embeddedness score
    """
    intersection = [e for e in target_list if e in compared_list]
    return len(intersection) / len(target_list)
cb421e31a15c863024352d6fc5e0455b871e97dd
164,307
import torch


def collate_fn(data):
    """Creates mini-batch tensors from a list of (composite, background)
    image pairs.

    Args:
        data: list of tuples of
            - comp_img: torch tensor of shape (3, 256, 256).
            - bg_img: torch tensor of shape (3, 256, 256).

    Returns:
        Two torch tensors of shape (batch_size, 3, 256, 256).
    """
    comp_imgs, bg_imgs = zip(*data)
    comp_imgs = list(comp_imgs)
    bg_imgs = list(bg_imgs)
    return torch.stack(comp_imgs, dim=0), torch.stack(bg_imgs, 0)
a2c4c99376d428612a8acf7754f1a22bcdb848f1
332,008
import json


def _load_notebook(f):
    """
    Load the ipython notebook as a dict.

    Parameters
    ----------
    f: str
        Path to the notebook file.

    Returns
    -------
    dict
        The parsed contents of the notebook file.
    """
    with open(f, 'r') as json_file:
        notebook = json.load(json_file)
    return notebook
3f7f23de647c813a9464525b198c0598c0960a2c
505,495
def double_day(bday1, bday2):
    """Calculates the day when one person is twice as old as the other one"""
    if bday1 > bday2:
        delta = bday1 - bday2
        double_day = bday1 + delta
    else:
        delta = bday2 - bday1
        double_day = bday2 + delta
    return double_day
f9b69c324c2ec5da6ff538b2dbd89bfbe1cbba3a
323,329
def _value_grouped(df, index_grouped, col):
    """Return index_grouped as list of value lists"""
    value = []
    for item in index_grouped:
        value_val = df.iloc[item, col].to_list()
        value.append(value_val)
    return value
5491260469a06d57b9d4bdda04fa4e29011250ed
135,840
import string
import random


def random_str(length: int = 10, unique: str = '') -> str:
    """Generate a random string

    Args:
        length (int): Optional. The length of the generated str.
        unique (str): Optional. Add a unique str to the end of the generated
            str (if you want to make sure that it's unique).

    Returns:
        str: randomly generated string
    """
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for _ in range(length)) + unique
b8bf3d1d46f686135cf9a22cb57e669b1614d4bc
437,342
def area(box):
    """ area of box """
    return (box[3] - box[1]) * (box[2] - box[0])
5e4213ce0e3af75498f21d6835d20af77c8690ec
541,457
import collections


def majority_element(arr):
    """
    Given an array of size n, find the majority element. The majority
    element is the element that appears more than floor(n/2) times.
    """
    counts = collections.defaultdict(int)
    for elem in arr:
        counts[elem] += 1
        if counts[elem] > len(arr) / 2:
            return elem
f5cced7c9c5d6218f328665db9bfb0a44a0b75cf
82,227
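A usage sketch for majority_element above (added here, not from the original snippet); the function implicitly returns None when no element exceeds the floor(n/2) threshold:

assert majority_element([3, 3, 4, 2, 3, 3, 5, 3]) == 3
assert majority_element([1, 2, 3]) is None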
def vehicle_offset(img, left_x, right_x, xm_per_pix):
    """Measures offset of the vehicle from the center of the lane."""
    lane_center = (left_x + right_x) / 2
    x_max = img.shape[1]
    vehicle_center = x_max / 2
    return xm_per_pix * (vehicle_center - lane_center)
8cd95db5a2413a6fae620ac4e020afc2130c3c90
435,516
def WI_statewide_eqn(Qm, A, Qr, Q90):
    """Regression equation of Gebert and others (2007, 2011) for estimating
    average annual baseflow from a field measurement of streamflow during
    low-flow conditions.

    Parameters
    ----------
    Qm : float or 1-D array of floats
        Measured streamflow.
    A : float or 1-D array of floats
        Drainage area in watershed upstream of where Qm was taken.
    Qr : float or 1-D array of floats
        Recorded flow at index station when Qm was taken.
    Q90 : float or 1-D array of floats
        Q90 flow at index station.

    Returns
    -------
    Qb : float or 1-D array of floats, of length equal to input arrays
        Estimated average annual baseflow at point where Qm was taken.
    Bf : float or 1-D array of floats, of length equal to input arrays
        Baseflow factor. see Gebert and others (2007, 2011).

    Notes
    -----
    Gebert, W.A., Radloff, M.J., Considine, E.J., and Kennedy, J.L., 2007,
    Use of streamflow data to estimate base flow/ground-water recharge for
    Wisconsin: Journal of the American Water Resources Association,
    v. 43, no. 1, p. 220-236,
    http://dx.doi.org/10.1111/j.1752-1688.2007.00018.x

    Gebert, W.A., Walker, J.F., and Kennedy, J.L., 2011, Estimating 1970-99
    average annual groundwater recharge in Wisconsin using streamflow data:
    U.S. Geological Survey Open-File Report 2009-1210, 14 p., plus
    appendixes, available at http://pubs.usgs.gov/ofr/2009/1210/.
    """
    Bf = (Qm / A) * (Q90 / Qr)
    Qb = 0.907 * A**1.02 * Bf**0.52
    return Qb.copy(), Bf.copy()
e02dc3ef384435d93eab7fdc6f96d60e98fe4c6f
676,934
def to_list_of_lists(matrix):
    """ Convert a 2D matrix into a list of lists """
    return [
        [matrix[i, j] for j in range(matrix.shape[1])]
        for i in range(matrix.shape[0])
    ]
b5d31660c6bfc31ab9ed4f79e82548dde977921d
275,171
def StartsWith(lines, pos, string):
    """Returns True iff the given position in lines starts with 'string'."""
    return lines[pos.line][pos.column:].startswith(string)
1c427d15b9376c87f6f8a01319724473d21a06fa
70,416
def extract_events(json_node, collect_function):
    """Depth-first search of events returning them as a list of lists,
    therefore implicitly saving the parent-child relationship between events.

    Args:
        json_node: Json representation of the current events.
        collect_function: Function that collects the desired data for events.

    Returns:
        A list of lists representation that contains all the events
        traversed by the depth-first search.
    """
    if len(json_node) == 0:
        return []
    extracted_events = list()
    child_events = json_node['children']
    if child_events == []:
        extracted_events.extend([collect_function(json_node), []])
    else:
        extracted_events.extend([collect_function(json_node),
                                 [extract_events(ce, collect_function)
                                  for ce in child_events]])
    return extracted_events
63c591bcef87b38c9a47eb8221fe9eacdb36001d
508,167
from typing import Union
from typing import Dict
from typing import Any
import pathlib
import json


def load(file_path_string: Union[str, pathlib.Path]) -> Dict[str, Any]:
    """Loads a workspace from a file.

    Args:
        file_path_string (Union[str, pathlib.Path]): path to the file to
            load the workspace from

    Returns:
        Dict[str, Any]: ``pyhf``-compatible `HistFactory` workspace
    """
    file_path = pathlib.Path(file_path_string)
    ws = json.loads(file_path.read_text())
    return ws
1eacb0e816d01f96143f68114b8439aa785144a8
490,896
def instances_for_export(data_dictionary, start=None, end=None):
    """
    Returns Instance submission queryset filtered by start and end dates.
    """
    kwargs = dict()
    if start:
        kwargs['date_created__gte'] = start
    if end:
        kwargs['date_created__lte'] = end
    return data_dictionary.instances.filter(**kwargs)
271754ec0b574f2992a4937bf2d6672a9da7667c
487,496
import math


def lzprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z < 0, zprob(z) = 1-tail probability
        for z > 0, 1.0 - zprob(z) = 1-tail probability
        for any z, 2.0 * (1.0 - zprob(abs(z))) = 2-tail probability

    Adapted from z.c in Gary Perlman's |Stat.

    Usage: lzprob(z)
    """
    Z_MAX = 6.0  # maximum meaningful z-value
    if z == 0.0:
        x = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX * 0.5):
            x = 1.0
        elif y < 1.0:
            w = y * y
            x = ((((((((0.000124818987 * w
                        - 0.001075204047) * w + 0.005198775019) * w
                      - 0.019198292004) * w + 0.059054035642) * w
                    - 0.151968751364) * w + 0.319152932694) * w
                  - 0.531923007300) * w + 0.797884560593) * y * 2.0
        else:
            y = y - 2.0
            x = (((((((((((((-0.000045255659 * y
                             + 0.000152529290) * y - 0.000019538132) * y
                           - 0.000676904986) * y + 0.001390604284) * y
                         - 0.000794620820) * y - 0.002034254874) * y
                       + 0.006549791214) * y - 0.010557625006) * y
                     + 0.011630447319) * y - 0.009279453341) * y
                   + 0.005353579108) * y - 0.002141268741) * y
                 + 0.000535310849) * y + 0.999936657524
    if z > 0.0:
        prob = (x + 1.0) * 0.5
    else:
        prob = (1.0 - x) * 0.5
    return prob
8f2d367f7820675a55990e490e15f41577c05753
209,683
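A few sanity checks for lzprob above (added here, not from the original snippet), against well-known normal CDF values:

assert abs(lzprob(0.0) - 0.5) < 1e-9
assert abs(lzprob(1.96) - 0.975) < 1e-3
assert abs(lzprob(-1.96) - 0.025) < 1e-3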
from pathlib import Path
from typing import Optional


def get_file_by_type(folder: Path, suffix: str) -> Optional[Path]:
    """ Extracts a file by the suffix. If no files with the suffix are found
    or more than one file is found, returns `None`."""
    if not suffix.startswith('.'):
        suffix = '.' + suffix
    candidates = [i for i in folder.iterdir() if i.suffix == suffix]
    if len(candidates) == 1:
        filename = candidates[0]
    else:
        filename = None
    return filename
a062be4f3a449473b725f735a622b24432212934
654,224
def parse_bad_channels(sss_log):
    """Parse bad channels from sss_log."""
    with open(sss_log) as fid:
        bad_lines = {line for line in fid.readlines() if 'Static bad' in line}
    bad_channels = list()
    for line in bad_lines:
        chans = line.split(':')[1].strip(' \n').split(' ')
        for cc in chans:
            ch_name = 'MEG%01d' % int(cc)
            if ch_name not in bad_channels:
                bad_channels.append(ch_name)
    return bad_channels
c92473bc093b97725d669cc812db222dd81a831b
326,724
def comp_periodicity_time(self, slip=0):
    """Compute the (anti-)periodicities of the machine in time domain

    Parameters
    ----------
    self : Machine
        A Machine object
    slip : float
        Rotor asynchronous slip

    Returns
    -------
    pert_S : int
        Number of periodicities of the machine over time period
        (p/felec by default if Nrev is None) in static referential
    is_apert_S : bool
        True if an anti-periodicity is possible after the periodicities
        (in static referential)
    pert_R : int
        Number of periodicities of the machine over time period
        (p/felec by default if Nrev is None) in rotating referential
    is_apert_R : bool
        True if an anti-periodicity is possible after the periodicities
        (in rotating referential)
    """
    if slip == 0:
        # Rotor and fundamental field rotate synchronously.
        # In static referential (stator), rotor (anti-)periodicity in the
        # spatial domain becomes (anti-)periodicity in the time domain.
        pert_S, is_apert_S = self.rotor.comp_periodicity_spatial()
        # In rotating referential (rotor), the fundamental field is static,
        # so there is no anti-periodicity and the periodicity is given by
        # the stator's spatial periodicity.
        pert_R, _ = self.stator.comp_periodicity_spatial()
        is_apert_R = False
    else:
        # In case of non-zero slip, rotor and fundamental field rotate
        # asynchronously, so there is no (anti-)periodicity in the time
        # domain.
        pert_S, is_apert_S, pert_R, is_apert_R = 1, False, 1, False
    return pert_S, is_apert_S, pert_R, is_apert_R
e9512b2b71434ddfeb3dc3e99dbd376016ae1031
564,787
def chunker(seq, size):
    """Returns non-overlapping chunks of length `size` for a given sequence.

    Can be used in a loop:
        A = 'ABCDEFG'
        for group in chunker(A, 2):
            print(group)
        -> 'AB' 'CD' 'EF' 'G'
    """
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
708a46291ee904646c0de1e74890c412b406e58d
411,627
def blockchain_p2p_ports(blockchain_number_of_nodes, port_generator):
    """
    A list of unique port numbers to be used by the blockchain nodes
    for the p2p protocol.
    """
    return [next(port_generator) for _ in range(blockchain_number_of_nodes)]
c5aadcc94d1c05fa9a6694b8fb5a98a52f3fac9e
394,053
def root(path):
    """Return the root name of a path (with directory included)"""
    # pathlib's with_suffix takes a single argument
    return path.with_suffix("")
1ba80d1e00083da8d0d630ec2dd13e8e854b77dd
402,256
def normalize(yml_dict, validator, schema, plot_types):
    """
    Inputs:
        :yml_dict: (dictionary) the dictionary to be normalized against a schema
        :validator: (SimmerValidator) the validator object used.
        :schema: the schema against which the yml_dict is normalized.
        :plot_types: (list of strings) the basic plot_types that must be in
            the uppermost keys.

    Outputs:
        :normalized: normalized dictionary.
    """
    validator.schema = schema
    for plot_type in plot_types:
        if plot_type not in yml_dict.keys():
            yml_dict[plot_type] = {}
    normalized = validator.normalized(yml_dict)
    return normalized
7fef0ffe951b6713beafd1e3ff17f41adc871d46
191,971
import random
import string


def random_password(minlength=20, maxlength=30):
    """ Generate random string used as password. """
    length = random.randint(minlength, maxlength)
    letters = string.ascii_letters + string.digits
    return ''.join([random.choice(letters) for _ in range(length)])
25b0f61c6d77da7e4ab7958c7520d23c4495dbed
311,217
def parse_specific_gate_opts(strategy, fit_opts):
    """Parse the options from ``fit_opts`` which are relevant for ``strategy``.
    """
    gate_opts = {
        'tol': fit_opts['tol'],
        'steps': fit_opts['steps'],
        'init_simple_guess': fit_opts['init_simple_guess'],
        'condition_tensors': fit_opts['condition_tensors'],
        'condition_maintain_norms': fit_opts['condition_maintain_norms'],
    }
    if 'als' in strategy:
        gate_opts['solver'] = fit_opts['als_solver']
        gate_opts['dense'] = fit_opts['als_dense']
        gate_opts['enforce_pos'] = fit_opts['als_enforce_pos']
        gate_opts['pos_smudge'] = fit_opts['als_enforce_pos_smudge']
    elif 'autodiff' in strategy:
        gate_opts['autodiff_backend'] = fit_opts['autodiff_backend']
        gate_opts['autodiff_optimizer'] = fit_opts['autodiff_optimizer']
    return gate_opts
bd41f2cdf45bea9ad4a0ce26b03f226e76584716
92,830
def _is_non_negative_float(item):
    """Verify that value is a non-negative number."""
    if not isinstance(item, (int, float)):
        return False
    return item >= 0
ba942a5ad1527caa0da4192b46abc834ac7c5fac
518,885