content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def _generate_new_prefix(current_prefix, class_name):
    """
    Generate the new prefix to be used when handling nested configurations.

    Examples:
        >>> _generate_new_prefix("", "config_group_1")
        "CONFIG_GROUP_1"
        >>> _generate_new_prefix("my_app", "another_config_group")
        "MY_APP_ANOTHER_CONFIG_GROUP"

    :param current_prefix: the prefix accumulated so far, can be empty
    :param class_name: the name of the nested configuration class being added
    :return: upper-cased prefix; the two parts are joined with "_" when a
        current prefix exists
    """
    return (
        "_".join((current_prefix, class_name)).upper()
        if current_prefix
        else class_name.upper()
    )
ebf79d4ac649de2f0e813cc538d1fdd530600f9c
55,227
from typing import Dict from typing import Any import zlib import base64 def _serialize_bytes(data: bytes, compress: bool = True) -> Dict[str, Any]: """Serialize binary data. Args: data: Data to be serialized. compress: Whether to compress the serialized data. Returns: The serialized object value as a dict. """ if compress: data = zlib.compress(data) value = { "encoded": base64.standard_b64encode(data).decode("utf-8"), "compressed": compress, } return {"__type__": "b64encoded", "__value__": value}
c77236ef8d3e019d09d1c9de53627f87cb88b4b2
55,230
import re


def parse_cuda_device_id(device: str) -> int:
    """Parse the cuda device index from a string.

    Args:
        device (str): The typical style of string specifying cuda device,
            e.g.: 'cuda:0'.

    Returns:
        int: The parsed device id, defaults to `0`.
    """
    match_result = re.match('([^:]+)(:[0-9]+)?$', device)
    assert match_result is not None, f'Can not parse device {device}.'
    assert match_result.group(1).lower() == 'cuda', 'Not cuda device.'
    # Without an explicit ":<n>" suffix the device id defaults to 0.
    if match_result.lastindex == 1:
        return 0
    return int(match_result.group(2)[1:])
37b951cdf680d9efdd8a663ccaab9b7a915290cb
55,234
import math


def combination_num(n, r):
    """Return "n choose r": the number of ways to pick r items from n.

    :param n: (int) n distinct objects; must be > 0 and >= r
    :param r: (int) sample of r elements; must be > 0
    :raises Exception: if n < r, or if either n or r is 0
    :return: the binomial coefficient C(n, r)
    """
    if n < r:
        raise Exception('n cannot be less than r')
    if n == 0 or r == 0:
        raise Exception('n or r cannot be 0')
    # math.comb computes the result with exact integer arithmetic, avoiding
    # the precision loss of the factorial true-division for large n.
    return math.comb(n, r)
593c801c1bd878d62191a207a5b79174c1427db1
55,242
def msi(b8a, b11):
    """
    Moisture Stress Index (Rock, Williams, and Vogelmann, 1985).

    .. math:: MSI = b11 / b8a

    :param b8a: NIR narrow.
    :type b8a: numpy.ndarray or float
    :param b11: SWIR 1.
    :type b11: numpy.ndarray or float
    :returns MSI: Index value

    .. Tip:: Rock, B.N., Williams, D.L., Vogelmann, J.E. 1985. Field and
        airborne spectral characterization of suspected acid deposition
        damage in red spruce (picea rubens) from vermont. in: Proceedings
        of the 11th International Symposium—Machine Processing of Remotely
        Sensed Data, West Lafayette, IN, USA, 25-27 June 1985; pp. 71-81.
    """
    return b11 / b8a
3be0d40e9d730edd3253f8523fb0c4aed5c13130
55,244
def flatten_list(l):
    """Flatten a 2D list into a single new list, preserving order.

    Uses a comprehension instead of ``sum(l, [])``, which is quadratic
    because every ``+`` builds a fresh intermediate list.

    :param l: list of lists
    :return: new flat list with every item of every sublist
    """
    return [item for sublist in l for item in sublist]
11250bd14c2da90dc9839d0bc89a9eac8f6770f1
55,245
def make_unpack_map(node):
    """
    Build the unpack-value mapping for a template instance.

    Parameters
    ----------
    node : TemplateInstanceNode
        The compiler node for the template instantiation.

    Returns
    -------
    result : dict
        A dict mapping unpack name to compiler node for the template
        instantiation.
    """
    pairs = zip(node.names, node.iternodes())
    return {name: child for name, child in pairs}
7e48608fd2ca73001ca4bf3d9d8398a344b64efa
55,248
import inspect


def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string.
    default : any
        Default value (if any) of the type.

    Returns
    -------
    str
        String version of the type annotation.
    """
    try:
        if typ.__module__ == "builtins":
            type_string = f"`{typ.__name__}`"
        else:
            type_string = f"`{typ.__module__}.{typ.__name__}`"
    except AttributeError:
        # Objects without __name__ (e.g. typing generics) fall back to str().
        type_string = f"`{str(typ)}`"
    if default is None:
        return f"{type_string}, default ``None``"
    if default == inspect._empty:
        return type_string
    return f"{type_string}, default ``{default}``"
4d3200f113ad8818203c86b5bd288ca245dc3aca
55,249
def get_thing_types(tx):
    """
    Get all schema types, excluding those for implicit attribute relations
    and the base types ('thing', 'relation', 'entity', 'attribute').

    Args:
        tx: Grakn transaction

    Returns:
        List of Grakn type labels
    """
    schema_concepts = tx.query(
        "match $x sub thing; "
        "not {$x sub @has-attribute;}; "
        "not {$x sub @key-attribute;}; "
        "get;")
    labels = [schema_concept.get('x').label()
              for schema_concept in schema_concepts]
    # Filter in one pass instead of abusing a list comprehension for the
    # side effect of .remove(), which also raised ValueError whenever one
    # of the base labels happened to be absent.
    base_types = {'thing', 'relation', 'entity', 'attribute'}
    return [label for label in labels if label not in base_types]
7a9909ad9f4ff4b0444466be621a472afce00b86
55,250
def _last_char(word): """Get last alphanumeric character of word. :param word: word to get the last letter of. """ for i in range(len(word)): if word[len(word)-1-i].isalpha() or word[len(word)-1-i].isdigit(): return len(word) - 1 - i return -1
e01f3a6283e11c13eef0e6272fe76b6f2254d582
55,260
import re


def normalize_docstring(docstring):
    """
    Collapse every run of whitespace in the given string to a single space
    and trim the ends.

    >>> normalize_docstring('This is\ta docstring.')
    'This is a docstring.'
    """
    collapsed = re.sub(r"[\r\n\t ]+", " ", docstring)
    return collapsed.strip()
2d500d981f7d5c6929b31466ecac04115493efc1
55,262
def make_hash(args):
    """Generate a unique hash string for the experience.

    :param args: mapping with 'network', 'method', 'directivity' keys and
        an optional 'killing' key.
    :return: formatted identifier string
    """
    template = "%(network)s-m=%(method)s-d=%(directivity)s"
    if "killing" in args:
        template += "-k=%(killing)s"
    return template % args
5933217673d15abed8460131432f62440072d1dd
55,263
def get_long_be(s):
    """Convert the first 4 bytes/chars of a value to a big-endian integer.

    Generalized to accept bytes-like objects: on Python 3, indexing bytes
    yields ints, so the original ``ord()`` calls raised TypeError there.

    :param s: bytes, bytearray, or str with at least 4 elements
    :return: unsigned 32-bit integer built big-endian from the first 4
    """
    if isinstance(s, (bytes, bytearray)):
        return int.from_bytes(s[:4], 'big')
    return (ord(s[0]) << 24) | (ord(s[1]) << 16) | (ord(s[2]) << 8) | ord(s[3])
6c9b27066b36eebc5b74d021e8c3b8f4463ae0da
55,265
from collections import OrderedDict


def parse_attributes(attribute_str):
    """
    Parse the attribute field of a GFF3 file.

    :param attribute_str: attribute field as string, e.g. "ID=x;Name=y"
    :return: OrderedDict with the value for each attribute key
    """
    # Import OrderedDict from collections (the concrete class) rather than
    # typing (the annotation alias, deprecated for instantiation).
    output = OrderedDict()
    for attribute in attribute_str.split(';'):
        # Split on the first '=' only, so values containing '=' survive.
        key, value = attribute.split('=', 1)
        output[key] = value
    return output
51aa127d00934b22334749f37910d9d379e36018
55,268
import re


def formula_to_composition(formula):
    """
    Break a molecular formula into its constituent elements.

    :param formula: formula string such as "C6H12O6"
    :return: dict mapping element symbol to its integer count
    """
    composition = {}
    # Each token is one element symbol with an optional trailing count.
    for token in re.findall('[A-Z][a-z]?[0-9]*', formula):
        pieces = re.split('([0-9]+)', token)
        symbol = pieces[0]
        count = int(pieces[1]) if len(pieces) > 1 else 1
        composition[symbol] = count
    return composition
4725e74a660f9302349659f35ae002a374ab7cde
55,271
def summarize(seq, field_name, func=None, msg=None):
    """
    Compute a summary of a given field (eg. count of different values).

    Resulting dictionary is either passed to `func`, or printed on screen
    (if `func is None`).

    :param seq: Datastream (iterable of dict-like items)
    :param field_name: Field name to summarize; if None, the stream passes
        through untouched
    :param func: Function to call with the summary after the stream is
        exhausted. If `None`, summary is printed on screen.
    :param msg: Optional message to print before summary
    :return: Original stream (as a generator)
    """
    if field_name is None:
        # Bug fix: `return seq` inside a generator function produced an
        # empty generator, silently dropping the whole stream.
        yield from seq
        return
    counts = {}
    for item in seq:
        value = item.get(field_name)
        if value is not None:
            counts[value] = counts.get(value, 0) + 1
        yield item
    if func is not None:
        func(counts)
    elif len(counts.keys()) > 0:
        if msg is not None:
            print(msg)
        for t in counts.keys():
            print(" + {}: {}".format(t, counts[t]))
3d7e1b0fcec9a1464d5fd1e1589c5c794af34bc8
55,273
def _total_query(db): """ Query # of lead art elements """ result = db.query(""" select count(distinct(lead_art_url)) as count from seamus """) value = list(result).pop() return value['count']
ac1320612cf6f84323870a6ce604e4a4a7b5adb8
55,274
import re


def strip_text(text, pattern='.* ?- ?(.*) (Abc Iview)?.*'):
    """
    Strip text matching the pattern from the string.

    Args:
        text (str): the string to clean
        pattern (str): regex for matching the text to remove; must define a
            single capture group holding the text to keep.

    Returns:
        The captured substring, or the original string when the pattern
        does not match.
    """
    match = re.match(pattern, text)
    return match.group(1) if match else text
e735d44a69a7435bc88b43989dc785a022f66fb3
55,281
def _otus_abundant(freq_counts, rare_threshold): """Count number of abundant OTUs.""" return freq_counts[rare_threshold + 1:].sum()
8c5545eea2f1e982d088a2bc02d7a072649ddb17
55,284
def check_sequence_names_unique(sequence):
    """Check that the names of entities in the sequence are all unique.

    :param sequence: iterable of dicts, each with a 'name' key
    :return: dict with a boolean 'result' and a human-readable 'message'
    """
    names = [entry['name'] for entry in sequence]
    if len(set(names)) != len(names):
        return {
            'result': False,
            'message': "All names of entries in ssl_sequence must be unique."
        }
    return {
        'result': True,
        'message': "All entry names are unique."
    }
f1777c0911bdae0b1b88eabb280a0e62f5aa96fc
55,286
def std_chr_name(chrom_str):
    """Standardize a chromosome name so it starts with lowercase 'chr'.

    Handles any capitalization of an existing "chr" prefix ("Chr", "CHR",
    "cHr", ...), not just "Chr" as before, and prepends "chr" otherwise.

    :param chrom_str: chromosome name, e.g. "1", "Chr1", "chrX"
    :return: name guaranteed to start with "chr"
    """
    if chrom_str.startswith("chr"):
        return chrom_str
    if chrom_str[:3].lower() == "chr":
        # Strip the differently-cased prefix before re-adding "chr".
        return "chr" + chrom_str[3:]
    return "chr" + chrom_str
f5fe9c35cf9ba33e80d7e28b3a1a3cef4815fffe
55,296
def new_source(resource_id, text, offset, length):
    """
    Create a new SADFace source (a Python dict).

    A source always refers to an existing resource object (via the supplied
    resource ID) and identifies a section of text in that resource, using
    an offset & segment length to locate the text in the original resource.
    As resource objects grow new "types", the source object must be
    enhanced in step so sources can index sub-parts of resources.

    :param resource_id: ID of the resource this source points into
    :param text: the identified text segment
    :param offset: offset of the segment within the resource
    :param length: segment length

    Returns:
        A Python dict representing the new SADFace source
    """
    return {
        "resource_id": resource_id,
        "text": text,
        "offset": offset,
        "length": length,
    }
103edd887c3a87f8af5d3ea935c5e1a8214eb0ac
55,298
import base64


def from_base64(data):
    """
    Utility function to base64 decode.

    :param data: The data to decode (str or bytes).
    :return: The decoded bytes.
    """
    decoded = base64.b64decode(data)
    return decoded
99c00438e3e106d35dabb83b3f3474e82884dfd0
55,300
from typing import List


def insertion_sort(array: List[float]):
    """
    Sort the array in place with insertion sort and return it.

    Each element from index 1 onward is shifted left past any larger
    predecessors until it sits in sorted position. Faster than selection
    sort on mostly-sorted input, slower in the worst case.

    Time complexity: O(N^2)
    """
    for current in range(1, len(array)):
        value = array[current]
        slot = current - 1
        # Shift larger predecessors one position to the right.
        while slot >= 0 and value < array[slot]:
            array[slot + 1] = array[slot]
            slot -= 1
        array[slot + 1] = value
    return array
0134b0b3f449c5fc9a0fa12a0de52beebef86dca
55,310
import struct


def keytag(dnskey):
    """
    Given a dns.rdtypes.ANY.DNSKEY, compute and return its keytag.

    See rfc2535 section 4.1.6 for details.

    Fixed for Python 3: indexing/iterating ``bytes`` already yields ints,
    so the Python-2-era ``ord()`` calls (which raised TypeError) are gone.
    """
    key = bytes(dnskey.key)
    if dnskey.algorithm == 1:
        # RSA/MD5: the tag is the 16 bits located 3 bytes from the key end.
        return (key[-3] << 8) + key[-2]
    header = struct.pack("!HBB", dnskey.flags, dnskey.protocol,
                         dnskey.algorithm)
    ac = 0
    for i, value in enumerate(header + key):
        # Even offsets contribute the high byte, odd offsets the low byte.
        ac += value if i % 2 else value << 8
    ac += (ac >> 16) & 0xffff
    return ac & 0xffff
7bfe986fbd42d6978106062ee8f4e69edfb8d8c0
55,313
from typing import List
from typing import Optional
from pathlib import Path


def determine_package_relevance(dep_src_paths: List[str],
                                src_paths: Optional[List[str]] = None) -> bool:
    """Determine if the package is relevant to the given source paths.

    A package is relevant if any of its dependent source paths contains one
    of the given source paths. If no source paths are given, the default
    is True.

    Args:
        dep_src_paths: List of source paths the package depends on.
        src_paths: List of source paths of interest.
    """
    if not src_paths:
        return True
    dep_paths = [Path(d) for d in dep_src_paths]
    for src in src_paths:
        src_path = Path(src)
        for dep_path in dep_paths:
            try:
                # Raises ValueError when src_path isn't under dep_path.
                src_path.relative_to(dep_path)
            except ValueError:
                continue
            return True
    return False
f99236a7acffe031743c7d672c4b915fbceae9f7
55,317
def xy_sequencer(start_x, start_y, stroke_height, stroke_spacing,
                 n_strokes, both_directions=True):
    """
    Generate the list of (x, y) coordinates of the path the LU20 sampler
    must follow.

    Two path shapes are supported. With ``both_directions=True`` the path
    is a sideways "S": vertical strokes alternate direction and the x
    position increments after each stroke. With ``both_directions=False``
    the path looks like a comb: each stroke goes out and retraces back to
    the original height BEFORE incrementing horizontally. The final
    coordinate returns the sample to its original y position with x
    prepped for the next stroke.

    Parameters
    ----------
    start_x : float
        Initial X coordinate; the first stroke begins here.
    start_y : float
        Initial Y coordinate; the first stroke begins here.
    stroke_height : float
        Total y-axis distance of each stroke.
    stroke_spacing : float
        Horizontal (x-axis) distance between vertical strokes.
    n_strokes : float
        Number of vertical strokes to complete.
    both_directions : bool, optional
        Defaults to True (the "S" path); False gives the comb path.

    Returns
    -------
    list of tuples
        The (x, y) coordinates defining the path; index 0 is the initial
        position.
    """
    coords = [(start_x, start_y)]
    sign = 1
    for _ in range(n_strokes):
        # Vertical stroke from the current position.
        last_x, last_y = coords[-1]
        coords.append((last_x, last_y + sign * stroke_height))
        if both_directions:
            # Alternate the direction of the next stroke ("S" path).
            sign = -sign
        else:
            # Comb path: retrace the stroke back to the starting height.
            last_x, last_y = coords[-1]
            coords.append((last_x, last_y - sign * stroke_height))
        # Horizontal increment to the next stroke position.
        last_x, last_y = coords[-1]
        coords.append((last_x + stroke_spacing, last_y))
    # Final move: original height, x prepped for the next stroke.
    coords.append((coords[0][0] + n_strokes * stroke_spacing, coords[0][1]))
    return coords
3c8fdeaf64a63325315952d2fd675aa85ffd1d9c
55,321
from typing import List


def oc_n_project(project: str) -> List:
    """Construct ['oc', '-n', '<project name>']."""
    command = ['oc', '-n']
    command.append(project)
    return command
39560a822bc887df5113cd27cd0b330a6fdd7247
55,323
def create_nd_array(shape):
    """Create an n-dimensional nested list filled with 0.

    :param shape: sequence of dimension sizes; an empty shape yields the
        scalar 0
    :return: nested lists of zeros with the given shape (fresh lists at
        every level)
    """
    if len(shape) == 0:
        return 0
    inner_shape = shape[1:]
    return [create_nd_array(inner_shape) for _ in range(shape[0])]
9dc4f4debc217d4f16f9ce1bf129621dcf5fdbc7
55,328
import math


def _edge_lengths(points):
    """
    Compute edge lengths from ordered points.

    Assumes points are in order.

    :param points: sequence of exactly 4 (x, y) pairs.
    :raises ValueError: if a different number of points is supplied.

    Returns
    -------
    list[float, ..]
        Five lengths. NOTE(review): range(-2, 3) visits the edge
        (points[-2], points[-1]) twice (at i=-2 and i=2), so the first and
        last entries describe the same edge — confirm whether 4 lengths
        (range(-1, 3)) was intended before relying on the list length.
    """
    if len(points) != 4:
        raise ValueError('Expected 4 points instead of {}'.format(len(points)))
    lengths = []
    for i in range(-2, 3):
        # Negative indices wrap around, closing the quadrilateral.
        dx = abs(points[i][0] - points[i + 1][0])
        dy = abs(points[i][1] - points[i + 1][1])
        length = math.sqrt(dx ** 2 + dy ** 2)
        lengths.append(length)
    return lengths
34b29d0bbf6eff6335918d40f54e48642881c04d
55,331
def create_multiplicative_function(multiplier):
    """
    Return multiplication by a fixed value as a function.

    :param multiplier: float value that the returned function multiplies by
    :return: function of (input_value, time) that multiplies input_value by
        the multiplier when called
    """
    def multiply(input_value, time):
        # `time` is part of the expected call signature but is unused.
        return multiplier * input_value

    return multiply
6a7c0f2c3ef9f8ea703a98e887cf3f600d54c964
55,332
def safe_get(collection, key, default=None):
    """Get a value from a collection without raising errors.

    Tries mapping-style ``.get()`` first, then indexing, then gives up.

    :param collection: mapping, sequence, or anything else (even None)
    :param key: key or index to look up
    :param default: value returned when every lookup fails
    """
    try:
        return collection.get(key, default)
    except (AttributeError, TypeError):
        # AttributeError: object has no .get() (lists, None, ...). The
        # original only caught TypeError, so those inputs crashed instead
        # of falling through to indexing.
        pass
    try:
        return collection[key]
    except (IndexError, KeyError, TypeError):
        pass
    return default
b96629f32fc11599d6ea902f5d00848ddc4aac84
55,333
def get_bench_name(func):
    """
    Get a benchmark name based on its function: everything after the first
    underscore (strips the `bench_` part).
    """
    _, _, name = func.__name__.partition('_')
    return name
bcac4ec0581587468aa47c71e36e76fcc8388afe
55,337
def list_length(data):
    """Report the length of data.

    :param data: any sized collection
    :return: number of elements
    """
    length = len(data)
    return length
b0b9792caa18af4e3d4ad03c94260a833e61934c
55,344
def flip_dim2(ndarray):
    """
    Flip the x coordinate (axis 2) for training data.

    :param ndarray: 4d array
    :return: the array with axis 2 reversed
    """
    reversed_axis2 = ndarray[:, :, ::-1, ...]
    return reversed_axis2
3a0460085a0e7f6d659ce082988e7e60acfb80e0
55,346
import random


def random_string(size):
    """Return a random string of length size, each char drawn uniformly
    from chr(0)..chr(255)."""
    chars = []
    for _ in range(size):
        chars.append(chr(random.randrange(256)))
    return "".join(chars)
7d0b15bb78be9eaa4716676b95150f40518b195d
55,347
def determine_filetype(filename):
    """
    Return the filetype of a given file by examining its extension and the
    first few bytes. Adapted from pyart.io.auto_read.py:
    https://arm-doe.github.io/pyart/

    The following filetypes are detected:
    'csv', 'txt', 'excel', 'NETCDF3', 'NETCDF4', 'HDF4', 'gzip', 'zip'.

    Parameters
    ----------
    filename : str or file-like
        Name of file to examine, or an already-open seekable binary file
        object (rewound by 12 bytes after peeking).

    Returns
    -------
    filetype : str
        Type of file, or "UNKNOWN".
    """
    # Read the first 12 bytes from the file.
    try:
        # `with` closes the handle even on read errors (the original
        # leaked it on failure).
        with open(filename, 'rb') as f:
            begin = f.read(12)
    except TypeError:
        # A file-like object was passed instead of a path: peek and rewind.
        f = filename
        begin = f.read(12)
        f.seek(-12, 1)
    # Extension checks only apply to string paths; slicing a file object
    # here used to raise TypeError.
    if isinstance(filename, str):
        ext = filename[-3:]
        if ext == "csv":
            return "CSV"
        if ext in ("txt", "TXT"):
            return "TXT"
    # xlsx signature
    if begin == b'PK\x03\x04\x14\x00\x08\x08\x08\x00ss':
        return "XLSX"
    # NetCDF3 magic number
    if begin[:3] == b"CDF":
        return "NETCDF3"
    # NetCDF4 is contained in an HDF5 container; HDF5 format signature
    # from the HDF5 specification documentation.
    if begin[:8] == b'\x89\x48\x44\x46\x0d\x0a\x1a\x0a':
        return "NETCDF4"
    # HDF4 format signature from the HDF4 specification documentation.
    if begin[:4] == b'\x0e\x03\x13\x01':
        return "HDF4"
    # gzip signature
    if begin[:2] == b'\x1f\x8b':
        return 'GZ'
    # zip signature
    if begin == b'PK\x03\x04\x14\x00\x08\x00\x08\x00\x84y':
        return 'ZIP'
    # Cannot determine filetype.
    return "UNKNOWN"
5347be9bf0115c32b3de40091b256972abc94e67
55,350
def check_json_keys(json_dict):
    """
    Check whether all required keys are set.

    :param json_dict: dict parsed from json
    :return: True if every required key (including the nested
        graph_information keys) is present
    """
    required_keys = ("command", "runtime", "weight", "actual_stretch",
                     "graph_information")
    required_graph_information = ("nodes", "edges", "directed", "weighted",
                                  "simple")
    if any(key not in json_dict for key in required_keys):
        return False
    graph_info = json_dict["graph_information"]
    return all(key in graph_info for key in required_graph_information)
526d720d82f40f98e257c7c85db31f646b2132f7
55,357
def make_nonterminal(label, children):
    """Return a tree node: the root label followed by its children.

    :param label: root node label
    :param children: list of child nodes
    :return: new list [label, child0, child1, ...]
    """
    node = [label]
    node.extend(children)
    return node
fb54cc63e2905f3d8f7c36d3628dedbeda4477ad
55,360
def goodbye_world() -> str:
    """Create a shell command that prints "Goodbye Cruel World"."""
    return 'echo "Goodbye Cruel World"'
1de7c8aabcc1e00cd8d0e95f3be1852707591caf
55,362
def count_first_choices(tally):
    """
    Return dict giving the count of all first choices in a tally dictionary.

    Args:
        tally (dictionary): dictionary mapping ballots (sequences of
            choices) to nonnegative reals.

    Returns:
        (dict): dictionary mapping all choices that occur at least once as
        a first choice to the total count of those occurrences.

    Example:
        >>> tally = {('a', 'b'):1, ('c'):1, ():1, ('d'):1, ('a'):1}
        >>> count_first_choices(tally)
        {'a': 2, 'c': 1, 'd': 1}
    """
    counts = {}
    for ballot, weight in tally.items():
        if not len(ballot):
            continue  # empty ballots have no first choice
        first = ballot[0]
        counts[first] = counts.get(first, 0) + weight
    return counts
5e31a72d12b1b8823506258ba599679d030467d1
55,367
def placeholder2ix(placeholder):
    """
    Extract the index from a placeholder, e.g., #2 => 2.

    :param placeholder: string whose first char is the marker and the rest
        an integer
    """
    digits = placeholder[1:]
    return int(digits)
48ae87dfc1381f64b745a556a33b99396ac3f114
55,372
def percentage_string(val):
    """
    Return a percentage-formatted string for a value,
    e.g. 0.9234 becomes 92.34%.
    """
    return f'{val:,.2%}'
3e76ca3fec27637c4a6e51caaabace8517908c9a
55,378
def matches_filter(finding, request):
    """
    Apply the kb query filters to a single finding.

    Filters:
        * /scans/0/kb/?name= keeps only vulnerabilities containing the
          specified string in the vulnerability name (contains).
        * /scans/0/kb/?url= keeps only vulnerabilities for a specific URL
          (startswith).

    If more than one filter is specified they are combined using AND.

    :param finding: The vulnerability
    :param request: The HTTP request object
    :return: True if the finding (vulnerability) matches the filters
    """
    name = request.args.get('name', None)
    url = request.args.get('url', None)

    def name_matches():
        return name.lower() in finding.get_name().lower()

    def url_matches():
        finding_url = finding.get_url()
        return (finding_url is not None and
                finding_url.url_string.startswith(url))

    if name is not None and url is not None:
        return name_matches() and url_matches()
    if name is not None:
        return name_matches()
    if url is not None:
        return url_matches()
    # No filter set: everything matches.
    return True
4210051058aba24d8c0f05ac3c2e634bcd9ad8a2
55,380
import torch


def compute_jacobian_on_map(x, y, forward_transform, eps=0.01):
    """
    Compute the differentials dx/du, dy/du, dx/dv, dy/dv for the given
    projection function using central differences. For spherical
    projections, (lon, lat = u, v).

    Returns a tensor of shape (x.shape[0], x.shape[1], 2, 2).
    """
    # Central differences along the first input coordinate (u).
    xm_u, ym_u = forward_transform(x - eps, y)
    xp_u, yp_u = forward_transform(x + eps, y)
    dx_du = (xp_u - xm_u) / (2 * eps)
    dy_du = (yp_u - ym_u) / (2 * eps)

    # Central differences along the second input coordinate (v).
    xm_v, ym_v = forward_transform(x, y - eps)
    xp_v, yp_v = forward_transform(x, y + eps)
    dx_dv = (xp_v - xm_v) / (2 * eps)
    dy_dv = (yp_v - ym_v) / (2 * eps)

    # Last dim selects the u/v column; second-to-last selects x/y.
    col_u = torch.stack((dx_du, dy_du), -1)
    col_v = torch.stack((dx_dv, dy_dv), -1)
    return torch.stack((col_u, col_v), -1)
3fc3c0467f44b11d86f1202f2076ffa9d265f25a
55,381
def pp_num(num):
    """Pretty-print a number with thousands-separating commas.

    :param num: int (floats are truncated toward zero, matching '%d')
    :return: string such as '1,234,567'
    """
    # The built-in ',' format spec replaces the manual 3-digit slicing
    # loop; int() reproduces the old '%d' truncation for float input.
    return '{:,d}'.format(int(num))
d249e973b52114c72fa684471d441b870b1fc6c4
55,383
def calculate_winner(rules: dict, left_attack: str, right_attack: str) -> str:
    """
    Calculate the winner of combat; the left player has priority and goes
    first.

    :param rules: Dictionary of current rules
    :param left_attack: Attack type of priority player
    :param right_attack: Attack type of right player
    :return: Result of combat: "left_wins", "right_wins", or "draw"
    """
    outcome = rules[left_attack]
    if right_attack in outcome["beats"]:
        result = "left_wins"
    elif right_attack in outcome["loses"]:
        result = "right_wins"
    else:
        result = "draw"
    return result
d596472ccd665ef5ebfe5ce6926c88a0b92ffc48
55,384
def split_epochs(epochs):
    """Safely split one epochs file into two with adequate channel names.

    Channels whose names start with "sub1" go to the first returned object,
    "sub2" to the second; the 5-character subject prefix is then stripped
    from each channel name.
    """
    def _subject_epochs(prefix):
        # Pick only this subject's channels, keeping their order.
        picked = epochs.copy().pick_channels(
            [ch for ch in epochs.ch_names if ch[:4] == prefix], ordered=True)
        # Strip the "subN_" prefix (first 5 chars) from the channel names.
        picked.rename_channels(
            {ch: ch[5:] for ch in epochs.ch_names if ch[:4] == prefix})
        return picked

    return _subject_epochs("sub1"), _subject_epochs("sub2")
e96f176801dfd8ad0790235f0c02b9eaacee02ff
55,385
import math


def sqrt_bins(arr):
    """
    Use the square-root rule of thumb to calculate the appropriate number
    of histogram bins for an array.

    :param arr: An array containing numbers.
    :returns: An integer to serve as the number of bins.
    """
    count = len(arr)
    return int(math.sqrt(count))
18e508dba50c30ef29e4fd15d40d6f212b47313c
55,386
def select_countries(df, select_years=[str(x) for x in range(2005, 2019)],
                     threshold=12, values='value'):
    """
    Extract a list of countries that have sufficient data for the selected
    years.

    Args:
        df: (DataFrame) Long-format data with 'country', 'year' and a
            value column.
        select_years: (list) Years (str) to be included in analysis.
        threshold: (int) Maximum number of missing values a country may
            have and still be selected.
        values: (str) Name of the column holding the observations.

    Returns:
        list of country names.
    """
    # Filter for only relevant years; .copy() so the helper column below
    # does not mutate (or warn about) the caller's frame.
    df = df[df.year.isin(select_years)].copy()
    # Count missing observations per country. Bug fix: the original
    # ignored both `threshold` (hard-coded 12) and `values` (hard-coded
    # 'value').
    df['missing'] = df[values].isna()
    country_missing = df.groupby('country')['missing'].sum()
    return country_missing[country_missing <= threshold].index.tolist()
932b30f1df5ecc81a5389b71dc55730b585f7f24
55,391
from typing import Any


def key_by_val(dictionary: dict,
               value: Any,
               ) -> Any:
    """
    A helper function for getting a key from a dictionary corresponding to
    a certain value. Does not check for value unicity: the first match in
    iteration order wins.

    Args:
        dictionary (dict): The dictionary.
        value: The value.

    Raises:
        ValueError: If the value could not be found in the dictionary.

    Returns: Any
        The key.
    """
    matches = (k for k, v in dictionary.items() if v == value)
    for key in matches:
        return key
    raise ValueError(f'Could not find value {value} in the dictionary\n{dictionary}')
a92a9b897e90d6b345579ee005e08758e6031f23
55,395
import math


def RastFunc(vardim, x, bound):
    """
    Rastrigin function.

    :param vardim: number of variables
    :param x: point to evaluate (sequence of length vardim)
    :param bound: unused; kept for interface compatibility with other
        benchmark functions
    :return: Rastrigin value; 0.0 at the origin
    """
    # Bug fix: the constant term is 10 * vardim, not the hard-coded
    # 10 * 25 (which was only correct for exactly 25 dimensions).
    s = 10 * vardim
    for i in range(1, vardim + 1):
        s = s + x[i - 1] ** 2 - 10 * math.cos(2 * math.pi * x[i - 1])
    return s
fac9dda66965a070f8460f457320c5d58baadee8
55,397
import math


def regular_poly_circumrad(n_sides, side_length):
    """Circumradius of a regular polygon with n_sides sides of the given
    side length."""
    half_angle = math.pi / n_sides
    return side_length / (2 * math.sin(half_angle))
64e5bb400e3892e3d919176c1736606b35864b72
55,402
def get_gerrit_patch_from_parameters_action(action):
    """For the given action dictionary, return the Gerrit patch URL if
    present, else return None.

    :param action: dict with a 'parameters' list of {'name', 'value'} dicts
    """
    return next((param['value'] for param in action['parameters']
                 if param['name'] == 'GERRIT_CHANGE_URL'), None)
693ee60eeece01fce2f5feea9a221b3e1b2c4cb8
55,404
def not_none(passed, default):
    """Returns `passed` if not None, else `default` is returned."""
    if passed is None:
        return default
    return passed
e35064a7264698da90d54d1114d8519ad59a935b
55,414
import json


def retrieve_ldap_password(secrets_manager_client, logger, ldap_password_secret_name):
    """
    Retrieve the LDAP service account password from Secrets Manager.

    Expected password format:
        {"PASSWORD": "service_account_password"}

    Returns:
        str: plaintext ldap password
    """
    logger.info("Retrieving LDAP service account password from Secrets Manager")
    response = secrets_manager_client.get_secret_value(
        SecretId=str(ldap_password_secret_name)
    )
    secret = json.loads(response['SecretString'])
    return secret['PASSWORD']
f67aa97169e641b5a6b206c4975dc6ed2d2112ca
55,417
def merge_verifyta_args(cfg_dict):
    """
    Concatenate and format a string of verifyta arguments given by the
    .yaml configuration file.

    :param cfg_dict: mapping of option name -> value; a value of None
        renders the option as a bare flag
    :return: single space-separated argument string
    """
    parts = []
    for key, value in cfg_dict.items():
        # `is not None` instead of `!= None`; values of 0 or "" are still
        # rendered with their value, exactly as before.
        if value is not None:
            parts.append("--" + key + " " + str(value))
        else:
            parts.append("--" + key)
    # join replaces the leading-space-then-slice string building.
    return " ".join(parts)
da7ef16ea458631acba22434a1467963848029e5
55,418
import pytz


def get_country_code(request):
    """
    Country code based on ISO 3166-1 (http://en.wikipedia.org/wiki/ISO_3166-1).

    :param request: Request Object
    :return: ISO Code of the country, or None when the header is absent or
        names no known country
    """
    headers = request.headers
    if 'X-AppEngine-Country' in headers:
        code = headers['X-AppEngine-Country']
        # Only return codes pytz recognises as real countries.
        if code in pytz.country_timezones:
            return code
    return None
2274c2396f743f9676872754b7479e1093d5f792
55,421
from typing import Iterable


def is_non_string_iterable(arg):
    """Return True if arg is an iterable, but not a string."""
    # Modernized: on Python 3, six.string_types is just (str,), so the six
    # dependency is dropped.
    return isinstance(arg, Iterable) and not isinstance(arg, str)
ba42c61d0799f00105ebca719037ae7d45bd7ff3
55,422
def extract_measurement_names(data_file_src: str) -> list[str]:
    """Extract the row names (first comma-separated column) from the
    provided plaintext data file."""
    names = []
    for row in data_file_src.splitlines():
        first_field = row.split(",")[0]
        names.append(first_field)
    return names
29bd64fbe29e7f7c62b4aedc8e0bb5e00ce5fb73
55,428
from datetime import datetime


def parse_timestamp(time_str):
    """
    Parse an eMammal ImageDateTime value; three datetime string formats
    occur in the data, and some entries are empty.

    Args:
        time_str: text in the tag ImageDateTime

    Returns:
        datetime object (or the original string on failure),
        error (None if no error)
    """
    if time_str == '' or time_str is None:
        return '', 'empty or None'
    # Try each known format in turn, replacing the nested try/except
    # pyramid whose innermost handler was a bare `except`.
    for fmt in ('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S', '%m/%d/%Y %H:%M'):
        try:
            return datetime.strptime(time_str, fmt), None
        except (ValueError, TypeError):
            continue
    print('WARNING, time_str cannot be parsed {}.'.format(time_str))
    return time_str, 'cannot be parsed {}'.format(time_str)
634d47fbcffe72164b62b2f46e80b8504db21b3d
55,429
import random


def random_product(*args, **kwargs):
    """random_product(*args, repeat = 1) -> tuple

    Arguments:
        args: One or more iterables
        repeat(int): Number of times to repeat `args`.

    Returns:
        A random element from ``itertools.product(*args, repeat = repeat)``.

    Examples:
        >>> args = (range(2), range(2))
        >>> random_product(*args) in {(0, 0), (0, 1), (1, 0), (1, 1)}
        True
    """
    repeat = kwargs.pop('repeat', 1)
    if kwargs != {}:
        raise TypeError('random_product() does not support argument %s' % kwargs.popitem())
    # Bug fix: on Python 3 map() returns an iterator, which cannot be
    # multiplied; materialize the pools as a tuple first.
    pools = tuple(map(tuple, args)) * repeat
    return tuple(random.choice(pool) for pool in pools)
0a5c88f3ec341d9e55502fce795811ea60f89e31
55,430
def airtovac(wave):
    """
    Convert air-based wavelengths to vacuum.

    Parameters:
    ----------
    wave: ndarray or float
        Wavelengths (assumed Angstroms)

    Returns:
    ----------
    wavelength: ndarray or float
        Wavelength array corrected to vacuum wavelengths; values below
        2000 A are returned unchanged.
    """
    wavelength = wave
    # Standard conversion format.
    sigma_sq = (1.e4 / wavelength) ** 2.  # wavenumber squared
    factor = (1 + (5.792105e-2 / (238.0185 - sigma_sq))
              + (1.67918e-3 / (57.362 - sigma_sq)))
    # Only modify above 2000A; below that the factor collapses to 1.
    factor = factor * (wavelength >= 2000.) + 1. * (wavelength < 2000.)
    return wavelength * factor
23bd5afa62fb0fbee07cd73dc8f3a7454276db0c
55,436
def is_structure(structure):
    """Check if an object is a structure that javelin can understand.

    ase.atoms will have cell, get_scaled_positions and get_atomic_numbers
    attributes; diffpy.structure will have lattice, xyz, and element
    attributes.
    """
    ase_like = ((hasattr(structure, 'cell') or hasattr(structure, 'unitcell'))
                and hasattr(structure, 'get_scaled_positions')
                and hasattr(structure, 'get_atomic_numbers'))
    diffpy_like = (hasattr(structure, 'lattice')
                   and hasattr(structure, 'xyz')
                   and hasattr(structure, 'element'))
    return ase_like or diffpy_like
ee72e5fd92334e5f6b8fc4d8463165d7c43e2d88
55,439
def dimensions(bound):
    """
    Get the width and height of a bound.

    :param bound: a bound tuple (left, top, right, bottom)
    :return: a tuple containing the width and height of the ``bound``
        i.e ``(width, height)``
    """
    left, top = bound[0], bound[1]
    right, bottom = bound[2], bound[3]
    return right - left, bottom - top
d386cdb0fefb2ad46bd5b08826159c72d8ec1108
55,443
def Dup(x, **unused_kwargs):
    """Duplicate (copy) the first element on the stack.

    Lists and tuples get their head prepended; anything else becomes a
    two-element list of itself.
    """
    if isinstance(x, tuple):
        return (x[0],) + x
    if isinstance(x, list):
        return [x[0]] + x
    return [x, x]
4fe65555155272a53ba247931ae7390db23b01a9
55,450
def str_to_felt(text):
    """Convert from string to felt (the big-endian integer encoding of the
    ASCII bytes)."""
    encoded = text.encode("ascii")
    return int.from_bytes(encoded, "big")
fc0674e3ddf84728f6d401e66cb82b95eab60d96
55,451
def check_int(integer, name):
    """Verify that an object is an integer, or coercible to one.

    Parameters
    ----------
    integer : int
        The integer to check.
    name : str
        The name to print in the error message if it fails.

    Raises
    ------
    ValueError
        If ``integer`` is not equal to its int() coercion.
    """
    if isinstance(integer, int):
        return integer
    # Non-ints (e.g. floats) are coerced; a lossy coercion is rejected.
    as_int = int(integer)
    if as_int != integer:
        raise ValueError(f"'{name}' must be an integer.")
    return as_int
e51e8fba1b13474c488038196173c96f1c74cd0d
55,452
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
    """
    Returns list of dictionaries with keys: @prm{key} equal to one from list
    @prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.

    Raises Exception if any dictionary lacks ``key`` (or maps it to None).
    """
    ret = []
    for dictionary in list_of_dicts:
        # `is None` instead of `== None` (idiomatic identity check);
        # an explicit None value is still treated as a missing key,
        # matching the original behavior.
        if dictionary.get(key) is None:
            raise Exception("No key: " + key + " in dictionary.")
        if dictionary[key] in list_of_key_values:
            ret.append(dictionary)
    return ret
65e1ee20e57ec2ea94e4bff4d5f898e947521335
55,454
def get_types(input_types, args, kwargs):
    """Extract the types of args, kwargs, optional kwargs and output.

    Parameters
    ----------
    input_types : list
        List of inputs' types, including for optional inputs and
        possibly a trailing ``output_*`` marker.
    args : tuple
        Args of a function.
    kwargs : dict
        Kwargs of a function.

    Returns
    -------
    tuple
        (args_types, kwargs_types, opt_kwargs_types, is_scal) where
        ``is_scal`` is False only for a trailing non-scalar output marker.
    """
    n_args = len(args)
    n_known = n_args + len(kwargs)

    args_types = input_types[:n_args]
    kwargs_types = input_types[n_args:n_known]
    opt_kwargs_types = input_types[n_known:]

    is_scal = True
    if opt_kwargs_types:
        last_input_type = input_types[-1]
        if "output_" in last_input_type:
            # The output marker is not an optional kwarg; strip it.
            is_scal = last_input_type == "output_scalar"
            opt_kwargs_types = input_types[n_known:-1]
    return (args_types, kwargs_types, opt_kwargs_types, is_scal)
2bde7681cfad1e0c697946e27d40a15d87d0602b
55,456
def prep_for_jinja(images):
    """
    Prepares svg `images` for jinja rendering

    Parameters
    ----------
    images : list-of-str
        Paths to SVG files.

    Returns
    -------
    outputs : list-of-tuple
        One ``(path, file_contents)`` pair per input path.
    """
    outputs = []
    for path in images:
        with open(path, 'r') as src:
            outputs.append((path, src.read()))
    return outputs
ce7a2c90bbe7d2709a820812a83e1c6b97552bf0
55,457
import operator def _validate_int(k, name, minimum=None): """ Validate a scalar integer. This functon can be used to validate an argument to a function that expects the value to be an integer. It uses `operator.index` to validate the value (so, for example, k=2.0 results in a TypeError). Parameters ---------- k : int The value to be validated. name : str The name of the parameter. minimum : int, optional An optional lower bound. """ try: k = operator.index(k) except TypeError: raise TypeError(f'{name} must be an integer.') from None if minimum is not None and k < minimum: raise ValueError(f'{name} must be an integer not less ' f'than {minimum}') from None return k
60fd39fe6b696b086287f92d5d5c66d681dee2d3
55,459
def rebuild_params(values, keys):
    """
    Build parameter dictionary from flattened values and ordered parameter
    names.

    ``keys`` is a list of ``(group_name, [param_names])`` pairs; ``values``
    supplies one value per param name, in order.  The pseudo-name ``"b-a"``
    is not stored directly: together with ``"mu"`` it yields derived bounds
    ``a = mu - (b-a)/2`` and ``b = mu + (b-a)/2``.

    For example, given:
        values = np.array([1., 5., .2, .5, 50., .1, 3, 3]),
        keys = [("Rt_fac", ["mu", "gamma", "b-a"]),
                ("R_fac", ["mu", "gamma", "b-a"]),
                ("consts", ["En", "Im"])]
    the result contains Rt_fac.a == .9, Rt_fac.b == 1.1, etc.
    """
    result = {}
    cursor = 0
    for group, names in keys:
        entry = {}
        half_range = None
        center = None
        for name in names:
            value = values[cursor]
            cursor += 1
            if name == "b-a":
                # Width marker: remembered, not stored in the entry.
                half_range = value
            else:
                if name == "mu":
                    center = value
                entry[name] = value
        if half_range is not None and center is not None:
            entry["a"] = center - half_range / 2
            entry["b"] = center + half_range / 2
        result[group] = entry
    return result
0934e084b38c560907b612872dfc5c4c4fffac79
55,460
def getItemDict(node, recursive=True):
    """From a Houdini item, get the contents

    :param node: Node to get a content dict from
    :type node: hou.Node()
    :param recursive: In recursive mode, this will recurse on all
        sub-children items of a subnet
    :type recursive: bool
    :return: A dictionary containing information on the node's contents
    :rtype: dict
    """
    position = node.position()
    item = {
        '__node_type_name__': node.type().name(),
        '__node_position_x__': position.x(),
        '__node_position_y__': position.y(),
        '__node_color_rgb__': node.color().rgb(),
        '__inputs__': [i.name() for i in node.inputs()],
    }
    # Every parameter is stored as its evaluated string representation.
    for parm in node.parms():
        item[parm.name()] = '{}'.format(parm.evalAsString())
    if item['__node_type_name__'] == 'subnet' and recursive:
        contents = {}
        for child in node.allSubChildren():
            contents[child.name()] = getItemDict(child)
        item['__subnet_contents__'] = contents
    return item
4d9da49ab6f40b55cb50fb06c05aefa28d0cc567
55,462
def append_wrapper(list):
    """Return a decorator that appends its argument to ``list``.

    The decorated object is returned unchanged; the only effect is the
    side effect of recording it in ``list``.
    """
    target = list

    def decorator(obj):
        target.append(obj)
        return obj

    return decorator
b510436bc1637d5941277dc8078989154380a3e1
55,464
def send_and_exhaust(iterator, arg, default):
    """Send a single value to a coroutine, exhaust it, and return the final
    element or a default value if it was empty."""
    last = default
    try:
        last = iterator.send(arg)
        # Drain the coroutine, remembering whatever it yields last.
        while True:
            last = next(iterator)
    except StopIteration:
        pass
    return last
8dc99fc265b70ffdf1fc0cf5e28247917d4957b9
55,466
import inspect


def get_mismatching_default_values(f1, f2, mapping=None):
    """Check that two functions have the same default values for shared
    parameters.

    ``mapping`` optionally maps f1 parameter names to their f2 equivalents.
    Returns a list of ``(f1_name, f1_default, f2_name, f2_default)`` tuples
    for every shared parameter whose defaults differ.
    """
    if mapping is None:
        mapping = {}

    sig1 = inspect.signature(f1).parameters
    sig2 = inspect.signature(f2).parameters

    mismatch = []
    for name1, param1 in sig1.items():
        # Translate the name if a mapping entry exists for it.
        name2 = mapping.get(name1, name1)
        param2 = sig2.get(name2)
        if param2 is None:
            # Parameter not shared with f2; nothing to compare.
            continue
        if param1.default != param2.default:
            mismatch.append((name1, param1.default, name2, param2.default))
    return mismatch
4085c3ae14209d17b4245abf0ee24db81bbc63cc
55,467
from functools import reduce


def get_earliest_trade(trades):
    """
    Gets the earliest trade from a list of trades.

    Raises TypeError on an empty sequence (from ``reduce``).
    """
    def _earlier(first, second):
        # Strict '<' keeps the later element on timestamp ties.
        if first.timestamp < second.timestamp:
            return first
        return second

    return reduce(_earlier, trades)
b0a3eb27effc42bfd0d2b83736ec2869353874db
55,468
def connection_gsm(gsm_client, network_apn, network_user, network_password):
    """
    This function waits for the network to be available and opens a gprs
    connection.

    Args:
        gsm_client (GsmClient): An object of the GsmClient class
        network_apn (string): The network apn
        network_user (string): The network user
        network_password (string): The network password

    Returns:
        bool: True if successful, False otherwise.
    """
    network_ready = gsm_client.waitForNetwork()
    if not network_ready:
        return False
    return gsm_client.gprsConnect(network_apn, network_user, network_password)
a9a889ba9e9494c80ba8082fab61c8d20357f56d
55,469
def delete_images_json(ibs, image_uuid_list):
    """
    REST:
        Method: DELETE
        URL: /api/image/json/

    Args:
        image_uuid_list (list of str) : list of image UUIDs to be delete
            from IBEIS

    Returns:
        bool: always True.
    """
    # Resolve UUIDs to internal gids, then delete in one call.
    ibs.delete_images(ibs.get_image_gids_from_uuid(image_uuid_list))
    return True
9eaffe35becfb020c5e76deb36278e7b3195c12b
55,471
from bs4 import BeautifulSoup


def parse_response_error(html_text: str) -> str:
    """Parse HTML error response

    :param html_text: HTML error message.
    :return: Parsed error message mapped to a user-friendly string.
    """
    html = BeautifulSoup(markup=html_text, features="html.parser")
    message = ''
    if html.p:
        # The <p> payload may itself contain escaped HTML; parse it again.
        inner_html = BeautifulSoup(markup=html.p.text, features="html.parser")
        message = inner_html.p.text if inner_html.p is not None else inner_html.text
    known_errors = (
        ("face_not_found", "Could not find a face in the image."),
        ("multiple_faces", "The image has more than one person."),
        ("quality_failed", "The provided image does not have enough quality."),
    )
    for marker, friendly in known_errors:
        if marker in message:
            message = friendly
            break
    else:
        # Unknown error payloads are replaced with a generic message.
        message = "An error occurred. Please contact your systems administrator."
    print(f"ERROR: {html.text}")
    return message
6588b3a60b8045bb246f4e1ee48f3ead960be342
55,478
def int_representation(entity):
    """Returns a unique integer representation of a string (entity)."""
    # Little-endian interpretation of the UTF-8 bytes.
    raw = entity.encode()
    return int.from_bytes(raw, "little")
a234558684c2a0e81298fb39b94043a0dc521fd4
55,484
def get_labels(ss):
    """Return two labels from a string containing "-" or two words starting
    with a capital.

    For example, the input string may be 'G-X', 'GX', 'Gamma-X', 'GammaX'.
    The output is always: ('G', 'X') or ('Gamma', 'X').
    """
    if '-' in ss:
        return ss.split('-')
    # No separator: split at the position of the second capital letter.
    capital_positions = [i for i, ch in enumerate(ss) if ch != ch.lower()]
    assert len(capital_positions) == 2, capital_positions
    assert capital_positions[0] == 0, capital_positions
    split_at = capital_positions[1]
    return [ss[:split_at], ss[split_at:]]
e77b5496050f0f959bdb5df6cc9996a802c82493
55,485
import configparser def _open_reg_file(file_path): """ Open file at given path and return as config option.""" config = configparser.ConfigParser() config.read_file(open(file_path)) return config
154ada4b64bc34b9bfda095b17d462a0b975890e
55,486
def getProjectNameFromFile(f: str) -> str:
    """ Helper funcion that obtains a project name from a project file name """
    # e.g. LondonMaeCompany_ldpproto_fe04304 -> LondonMaeCompany/ldpproto
    normalized = f.replace("\\", "/")
    stem = normalized.split("/")[1]
    # Underscores separate owner/project/hash; drop the trailing hash.
    parts = stem.replace("_", "/").split("/")
    return '/'.join(parts[:-1])
7589548e65367637a974e2910e1c81e0c83f5400
55,487
import secrets


def randomHex(len: int):
    """Returns a random hex string at the given length.

    ``len`` is the number of random bytes; the returned hex string has
    ``2 * len`` lowercase hex characters.
    """
    # secrets.token_hex draws uniformly over all 256 byte values; the
    # previous implementation used randbelow(255), which could never
    # produce the byte 0xff.
    return secrets.token_hex(len)
850339b9d8146fbd1142bde60af5be80f93ee761
55,489
import re


def extract_jobs_flags(mflags):
    """Extracts make job flags from a list of other make flags, i.e. -j8 -l8

    :param mflags: string of space separated make arguments
    :type mflags: str
    :returns: space separated list of make jobs flags, or None if there
        are no job flags present
    :rtype: str
    """
    regex = r'(?:^|\s)(-?(?:j|l)(?:\s*[0-9]+|\s|$))' + \
            r'|' + \
            r'(?:^|\s)((?:--)?(?:jobs|load-average)(?:(?:=|\s+)[0-9]+|(?:\s|$)))'
    found = re.findall(regex, mflags)
    if not found:
        return None
    # Each match tuple has exactly one non-empty group (short or long form).
    flags = [short or long_form for short, long_form in found]
    return ' '.join(flag.strip() for flag in flags)
b4b7c50f592f67cd93d8bf395c62931dd32fecc9
55,490
def template_check(template_id_hex):
    """Checks the template hex value and returns the template name
    corresponding to it.

    Parameters
    ----------
    template_id_hex: hex str of template id
        The template id retrieved from the parsed RAP packet

    Returns
    -------
    The corresponding template name, or '0000' for an unknown id.
    """
    template_names = {
        '0001': 'SRC_IPV4',
        '0002': 'DST_IPV4',
        '0003': 'SRC_PORT',
        '0004': 'DST_PORT',
        '0005': 'PROTO',
        '0006': 'SRC_IPV6',
        '0007': 'DST_IPV6',
        '0008': 'IPV4_TOS',
        '0009': 'IPv6_FLOW_LABEL',
        '0010': 'PKT_COUNT',
        '0011': 'KBYTE_COUNT',
        '000a': 'CLASS_LABEL',
        '000b': 'MATCH_DIR',
        '000c': 'MSG_TYPE',
        '000d': 'TIME_TYPE',
        '000e': 'TIMEOUT',
        '000f': 'ACT_FLAG',
        '8000': 'ACT',
        '8001': 'ACT_PAR',
        '8002': 'CLASS_NAME',
        '8003': 'EXPORT_NAME',
        'c000': 'CLASS_TAG',
    }
    return template_names.get(template_id_hex, '0000')
07a317f41967e0e1ef94c8965cbaa39ada488601
55,495
def chunk_string_increment(string, n):
    """Return successive prefixes of ``string`` growing by ``n`` characters.

    string: 'aabbccdd'
    n: 2
    returns ['aa', 'aabb', 'aabbcc', 'aabbccdd']
    """
    # Each prefix ends at the next chunk boundary; slicing past the end of
    # the string is safe and simply yields the full string.  This replaces
    # the original quadratic chunk-then-rejoin loop and its commented-out
    # dead code.
    return [string[:i + n] for i in range(0, len(string), n)]
5743154f70f4159e49ffcf8d94bb63ad923f5d7c
55,503
def stream_resampling(stream, sampling_rate=100.0):
    """
    To resample the input seismic data.

    Parameters
    ----------
    stream : obspy stream
        input seismic data.
    sampling_rate : float
        required sampling rate in Hz, default is 100 Hz.

    Returns
    -------
    stream : obspy stream
        output seismic data after resampling.

    Notes
    -----
    Traces are modified in place.  Traces with 10 or fewer samples, and
    traces for which both resampling and interpolation fail, are removed
    from the stream.
    """
    # NOTE(review): traces are removed from `stream` while it is being
    # iterated; obspy Streams appear to tolerate this, but confirm.
    for tr in stream:
        if tr.stats.sampling_rate != sampling_rate:
            if (len(tr.data) > 10):
                # perform resampling
                try:
                    if tr.stats.sampling_rate > sampling_rate:
                        # need lowpass filter before resampling
                        # (anti-aliasing at the new Nyquist frequency)
                        tr.filter('lowpass',freq=0.5*sampling_rate,zerophase=True)
                    tr.resample(sampling_rate=sampling_rate)
                except:
                    # Bare except (intentional best-effort): fall back to
                    # linear interpolation when Fourier resampling fails.
                    try:
                        tr.interpolate(sampling_rate, method="linear")
                    except:
                        # Both strategies failed; drop the trace entirely.
                        stream.remove(tr)
            else:
                # remove the trace if it only contains too few data points
                stream.remove(tr)
    return stream
8858d5296646077dc18ffcaf890cd29c7813964f
55,504
def slice_pagination(page_range, current_page):
    """Slices paginator.page_range to limit which page links are displayed

    The logic for which page numbers are shown is based on Google Search's
    pagination.

    Examples:

        When current_page is within the first four pages

            1 [2] 3 4 5 6 7 8

        When current_page is within the last three pages

            14 15 16 17 18 [19] 20 21

        When current_page is somewhere in the middle

            12 13 14 15 [16] 17 18 19
    """
    first_four = page_range[:4]
    last_three = page_range[-3:]

    if current_page in first_four:
        return page_range[:8]
    if current_page in last_three:
        return page_range[-8:]

    # Window of 4 pages before and 3 pages after the current page.
    window_start = current_page - 5
    window_end = current_page + 3
    return page_range[window_start:window_end]
3565101d241b75e9bee4224d992fa67a9fbfaa8a
55,512
from datetime import datetime


def get_age(year_of_birth):
    """ Returns the age of the person. """
    # Year-granularity only: ignores whether the birthday has passed.
    current_year = datetime.now().year
    return current_year - year_of_birth
0934a2ab17592055418e353f65ee189996f5bbab
55,517
def get_diff(throw, target):
    """
    Determines the difference between a throw and a target
    E.g. between '111344' and '111144'
    Here, the difference is 1, dice to keep is '11144', to remove is '3'
    """
    diff = 0
    kept = []
    removed = []
    for have, want in zip(throw, target):
        if have == want:
            kept.append(have)
        else:
            diff += 1
            removed.append(have)
    return diff, ''.join(kept), ''.join(removed)
6c0dfe395cc7fbfbf5f2e745cde9a61d81b045f8
55,518
import calendar
import decimal


def dt_to_decimal(utc):
    """Datetime to Decimal.

    Some databases don't store microseconds in datetime so we
    always store as Decimal unixtime.

    :param utc: a (naive, UTC) ``datetime`` or None.
    :return: ``Decimal`` seconds since the epoch, or None.
    """
    if utc is None:
        return None
    # Use a local context so the precision tweak does not leak into the
    # caller's global decimal context (the original mutated getcontext()).
    with decimal.localcontext() as ctx:
        ctx.prec = 30
        seconds = decimal.Decimal(str(calendar.timegm(utc.utctimetuple())))
        micros = decimal.Decimal(str(utc.microsecond)) / decimal.Decimal("1000000.0")
        return seconds + micros
2c2cf69d84d11d0a4843f87eb2929a2fc2a1c3b2
55,521
from typing import Optional


def int_range_to_string(
    begin_optional: Optional[int] = None,
    end_optional: Optional[int] = None,
    unit: Optional[str] = None,
    default_begin_value: str = "",
    default_end_value: str = "",
) -> Optional[str]:
    """
    Change range int to string for TFDS.

    Parameters
    ----------
    begin_optional : Optional[int], optional, default=None
        Start value for string range.
    end_optional : Optional[int], optional, default=None
        End value for string range.
    unit : Optional[str], optional, default=None
        Unit suffix appended to non-default bounds.
    default_begin_value : str, optional, default=""
        Default value for begin.
    default_end_value : str, optional, default=""
        Default value for end.

    Returns
    -------
    Optional[str]
        Range string for TFDS load, or None when both bounds are unset.

    Examples
    --------
    >>> int_range_to_string(begin_optional=30, unit="%")
    "30%:"
    >>> int_range_to_string(begin_optional=10, end_optional=50, unit="%")
    "10%:50%"
    """
    if not (begin_optional or end_optional):
        return None

    def render(value: Optional[int], default: str) -> str:
        # A falsy bound falls back to its default and never receives the
        # unit suffix, matching the original identity-with-default checks.
        if not value:
            return default
        return "{}{}".format(value, unit) if unit else str(value)

    return "{}:{}".format(
        render(begin_optional, default_begin_value),
        render(end_optional, default_end_value),
    )
af209774b269c6baf54b8849867c4d112dae7723
55,523
def prot_domains(ips_result):
    """
    Reads the tsv output of an InterProScan run and returns a dict with
    gene name and minimal E-value found for any domain for any application
    for the protein.
    """
    minima = {}
    with open(ips_result) as handle:
        for line in handle:
            fields = line.split('\t')
            # Column 0 is the gene/protein id, column 8 the E-value.
            gene = fields[0]
            e_value = float(fields[8])
            current = minima.get(gene)
            minima[gene] = e_value if current is None else min(current, e_value)
    return minima
2cdd244945d1d2c5a6f3b5142a973f031cf1bfe7
55,525
import math


def comb(n, k):
    """Return the number of ways to choose k items from n items without
    repetition and without order.
    """
    # math.comb performs exact integer arithmetic; the previous
    # factorial-based true division returned a float and lost precision
    # for large n.  The exact integer compares equal to the old float
    # result wherever the float was exact.
    return math.comb(n, k)
6281e710fb56a7e16fb332fdc0484749c6140139
55,527
import torch


def moments_log(log_probs, log_levels, bias_levels, log_squared_levels, bias_squared_levels):
    """
    Compute mean and variance of a probability tensor represented in
    log-space.

    Arguments:
    ----------
    log_probs: tensor of size [dim[0], ..., dim[N-1], levels]
        Unnormalized log marginal distribution over the quantized levels.
    log_levels: vector of length [num_levels]
        log(levels[l] - bias_levels), with bias_levels < min_k levels[k].
    bias_levels: float
        Bias added back after exponentiation of the first moment.
    log_squared_levels: vector of length [num_levels]
        log(levels[l]^2 - bias_squared_levels).
    bias_squared_levels: float
        Bias added back after exponentiation of the second moment.

    Returns:
    --------
    (mean, var): pair of N-th order tensors
        Elementwise mean E[W] and variance V[W] of the weights/bias.
    """
    # Shared normalizer log(sum_k exp(log_probs[..., k])).
    log_norm = torch.logsumexp(log_probs, dim=-1)

    # E[W] = sum_l levels[l] * p[l], computed in log-space with the bias
    # restored after exponentiation.
    mean = torch.exp(
        torch.logsumexp(log_probs + log_levels, dim=-1) - log_norm
    ) + bias_levels

    # E[W^2], same construction using the squared levels.
    second_moment = torch.exp(
        torch.logsumexp(log_probs + log_squared_levels, dim=-1) - log_norm
    ) + bias_squared_levels

    # V[W] = E[W^2] - E[W]^2
    var = second_moment - mean ** 2
    return (mean, var)
0e5392f8d55b096fe7944fedd0fb2643e09b3bad
55,528
def lookup(dlist, lkey, lvalue, rkey=None):
    """
    Use a known key:value pair to lookup a dictionary in a list of
    dictionaries.  Return the dictionary or None.

    If rkey is provided, return the value referenced by rkey or None.
    If more than one dict matches, raise an error.

    args:
        dlist: lookup table - a list of dictionaries
        lkey: name of key to use as lookup criteria
        lvalue: value to use as lookup criteria
        rkey: (optional) name of key referencing a value to return
    """
    matches = [entry for entry in dlist if lkey in entry and entry[lkey] == lvalue]
    if not matches:
        return None
    if len(matches) > 1:
        raise RuntimeError(
            "Data Error: lkey: {}, lvalue: {} - lookup matches multiple items in dlist".format(lkey, lvalue)
        )
    match = matches[0]
    if not rkey:
        return match
    # Missing rkey yields None rather than raising.
    return match.get(rkey)
addefaa38ef6ebcb917913ac4093251f0fdd963c
55,530
import wave

import click


def validate_wave(ctx, param, value):
    """
    Validate the wave file by trying to open it and checking that its got a
    single channel.

    :param ctx: <class 'click.core.Context'>
    :param param: <class 'click.core.Option'>
    :param value: str
    :return: <class 'wave.Wave_read'>
    :raises click.BadParameter: if the file is not a valid wave file or is
        not mono.
    """
    try:
        wave_read = wave.open(value)
    except wave.Error as e:
        raise click.BadParameter('Not a valid wave file. {}'.format(e.__str__()))
    if wave_read.getnchannels() != 1:
        # Close the handle before rejecting, otherwise it leaks
        # (the original raised while the file was still open).
        wave_read.close()
        raise click.BadParameter('Only mono wave files are supported')
    return wave_read
03b9154d7646eb4ba51f0d75963811609874b926
55,532