content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import random


def sample_table(table, k=None):
    """Select a random sample of ``k`` rows from a pyarrow Table.

    If ``k`` is None the table is returned unchanged.
    """
    if k is None:
        return table
    chosen = random.sample(range(len(table)), k)
    return table.take(chosen)
14c3f9199ebf80ece7b0641c8352678fe022c752
94,892
def safe_sub(val1, val2):
    """Subtract ``val2`` from ``val1``, propagating the unknown sentinel.

    A value of -1 marks an unknown quantity; if either operand is
    unknown the result is unknown (-1) as well.
    """
    if val1 == -1 or val2 == -1:
        return -1
    return val1 - val2
e8300387ababbd85f983bf02b819b251c5e058af
94,902
import time, os, random


def get_random_temp_dir_name(basedir="/tmp"):
    """Build a unique-looking directory path under ``basedir``.

    The leaf name combines the current timestamp, the process id and a
    random 7-digit number, e.g. ``/tmp/1600000000.0-123-4567890``.
    """
    parts = (time.time(), os.getpid(), random.randrange(1000000, 10000000))
    leaf = "-".join(str(part) for part in parts)
    return "%s/%s" % (basedir, leaf)
8df0cf8fcdde156680a95393c863cf716be8c196
94,903
def thresh_label(label_list, threshold):
    """Binarise labels: values below ``threshold`` map to 0, others to 1."""
    encoded = []
    for value in label_list:
        encoded.append(1 if value >= threshold else 0)
    return encoded
fcbb011bda127edf8b14ae7041619807e67eb118
94,904
import torch


def cho_solve_AXB(a, L, b):
    """Compute tensor $a C^{-1} b$ from a Cholesky factor.

    Args:
        a (torch.Tensor): M x N tensor.
        L (torch.Tensor): N x N lower triangular tensor where $L L^T = C$.
        b (torch.Tensor): N x K tensor.

    Returns:
        torch.Tensor: M x K tensor equal to $a C^{-1} b$.
    """
    # torch.triangular_solve was deprecated and removed in recent torch;
    # torch.linalg.solve_triangular is the supported replacement (note the
    # argument order: matrix first, right-hand side second).
    left = torch.linalg.solve_triangular(L, a.t(), upper=False)
    right = torch.linalg.solve_triangular(L, b, upper=False)
    # (L^{-1} a^T)^T (L^{-1} b) = a L^{-T} L^{-1} b = a C^{-1} b
    return torch.mm(left.t(), right)
5a43074506839f572aeea678fc7989a1a25f6bb2
94,905
def scrub_uuid(string):
    """Scrub any existing `"_uuid"` suffix from `string`."""
    sep = string.rfind("_")
    if sep == -1:
        return string
    suffix = string[sep + 1:]
    if len(suffix) != 32:
        return string
    try:
        int(suffix, base=16)
    except ValueError:
        return string
    # 32 hex characters after the underscore: it was a uuid, drop it.
    return string[:sep]
8afafb59ca6f2cdb60c42cd6fa138abcd702b3fe
94,906
def date_to_xmlschema(date):
    """Format a date for use in XML.

    date - The Time to format.

    Examples

      date_to_xmlschema(datetime.datetime.now())
      => "2011-04-24T20:34:46+05:30"

    Returns the formatted String.
    """
    # The original hardcoded the time-of-day to 00:00:00 and emitted an
    # unpadded "+5:30" offset, contradicting the docstring example and the
    # xs:dateTime lexical format; emit the actual time and a padded offset.
    return date.strftime('%Y-%m-%dT%H:%M:%S+05:30')
f1b0b39fd77d9e934f42b3e8428cb3ae0315c63e
94,907
def _zero_triband(a, lower=0): """ Explicitly zero out unused elements of a real symmetric banded matrix. INPUTS: a -- a real symmetric banded matrix (either upper or lower hald) lower -- if True, a is assumed to be the lower half """ nrow, ncol = a.shape if lower: for i in range(nrow): a[i,(ncol-i):] = 0. else: for i in range(nrow): a[i,0:i] = 0. return a
0cb27b08b24618cb167dc9f331e944b87d55a8ec
94,918
def sterilize(text):
    """Normalise whitespace in ``text``.

    Removes leading/trailing whitespace and collapses every internal run
    of whitespace into a single space.

    Args:
        text (str): text to sterilize.

    Returns:
        str: the sterilized text.
    """
    return " ".join(text.split())
15cf5a304b0f3c6091f7721dcd73c4758e920cf9
94,925
import math


def PowellSum(x, lb=-1., ub=1.):
    """Powell Sum benchmark function.

    f(x) = sum_i |x_i|^(i+2) for i = 1..d, with global minimum
    f(0, ..., 0) = 0 on the domain x_i in [-1, 1].

    Returns the function value together with the lower and upper bounds.
    """
    y = 0.
    for exponent, value in enumerate(x, start=2):
        y += math.fabs(value) ** exponent
    return y, lb, ub
a9574842dbb9f55ac9302b96adcdd4658c4902bf
94,927
def cross_product(vec1, vec2):
    """Compute the 2D cross product of two vectors.

    @param { Vector2 } vec1 : first vector.
    @param { Vector2 } vec2 : second vector.
    @return { float } : Return the scalar.
    """
    x1, y1 = vec1.get_x(), vec1.get_y()
    x2, y2 = vec2.get_x(), vec2.get_y()
    return x1 * y2 - y1 * x2
f57777ababd30143e5f6198aa698690a6c6d5a08
94,934
def template_directory(test_config_directory):
    """Return the path of the ``templates`` subdirectory of the test
    configuration directory."""
    return test_config_directory.joinpath('templates')
fef5db621da7f094109178dfa8c6e7b17bbd2fe1
94,944
def get_port(device):
    """Return the device's port rendered as a string."""
    port = device.port
    return str(port)
2c68a38dc12c981718d50d44be16b0ca5d317c23
94,945
from datetime import datetime


def print_time(t):
    """Render a time given in minutes as e.g. "11:23 AM".

    :type t: float time in minutes
    :rtype : str time in specified format
    """
    hours, minutes = int(t / 60), int(t % 60)
    stamp = datetime(2014, 8, 1, hours, minutes, 0)
    return stamp.strftime("%I:%M %p")
c47aad5c85c56c382ad7c6050a6b022d8933cc47
94,947
def wait_for_event(bidi_session, event_loop):
    """Wait until the BiDi session emits an event and resolve the event data."""
    def waiter(event_name: str):
        future = event_loop.create_future()

        async def handle_event(method, data):
            # Resolve once, then stop listening.
            unsubscribe()
            future.set_result(data)

        unsubscribe = bidi_session.add_event_listener(event_name, handle_event)
        return future

    return waiter
45fd51e0271b2ed8af010f18c3835e57a3979669
94,954
def build_dict(seq, key):
    """Turn an unnamed list of dicts into a named mapping of dicts, keyed
    by ``key`` and annotated with each dict's original position.

    Taken from stackoverflow
    https://stackoverflow.com/questions/4391697/find-the-index-of-a-dict-within-a-list-by-matching-the-dicts-value
    """
    return {item[key]: dict(item, index=position) for position, item in enumerate(seq)}
22c4014325aec1814fa9bcf7d5f9d1b5e29cc597
94,959
def file_read_file_location_mod(mod_file, mod_dict=None):
    """Read a file that maps file locations to replacement labels.

    File format: ``new_label<TAB>file_location`` per line; improperly
    formatted lines are skipped.

    Args:
        mod_file (str): filename path to location_set_mapping.tab
        mod_dict (dict, optional): dictionary to extend with the parsed
            entries; a fresh dict is created when omitted.

    Returns:
        dict: keys are file locations, values are single-element label lists.
    """
    # The original used a mutable default argument ({}), which is shared
    # across calls and silently accumulates state; use the None sentinel.
    if mod_dict is None:
        mod_dict = {}
    with open(mod_file) as fHan:
        for line in fHan:
            try:
                new_label, loc_to_mod = line.strip('\n').split('\t')
            except ValueError:
                # occurs if the line is improperly formatted
                continue
            mod_dict[loc_to_mod] = [new_label]
    return mod_dict
ef716372597464e80eb2aca77eb7f990230cf2e2
94,962
from collections import OrderedDict


def filter_words(word_to_idx, vectors, vocabulary):
    """Filter word vector data down to the given vocabulary.

    Args:
        word_to_idx: mapping word -> row index into ``vectors``.
        vectors: array supporting fancy indexing with a list of ints
            (e.g. a numpy array of word vectors).
        vocabulary: collection of words to keep.

    Returns:
        tuple: (OrderedDict word -> new compact index, filtered vectors).
    """
    # typing.OrderedDict is meant for annotations; instantiate the real
    # collections.OrderedDict instead of the typing alias.
    filtered_to_idx, filtered_indices = OrderedDict(), []
    for word, idx in word_to_idx.items():
        if word in vocabulary:
            filtered_to_idx[word] = len(filtered_to_idx)
            filtered_indices.append(idx)
    return filtered_to_idx, vectors[filtered_indices]
dc545455d9a60b17191602d55866acabd8006d20
94,967
def get_controller(self, name):
    """Retrieve a controller instance from the current system by name.

    The lookup follows the project convention: the attribute on the
    system is the underscore-notation name with a ``_controller`` suffix.

    :type name: String
    :param name: Controller name in underscore notation.
    :rtype: Controller
    :return: The controller retrieved from the associated system.
    """
    attribute = "%s_controller" % name
    return getattr(self.system, attribute)
4f1f7fcae76e30aad23b0dfb10ebf6bd4aabb076
94,971
def collapse_slashes(path):
    """Replaces repeated path separators ('/') with a single one."""
    chars = []
    previous = ""
    for ch in path:
        # Skip a '/' that directly follows another '/'.
        if ch == "/" and previous == "/":
            continue
        chars.append(ch)
        previous = ch
    return "".join(chars)
db64932d7959a77907c3ed07bff555c0cda442b9
94,976
def get_column_names(data, column_t_index, column_r_index):
    """Resolve the T and R column names from the first raw-data frame.

    Args:
        data (list): list of DataFrames of raw data
        column_t_index: index of the column containing temperature data
        column_r_index: index of the column containing resistance data

    Returns:
        tuple(str, str): names of the temperature and resistance columns
    """
    columns = data[0].columns
    column_t_name = columns[column_t_index]
    column_r_name = columns[column_r_index]
    print('\nSelected columns: "' + column_t_name + '", "' + column_r_name + '"')
    return column_t_name, column_r_name
23d642e630dc54ec4df1d1340a0fb63baf4bcf95
94,977
def update_price(price_input):
    """Convert a price string like "€100,000.00" to an integer (100000).

    Strips the "€" symbol and "," thousands separators, parses the
    remainder as a float, and truncates to an int.
    """
    cleaned = price_input.replace("€", "").replace(",", "")
    return int(float(cleaned))
cf7a2f803bd599e705448fd5499db54e1e63e829
94,978
from datetime import timezone


def utc_to_local(dt):
    """Convert naive datetime to local time zone, assuming UTC.

    The input is tagged as UTC, then converted to the system local zone.
    Uses only the standard library: ``astimezone()`` with no argument
    targets the local time zone, so the third-party dateutil dependency
    is unnecessary.
    """
    return dt.replace(tzinfo=timezone.utc).astimezone()
98d1d5930ab5341c99b6df3fa0787277ee931487
94,979
from typing import List
from typing import Tuple
from typing import Set


def is_unit_demand(bids: List[Tuple[Set[int], float]]):
    """
    Given bids, returns true if the bidder is unit-demand: every bid
    requests at most one item.
    """
    for items, _price in bids:
        if len(items) > 1:
            return False
    return True
d91dc295825c4b23cbe5a1884597ad270522b1cc
94,980
def bash_string(s):
    """Wrap a string in double quotes and escape things inside."""
    # Escape backslashes first so the quote escapes are not doubled.
    escaped = s.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
3283132517b5b0a857c2d46c265343d18b7c4b9c
94,985
import itertools


def powerset(iterable):
    """
    Computes the powerset of an iterable.
    See https://docs.python.org/2/library/itertools.html.
    """
    items = list(iterable)
    all_sizes = (itertools.combinations(items, size) for size in range(len(items) + 1))
    return itertools.chain.from_iterable(all_sizes)
4a77ddefe22d4cd1b934ddf33eb5f83afb4a34c1
94,988
def is_number(string):
    """Check whether a string can be converted to a number.

    Args:
        string: value as a string, could be a number

    Returns:
        bool: True if the value can be converted to a float, else False
    """
    # The original bound the conversion to an unused local variable;
    # only success/failure of float() matters.
    try:
        float(string)
    except ValueError:
        return False
    return True
b3c7575a326c10481d0c9501f0fcda59b74e6ad7
94,990
def pka(reactants, conditions):
    """Attach pka information from ``conditions`` to each reactant.

    Each reactant gets its ``pka`` and ``pka_point`` set from the
    ``conditions`` dict, looked up by the reactant's id. The "pka_point"
    is the id of the Hydrogen most likely to be donated, or the id of
    the atom most likely to accept a Hydrogen.

    Parameters
    ----------
    reactants: list[Molecule]
        A list of reactant molecules.
    conditions: dict
        Dictionary of conditions.

    Returns
    -------
    bool
        True when both 'pkas' and 'pka_points' were present and applied,
        False otherwise.

    Notes
    -----
    Eventually this will be computed; right now it just pulls specified
    information from the conditions dict.
    """
    if 'pkas' not in conditions or 'pka_points' not in conditions:
        return False
    for reactant in reactants:
        reactant.pka = conditions['pkas'][reactant.id]
        reactant.pka_point = conditions['pka_points'][reactant.id]
    return True
7ef4555d46e6a8817c85160fcc50246451535336
94,991
def calc_num_overlap_samples(samples_per_frame, percent_overlap):
    """Calculate how many samples consecutive frames share.

    Parameters
    ----------
    samples_per_frame : int
        the number of samples in each window / frame
    percent_overlap : int, float
        either a value between 0 and 100 or a fraction between 0.0 and
        1.0 indicating the amount of overlap of windows / frames

    Returns
    -------
    num_overlap_samples : int
        the number of samples in the overlap

    Examples
    --------
    >>> calc_num_overlap_samples(samples_per_frame=100,percent_overlap=0.10)
    10
    >>> calc_num_overlap_samples(samples_per_frame=100,percent_overlap=10)
    10
    >>> calc_num_overlap_samples(samples_per_frame=960,percent_overlap=0.5)
    480
    >>> calc_num_overlap_samples(samples_per_frame=960,percent_overlap=75)
    720
    """
    # Values above 1 are interpreted as percentages.
    fraction = percent_overlap * 0.01 if percent_overlap > 1 else percent_overlap
    return int(samples_per_frame * fraction)
52ab271945f38749a06543e5b447486ba623be5e
94,992
def asdict(**items):
    """Shorthand to create a dictionary using a `key=value` syntax instead
    of the standard `'key':value`."""
    return dict(items)
62cfceff5e464755d888c0aa1fed81767e04e2e9
94,993
from datetime import datetime from dateutil import tz def _localized_time(dt): """Convert a datetime object to local time""" if isinstance(dt, datetime): return dt.astimezone(tz.tzlocal()) else: return dt
59b9948e215a451a71d2e47d6ae1f1164e6ae034
94,994
def select_targets_by_prefix(cells, prefix):
    """given all cells and a parameter name that may have a prefix,
    select the cells it refers to: an 'e_' or 'i_' prefix selects the
    matching subpopulation, any other name returns all cells
    """
    if prefix.startswith('e_'):
        return cells[cells.ei_type == 'e']
    if prefix.startswith('i_'):
        return cells[cells.ei_type == 'i']
    return cells
6fa349c347b74c23de8a49348e5e55517d586ddb
94,996
from typing import Optional


def get_probability(row: dict) -> Optional[int]:
    """
    Estimate a confidence percentage from pangolin output.

    Per pangolin docs, the `ambiguity_score` is basically the number of
    lineage-defining sites that had to be imputed from the reference
    sequence. A truthy score is scaled to a rounded percentage; rows
    assigned via designation hash get 100; anything else yields None.
    """
    score = row["ambiguity_score"]
    if score:
        return round(float(score) * 100)
    if "Assigned using designation hash" in row["note"]:
        return 100
    return None
bdf23ae0f684c725ad28b36a0ead1c00b1c66661
94,997
def missing_balance(unbalanced_items, actual_split):
    """
    Get the required portions to complete balanced meals

    Args:
        unbalanced_items (dict): dictionary item_categ --> list of balanced portions
        actual_split (dict): dictionary item_categ --> ideal individual meal split

    Returns:
        required_items (dict): dictionary item categ --> missing quantity
    """
    # How many portions of each category are currently available.
    available = {
        categ: len(unbalanced_items.get(categ, []))
        for categ in actual_split
    }
    # Every category must be topped up to match the fullest one.
    fullest = max(available.values())
    return {categ: fullest - count for categ, count in available.items()}
d0f6a3a9bfbf283995fd5d25d73e3cb8a6395c74
95,001
def get_health_status(item):
    """
    extract health status for document

    :param item: source of information
    :return: list of dicts with the status text and its first ontology term;
        empty when no health-status characteristic is present
    """
    characteristics = item['characteristics']
    # 'health status' takes precedence over 'health status at collection'.
    for key in ('health status', 'health status at collection'):
        if key in characteristics:
            return [
                {'text': status['text'], 'ontologyTerms': status['ontologyTerms'][0]}
                for status in characteristics[key]
            ]
    return []
271a9b6aacc9dd5408b6784be3509ec5ad0075a2
95,006
import re


def remove_job_status_rows(df):
    """Drop the rows that represent calls to the job-status endpoint.

    For async requests there is a row for each ping of the job status
    route (``/jobs...``). Those rows carry no useful performance
    information and clutter the charts' x axis, so they are removed.

    Arguments:
        df {pandas.DataFrame} -- The dataframe to act on.

    Returns:
        {pandas.DataFrame} -- The dataframe with all job status rows removed.
    """
    pattern = re.compile(r'\/jobs.*')
    keep = ~df['Name'].str.contains(pattern)
    return df[keep]
58f060d0753800653e89b2f5aaa07d1f6fbd8e40
95,011
from math import pi


def find_volume(r):
    """Returns the volume of a sphere with a given radius"""
    volume = (4 / 3) * pi * (r ** 3)
    return volume
e13b52c4c7b21ad5da29171e56d25f659b815383
95,014
import socket


def reverse_dns(address: str) -> str:
    """Attempt to resolve an IP address to its DNS name.

    :param address: The IP address to resolve
    :return: The resolved DNS name, or the original address when the
        reverse lookup fails
    """
    try:
        return socket.gethostbyaddr(address)[0]
    except socket.herror:
        return address
36da5e3f4885cfe884f4b89c8f092e9d081697d6
95,016
def count_max_tags(arrival_interval: float, time_in_area: float) -> int:
    """
    Get maximum number of tags in area.

    Parameters
    ----------
    arrival_interval : float
    time_in_area : float

    Returns
    -------
    n : int
    """
    full_intervals = int(time_in_area // arrival_interval)
    return full_intervals + 1
cfb27f581f3cfc78ba8803aa4a7b007636a38710
95,018
def create_example(previous_lines, line, file_id):
    """Creates examples with multi-line context

    The examples will include:
      file_id: the name of the file where these lines were obtained.
      response: the current line text
      context: the previous line text
      context/0: 2 lines before
      context/1: 3 lines before, etc.
    """
    example = {
        'file_id': file_id,
        'context': previous_lines[-1],
        'response': line,
    }
    # (The original redundantly re-assigned 'file_id' and 'context' here;
    # those dead statements are removed.)
    # Older context lines, nearest first: previous_lines[-2] -> context/0, etc.
    extra_contexts = previous_lines[:-1]
    example.update({
        'context/{}'.format(i): context
        for i, context in enumerate(extra_contexts[::-1])
    })
    return example
8afaeef565dd591d7c718753c09552d26cff583a
95,021
def _update_with_csrf_disabled(d=None): """Update the input dict with CSRF disabled.""" if d is None: d = {} d.setdefault('meta', {}) d['meta'].update({'csrf': False}) return d
66c45822065e4edbf5fea73081df2d14c094d5a6
95,024
def transform(pts, trans):
    """Applies the SE3 transformations, support torch.Tensor and np.ndarry.

    Equation: trans_pts = R @ pts + t

    Input
        - pts: [num_pts, 3] or [bs, num_pts, 3], pts to be transformed
        - trans: [4, 4] or [bs, 4, 4], SE3 transformation matrix
    Output
        - pts: [num_pts, 3] or [bs, num_pts, 3] transformed pts
    """
    batched = len(pts.shape) == 3
    if batched:
        rotated = trans[:, :3, :3] @ pts.permute(0, 2, 1) + trans[:, :3, 3:4]
        return rotated.permute(0, 2, 1)
    rotated = trans[:3, :3] @ pts.T + trans[:3, 3:4]
    return rotated.T
0e21ae0d7d3c327acb72f7b83f3bfbb785ab624d
95,027
def decimal_to_binary(decimal):
    """Convert an integer into a binary string. E.g. 5 -> '101'."""
    return '{:b}'.format(decimal)
ba0a51662f100c082e9c30f91905c5eb5d96be3b
95,028
import copy


def rename(object, complex):
    """
    Takes a PDB object and a complex. Returns a copy of the PDB object so
    that no chain in the object has the same id as a chain in the complex.
    Renaming is done checking non-used chain names in the complex, starting
    form ASCII character A.
    """
    # ASCII A-Za-z encoded on decimal 65 to 122
    # Deep-copy so the caller's object keeps its original chain ids.
    object_copy = copy.deepcopy(object)
    for chain in object_copy.get_chains():
        N = 65  # restart candidate ids at 'A' for each clashing chain
        while chain.get_id() in [a.get_id() for a in complex.get_chains()]:
            try:
                chain.id = chr(N)
            except ValueError:
                # NOTE(review): presumably the PDB library refuses an id
                # already used within the parent structure; the loop then
                # simply tries the next candidate — confirm this is the
                # intended retry behaviour.
                pass
            N += 1
            # NOTE(review): N can pass 90 ('Z') into chr(91..96), which are
            # punctuation rather than letters as the comment above implies —
            # confirm whether ids like '[' are acceptable chain names.
    return object_copy
e7da348795e3bb42702b1ca740fdcc09e1ce25aa
95,029
def lookup_with_backup(mapping: dict, key: object, backup_key: object) -> object:
    """Return the corresponding value of key in mapping, falling back to
    backup_key when key is absent.

    This assumes that at least one of key and backup_key is a key in
    mapping.

    Preconditions:
      - key in mapping or backup_key in mapping

    >>> example_dict = {'Burger': 5.0, 'Fries': 3.0}
    >>> lookup_with_backup(example_dict, 'Fries', 'Burger')
    3.0
    >>> lookup_with_backup(example_dict, 'Cheeseburger', 'Burger')
    5.0
    """
    try:
        return mapping[key]
    except KeyError:
        return mapping[backup_key]
14a1c8a14e842377666c55ac4d131404dd8c2e48
95,030
def get_f_H_i_1(region, glass_spec_category):
    """Heating-period solar heat gain correction factor for openings
    installed in a roof, or in the ceiling directly beneath a roof
    (skylights etc.).

    Args:
        region (int): energy-efficiency region (1-8)
        glass_spec_category: glass specification category (1-7)

    Returns:
        float: the heating-period solar heat gain correction factor
    """
    # Table 1(a): rows are glass specification categories, columns are
    # regions (region 8 has no defined value).
    table_1_a = (
        (0.90, 0.91, 0.91, 0.91, 0.90, 0.90, 0.90, None),
        (0.85, 0.86, 0.86, 0.87, 0.85, 0.85, 0.85, None),
        (0.83, 0.84, 0.84, 0.85, 0.83, 0.84, 0.83, None),
        (0.85, 0.86, 0.86, 0.87, 0.85, 0.85, 0.85, None),
        (0.82, 0.83, 0.83, 0.84, 0.82, 0.82, 0.82, None),
        (0.82, 0.83, 0.83, 0.84, 0.82, 0.82, 0.82, None),
        (0.80, 0.81, 0.81, 0.82, 0.80, 0.80, 0.80, None),
    )
    row = table_1_a[glass_spec_category - 1]
    return row[region - 1]
a6789c0295640014d89f66fd921ff65b8f6b3ab8
95,031
def binary_search(array, key) -> int:
    """
    Binary search algorithm.

    :param array: the sorted array to be searched.
    :param key: the key value to be searched.
    :return: index of key value if found, otherwise -1.

    >>> array = list(range(10))
    >>> for index, item in enumerate(array):
    ...     assert index == binary_search(array, item)
    >>> binary_search(array, 10) == -1
    True
    >>> binary_search(array, -1) == -1
    True
    """
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if array[mid] == key:
            return mid
        if array[mid] < key:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
2a44aea6ee3d058847b4217f05649440ee4dd793
95,035
def at(line):
    """count a and t in dna sequence"""
    return sum(1 for letter in line if letter in ("A", "T"))
9a2010b17b74619e908088e3908c4f2ccb63b671
95,036
def renumber_filename(filename, i, zfill_len):
    """
    Replace the trailing number of ``filename``'s stem with ``i``,
    zero-padded to ``zfill_len`` digits.

    E.g. from ".../segmented/output_035_6.wav" to
    ".../segmented/output_035_012.wav" when ``i`` = 12 and
    ``zfill_len`` = 3.
    """
    prefix = filename.stem.rpartition("_")[0]
    padded = str(i).zfill(zfill_len)
    renamed = f"{prefix}_{padded}{filename.suffix}"
    return filename.parent / renamed
3752d130f7ad8778d3b5c8c20cd9a4dac14df400
95,037
def format_time(row):
    """Format the date and time into ISO 8601 format YYYY-MM-DDThh:mm:ss+00:00"""
    stamp = row['date_time']
    return stamp.strftime('%Y-%m-%dT%H:%M:%S+00:00')
0c406bf082bdac5dd1d7cd67c9b2736f4b6294cd
95,046
def cria_posicao(col, ln):  # str x str -> posicao
    """
    Build a board position from a column and a line character.

    :param col: column, one of 'a', 'b' or 'c'
    :param ln: line, one of '1', '2' or '3'
    :return: board position as a (column, line) tuple of ints 0..2
    :raises ValueError: if either argument is invalid
    """
    columns = {'a': 0, 'b': 1, 'c': 2}
    lines = {'1': 0, '2': 1, '3': 2}
    try:
        return columns[col], lines[ln]
    except (KeyError, TypeError):
        raise ValueError('cria_posicao: argumentos invalidos')
b594c6bccc7fa497ea14592cf6ca92d2372ac695
95,050
import torch


def add_alpha(img):
    """
    Append a fully-opaque alpha channel along the last axis of a tensor
    whose last axis corresponds to RGB color.

    Parameters
    ----------
    img : torch.Tensor
        The RGB image.

    Returns
    -------
    torch.Tensor
        The resulting RGBA image.
    """
    shape = list(img.shape)
    shape[-1] = 1
    alpha = torch.ones(shape, dtype=img.dtype)
    return torch.cat([img, alpha], dim=-1)
c93ba15875f26dcb99fc03df898d53dd8177c168
95,053
import re


def ipv6(value: str):
    """
    Validator IPv6 address

    Example Result: [2001:0db8:0000:0000:0000:8a2e:0370:7334, 2001:db8::8a2e:370:7334]

    Detail: https://en.wikipedia.org/wiki/IPv6
    """
    # Alternation of IPv6 textual forms: full/compressed hex groups,
    # IPv4-mapped tails, and fe80 link-local addresses with a zone index.
    # All groups are non-capturing, so re.findall returns whole matches.
    # NOTE(review): returns a list of matches found anywhere in `value`
    # (not a bool), so "validator" here means "extractor" — confirm
    # callers expect a list.
    _ipv6_pat = r'(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,4}:[^\s:](?:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))|(?:::(?:ffff(?::0{1,4}){0,1}:){0,1}[^\s:](?:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))|(?:fe80:(?::(?:(?:[0-9a-fA-F]){1,4})){0,4}%[0-9a-zA-Z]{1,})|(?::(?:(?::(?:(?:[0-9a-fA-F]){1,4})){1,7}|:))|(?:(?:(?:[0-9a-fA-F]){1,4}):(?:(?::(?:(?:[0-9a-fA-F]){1,4})){1,6}))|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,2}(?::(?:(?:[0-9a-fA-F]){1,4})){1,5})|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,3}(?::(?:(?:[0-9a-fA-F]){1,4})){1,4})|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,4}(?::(?:(?:[0-9a-fA-F]){1,4})){1,3})|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,5}(?::(?:(?:[0-9a-fA-F]){1,4})){1,2})|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,6}:(?:(?:[0-9a-fA-F]){1,4}))|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){1,7}:)|(?:(?:(?:(?:[0-9a-fA-F]){1,4}):){7,7}(?:(?:[0-9a-fA-F]){1,4}))'  # pylint: disable=C0301
    return re.findall(_ipv6_pat, value)
e0f2e0b2fa537aab2d63c9670f5d2696b1783a75
95,054
def make_data_object_name(dataset_name, year, month, day, hour, realization, forecast_period):
    """Create a string formatted to give a filename in the MOGREPS dataset."""
    return (
        "prods_op_{}_{:02d}{:02d}{:02d}_{:02d}_{:02d}_{:03d}.nc"
        .format(dataset_name, year, month, day, hour, realization, forecast_period)
    )
e544d213a21b34c4593a0248ed04862124bfaed8
95,055
def decode_kv_read(proto_kv_read):
    """Decodes Key Value Read

    :param proto_kv_read: Object of the key value with read contents
    :return: deserialized key value read contents with block num and tx_num
    """
    version = proto_kv_read.version
    decoded_version = None
    if version:
        decoded_version = {
            'block_num': str(version.block_num),
            'tx_num': str(version.tx_num),
        }
    return {'key': proto_kv_read.key, 'version': decoded_version}
a4dd599ed93f31b7a707fd5ba8b884bce24767cb
95,056
def get_image_url(self, image_id):
    """Retrieve the image url for the given image id."""
    info = self.image_infos[image_id]
    return info['image_url']
227d3d7d800704f55d342b093116a6211f4b1568
95,058
def evaluate_model(model, test_set):
    """
    Evaluates trained model on test set

    :param model: Trained keras model
    :param test_set: (X_test, y_test)
    :return: (loss, accuracy)
    """
    features, labels = test_set
    loss, acc = model.evaluate(features, labels)
    return loss, acc
8d0c0632ab66ce1b8b3b0e2ca49227544315c321
95,060
def not_filter(filter_predicate):
    """Negate a filter predicate with the logical NOT."""
    def negated(properties):
        return not filter_predicate(properties)
    return negated
3cf0dd629e11125e128c066dc44db06984324d64
95,061
def check_hit_angle(self):
    """Returns how well the car is facing the ball in degrees"""
    # Work in the 2D plane: flat() presumably drops the vertical component
    # — TODO confirm against the vector library.
    future_ball_location = self.target_location.flat()
    # Vectors from the predicted ball location to the contact points.
    ball_contact_point = self.offset_ball_location.flat()-future_ball_location
    car_location = self.car_contact_point.flat()-future_ball_location
    # ang_to returns radians; 57.2958 ~= 180/pi converts to degrees.
    hit_angle = ball_contact_point.ang_to(car_location) * 57.2958
    return hit_angle
f03df227220ef54a29293b60d60be8a78068489f
95,066
def make_box(center, width, height, img_shape, xywh=True):
    """Make a box clipped to an image range

    Parameters
    ----------
    center : list[int] or tuple(int)
        the center of the box. (y, x) order.
    width : int
        box width
    height : int
        box height
    img_shape : list[int] or tuple(int)
        image shapes. (width, height) order.
    xywh : bool, optional
        if true, return [left_x, top_y, width, height],
        if false, return [left, top, right, bottom], by default True

    Returns
    -------
    box : list[int]
        [left_x, top_y, width, height] or [left, top, right, bottom]
    """
    x1 = int(max(center[1] - width / 2., 0))
    y1 = int(max(center[0] - height / 2., 0))
    # Bug fix: the original clamped BOTH x2 and y2 by img_shape[1], which
    # cannot be correct for non-square images. Per the documented
    # (width, height) order: x is bounded by img_shape[0], y by img_shape[1].
    x2 = int(min(center[1] + width / 2., img_shape[0] - 1))
    y2 = int(min(center[0] + height / 2., img_shape[1] - 1))
    w = x2 - x1
    h = y2 - y1
    if xywh:
        return [x1, y1, w, h]
    else:
        return [x1, y1, x2, y2]
90099dad3ba055350e1111a9925c3ef368a9143d
95,069
def coins_annually(xmr_per_day: float, precision: int) -> float:
    """
    Computes how many XMR coins you can mine annually (365 days).

    :param xmr_per_day: Float. Amount of XMR you can mine per day.
    :param precision: Int. Precision of computing.
    :return: Float. Amount of XMR you get per year.
    """
    days_in_year = 365
    return round(days_in_year * xmr_per_day, precision)
eb2c9ae9d268af63a032d21bd002db55a28e2b34
95,070
def is_apt(target):
    """Returns True if the target exports an annotation processor."""
    has_apt_label = target.has_label('apt')
    return has_apt_label
7a6df26f14439db04b1c7b6ec7f5d122131033f1
95,073
def get_tax_branch(taxid, tax_tree):
    """
    Return the lineage of ``taxid``: the taxid itself followed by each
    successive parent, stopping at the root of the tree (taxid 0, which
    is excluded).
    """
    branch = []
    current = taxid
    while current != 0:
        branch.append(current)
        current = tax_tree[current]
    return branch
674cd596da354a46244f50c7f5eb4ab43fc321f0
95,078
def _best_rect_ratio_match(bricks, size, margin): """ Sort and filter list of bricks based on target ration and margin 'margin' determines how easily a brick passes filtering. Results are sorted from best matching to worst before returning the list. """ def _ratio(_size): if _size[1] == 0: return 0 else: return _size[0] / _size[1] target_ratio = _ratio(size) bricks = filter(lambda x: abs(_ratio(x['dimensions']) - target_ratio) < target_ratio * margin, bricks) return sorted(bricks, key=lambda x: abs(_ratio(x['dimensions']) - target_ratio))
45da03537fabc4788f0505fac1890cfff05c61ab
95,083
def get_device_system_ports(cfg_facts):
    """Flatten the SYSTEM_PORT config facts into a single dictionary.

    The ansible config_facts module splits keys of the form
    ``part1|part2`` into a two-level nested dictionary; this restores the
    original flat ``slot|port`` keys.

    Args:
        cfg_facts: The "ansible_facts" output from the duthost
            "config_facts" module.

    Returns:
        dict: system port config facts in a single-layer dictionary.
    """
    nested = cfg_facts['SYSTEM_PORT']
    return {
        "|".join((slot, port)): attrs
        for slot, ports in nested.items()
        for port, attrs in ports.items()
    }
dd7847a4f99a5c31a8cc309f59918dc2e5a62c55
95,084
def update(self) -> int:
    """Updates the edge server.

    Returns:
        int: Update duration.
    """
    duration = self.patch + self.sanity_check
    self.updated = True
    # Record which maintenance batch this update ran in and how long it took.
    self.update_metadata = {
        "maintenance_batch": self.simulator.maintenance_batches,
        "duration": duration,
    }
    return duration
08c5b6eb91c1cf89ee861a090a81d28142413f7a
95,089
def extract_fasta_file(uri: str) -> str | None: """ Returns all characters after the last slash '/'. :param uri: The URI string to process. :return: Returns any characters after the last character or None if no slash is present. """ return uri[uri.rindex("/") + 1 :] if "/" in uri else None
ee8c99e85e15488ba49c90998801b8683a1a12f7
95,092
import torch


def sequence_mask(lengths, max_len=None):
    """Creates a boolean mask from sequence lengths.

    Args:
        lengths (torch.Tensor): lengths with shape (bs,)
        max_len (int, optional): max sequence length; defaults to
            ``lengths.max()`` when not given.

    Returns:
        torch.Tensor: boolean mask of shape (bs, max_len)
    """
    limit = lengths.max() if max_len is None else max_len
    positions = torch.arange(limit, device=lengths.device)
    positions = positions.unsqueeze(0).expand(lengths.shape[0], -1)
    return positions < lengths.unsqueeze(1)
d388702b98d2e9059266abc0713b5a8302a543d6
95,094
def for_stmt_defines_one_name(for_stmt):
    """
    Returns True if only one name is returned: ``for x in y``.
    Returns False if the for loop is more complicated: ``for x, z in y``.

    :returns: bool
    """
    target = for_stmt.children[1]
    return target.type == 'name'
4358e8aac381eb7f0badbf939263423ba56894ce
95,112
import glob


def listFiles(wildcardstr):
    """
    List the file names in the current directory matching a wildcard,
    eg te.listFiles ('*.xml')

    :param wildcardstr: wildcard used during the file search
    :returns: list of file names that match the wildcard
    """
    matches = glob.glob(wildcardstr)
    return matches
74633e0d7a8c87755fde0325a52d5e14d9c4e237
95,113
def _select_closest(to_search_df, target_series): """ Find row in ``to_search_df`` that is closest to the target array. Here, 'closest' is in the root-mean squared sense. In the event that multiple rows are equally close, returns first row. Parameters ---------- to_search_df : :obj:`pd.DataFrame` The rows of this dataframe are the candidate closest vectors target_series : :obj:`pd.Series` The vector to which we want to be close Returns ------- dict Metadata of the closest row. """ if len(target_series.shape) != 1: raise ValueError("Target array is multidimensional") if target_series.shape[0] != to_search_df.shape[1]: raise ValueError( "Target array does not match the size of the searchable arrays" ) closeness = [] for label, row in to_search_df.iterrows(): rms = (((target_series - row) ** 2).mean()) ** 0.5 closeness.append((label, rms)) # Find the minimum closeness and return the index of it labels, rmss = list(zip(*closeness)) to_return = rmss.index(min(rmss)) return dict(zip(to_search_df.index.names, labels[to_return]))
566c7a4e97cc972f0c0606183c2333fc598613da
95,114
def percent(value):
    """Formats the given float as a percentage."""
    scaled = value * 100
    return "{:f}%".format(scaled)
e4408986346224fef0ee14cdb07cccd3d69c715e
95,116
def binary_search(arr, val):
    """Binary-search a sorted list for *val*.

    Returns the index of *val* if present in *arr*, otherwise -1.
    """
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] < val:
            lo = mid + 1
        elif arr[mid] > val:
            hi = mid
        else:
            return mid
    return -1
49e25dd5ba4e9e92d6b1225141efa74f48ee19b1
95,118
def replicate(x, n):
    """
    Return a sequence containing n copies of x.

    >>> replicate(True, 3)
    [True, True, True]

    If n=0, this will return the empty list.

    >>> replicate(101, 0)
    []
    """
    return [x for _ in range(n)]
2fa0f8afdfdd29a7b9d9b467106b3b1a30334804
95,120
def compare_lists(list1, name1, list2, name2):
    """Compare two lists and check for different or missing entries.

    Prints extra entries found in list2 compared to list1 as well as
    entries missing from list2 compared to list1.

    Parameters
    ----------
    list1: list
    name1: str - name to be printed in comparison
    list2: second list
    name2: str - name to be printed in comparison

    Returns
    -------
    passed: boolean, returns True if lists match
    """
    no_match = [x for x in list2 if x not in list1]
    missing = [x for x in list1 if x not in list2]
    passed = True
    # Report both kinds of discrepancy.  The original 'elif' silently hid
    # missing entries whenever extra entries were also present.
    if no_match:
        for x in no_match:
            print('{0} is in {2} but not in {1}'.format(x, name1, name2))
        passed = False
    if missing:
        for x in missing:
            print('{0} is missing in {2} compared to {1}'.format(x, name1, name2))
        passed = False
    if passed:
        print('lists match: {0}, {1}'.format(name1, name2))
    return passed
1f55b430bc94b602eeda109374863b884b1122f6
95,121
import stat


def mode_allows_group_or_other(st_mode):
    """
    Return True when st_mode grants any permission to group or other.

    :param st_mode: int: mode bit set from a file (e.g. os.stat().st_mode)
    :return: bool: True when group or other has some permissions
    """
    group_or_other_bits = stat.S_IRWXG | stat.S_IRWXO
    return bool(st_mode & group_or_other_bits)
7090aa3cccd006777c91c700dd5fc5e633369c10
95,127
def get_index_of_csr_data(i, j, indptr, indices):
    """
    Get the value index of the i,j-element of a matrix in CSR format.

    Parameters
    ----------
    i : int
        row index which is asked to get the CSR-index for
    j : int
        column index which is asked to get the CSR-index for
    indptr : ndarray
        index-ptr-Array of the CSR-Matrix.
    indices : ndarray
        indices array of CSR-matrix (represents the nonzero column indices)

    Returns
    -------
    k : int
        index of the value array of the CSR-matrix, in which value [i,j] is
        stored.

    Notes
    -----
    This routine works only, if the tuple i,j is actually a real entry of
    the matrix. Otherwise the value k=0 will be returned and an error
    message will be printed.
    """
    # Column indices for row i are stored in indices[indptr[i]:indptr[i+1]].
    row_end = indptr[i + 1]
    k = indptr[i]
    # Scan the nonzero columns of row i for j, never crossing into the
    # next row.  The original check used 'k > indptr[i+1]', which let the
    # scan read one entry of the following row and could silently return
    # a wrong index when j was absent (or the row was empty).
    while k < row_end and indices[k] != j:
        k += 1
    if k >= row_end:
        print('ERROR! The index in the csr matrix is not preallocated!')
        return 0
    return k
a28bcf6ef89b3756e9b89908ca5ffe6fda620d24
95,129
def get_kwargs_defaults(argspec):
    """Computes a kwargs_defaults dictionary for use by get_args_tuple given an argspec."""
    names = tuple(argspec.args)
    default_values = argspec.defaults or ()
    # Positional defaults align with the trailing positional parameters.
    first_defaulted = len(names) - len(default_values)
    kwargs_defaults = dict(zip(names[first_defaulted:], default_values))
    # Fold in keyword-only defaults when the argspec carries them.
    kwonly = getattr(argspec, "kwonlydefaults", None)
    if kwonly:
        kwargs_defaults.update(kwonly)
    return kwargs_defaults
d21c8dc20262f39e16400342770c40b5ea0a7e3b
95,133
def odds_to_prob(odds):
    """
    Convert the odds given to the expected probability of the event

    :param odds: numeric: the odds given for the event
    :return: decimal: the probability of the event occurring
    """
    denominator = 1 + odds
    return 1 / denominator
978b96c778c170f0cf13a77a8ebd83f92d135355
95,136
def GetTensorFlowVersion(vm):
    """Returns the version of tensorflow installed on the vm.

    Args:
      vm: the target vm on which to check the tensorflow version

    Returns:
      installed python tensorflow version as a string
    """
    # Print tensorflow.__version__ via the remote python interpreter.
    version_cmd = (
        'echo -e "import tensorflow\nprint(tensorflow.__version__)" | python'
    )
    stdout, _ = vm.RemoteCommand(version_cmd)
    return stdout.strip()
ed8de7bcd1fd0f1bed6f4b6659afbc1411e1ceed
95,137
def _iou(box1, box2, precision=1e-5): """ Calculate the Intersection over Union value for 2 bounding boxes :param box1: array of 4 values (top left and bottom right coords): [x0, y0, x1, x2] :param box2: same as box1 :param precision: calculate precision for calculating :return: IoU """ box1_x0, box1_y0, box1_x1, box1_y1 = box1 box2_x0, box2_y0, box2_x1, box2_y1 = box2 int_x0 = max(box1_x0, box2_x0) int_y0 = max(box1_y0, box2_y0) int_x1 = min(box1_x1, box2_x1) int_y1 = min(box1_y1, box2_y1) int_area = max(int_x1 - int_x0, 0) * max(int_y1 - int_y0, 0) b1_area = (box1_x1 - box1_x0) * (box1_y1 - box1_y0) b2_area = (box2_x1 - box2_x0) * (box2_y1 - box2_y0) # we add small epsilon of 1e-05 to avoid division by 0 ret_iou = int_area / (b1_area + b2_area - int_area + precision) return ret_iou
6d56e51006a7f16a9253d14487e239b54225c8ad
95,138
def _get_gcs_path(base_path, content_type, root_id, timestamp): """Generate a GCS object path for CAI dump. Args: base_path (str): The GCS bucket, starting with 'gs://'. content_type (str): The Cloud Asset content type for this export. root_id (str): The root resource ID for this export. timestamp (int): The timestamp for this export. Returns: str: The full path to a GCS object to store export the data to. """ return '{}/{}-{}-{}.dump'.format(base_path, root_id.replace('/', '-'), content_type.lower(), timestamp)
de36e848bc251a099d899ffd6e39ed83910f3228
95,141
def reshape_dimensions_single(x, begin_axis, end_axis, target_dims):
    """
    Reshape a contiguous range of dimensions in a tensor to a target shape.

    Args:
        x (torch.Tensor): tensor to reshape
        begin_axis (int): begin dimension
        end_axis (int): end dimension
        target_dims (tuple or list): target shape for the range of dimensions
            (@begin_axis, @end_axis)

    Returns:
        y (torch.Tensor): reshaped tensor
    """
    assert begin_axis <= end_axis
    assert begin_axis >= 0
    assert end_axis < len(x.shape)
    assert isinstance(target_dims, (tuple, list))
    shape = list(x.shape)
    # Dimensions [begin_axis, end_axis] are replaced by target_dims.
    new_shape = shape[:begin_axis] + list(target_dims) + shape[end_axis + 1:]
    return x.reshape(*new_shape)
006d23754dc9470524a6713b2e16d826452737d0
95,142
def throws_exception(call, exceptions=(Exception,)):
    """
    Invoke *call* and report whether it raises one of *exceptions*.

    Any exception type not listed is swallowed and reported as False,
    preserving the original best-effort contract.

    Args:
        call: zero-argument callable to invoke.
        exceptions: iterable of exception types to test for.  The default
            was changed from the mutable ``[Exception]`` to the immutable
            ``(Exception,)``; behavior is unchanged.

    Returns:
        bool: True if *call* raised one of *exceptions*, else False.
    """
    try:
        call()
    except tuple(exceptions):
        return True
    except Exception:
        # A different exception type was raised; not one we were asked about.
        pass
    return False
53e4f35c5e23f39a8a5275d24350d9d3cad16066
95,143
def dict_to_tabs( d, order=None ):
    """
    Return tab-separated values for all values in *d*, ordered by the keys
    in *order* (if given, otherwise by the dict's own key order).
    """
    keys = order or d.keys()
    fields = [str(d[key]) for key in keys]
    return '\t'.join(fields)
3fa180edbf482cf73152ec24e07119c3f7c6cdbb
95,145
import click


def module_template_options(func):
    """Merge the module template option decorators into a single one."""
    group_id_dec = click.option(
        "--group-id", "-g",
        default="com.edgemodule",
        show_default=True,
        help="(Java modules only) Specify the groupId")
    template_dec = click.option(
        "--template", "-t",
        default="csharp",
        show_default=True,
        required=False,
        type=click.Choice(["c", "csharp", "java", "nodejs", "python", "csharpfunction"]),
        help="Specify the template used to create the default module")
    # Apply group-id first, then template, matching the original stacking.
    return template_dec(group_id_dec(func))
246b65725c199ac21dd5e22aa355f830143929fd
95,148
def mse(C, ST, D_actual, D_calculated):
    """
    Mean square error between D_actual and D_calculated.

    Note: C and ST are accepted for interface compatibility but are not
    used in the computation.
    """
    residuals = D_actual - D_calculated
    return (residuals ** 2).sum() / D_actual.size
c6549daefd35244ba7a2eaf68ff31e31f0d87e3e
95,151
def remove_id(word):
    """Removes the numeric suffix from the parsed recognized words:
    e.g. 'word-2' > 'word'

    Words without a '-' are returned unchanged.  The original
    ``count(...) and word or ...`` idiom raised ValueError on the empty
    string, because a falsy *word* fell through to the ``or`` branch and
    ``rindex`` found no '-'.
    """
    if "-" in word:
        return word[:word.rindex("-")]
    return word
830d420d648322a631bfeeb91a11c35215b703ff
95,152
def get_mxnet_model_info(model):
    """Get native MXNet model details for given Keras Model.

    Returns ``data_names`` and ``data_shapes`` that can be used to bind the
    model in an MXNet Module; they describe the input layer names and
    shapes.  The first dimension of each entry in ``data_shapes`` can be
    changed to match the batch size for inference.

    Note: You should use `save_mxnet_model()` API for saving the model in
    native MXNet model format.

    # Arguments
        model: Keras model instance from which to extract MXNet model details.

    # Returns
        data_names, data_shapes

    # Raises
        AssertionError if Model is not compiled.
    """
    assert model is not None, 'MXNet Backend: Invalid state. Model cannot be None.'

    # Underlying MXNet symbol/module used for inference in the native engine.
    symbol = model._pred_mxnet_symbol
    module = model._module
    assert symbol is not None, 'MXNet Backend: Invalid state. MXNet Symbol cannot be None.'
    assert module is not None, 'MXNet Backend: Invalid state. MXNet Module cannot be None.'

    # The 'pred' bucket holds the inference-time module; its data_names and
    # data_shapes make it easy to bind the exported model in MXNet.
    pred_module = module._buckets['pred']
    return pred_module.data_names, pred_module.data_shapes
5051e847298dfc892ab0a39000b5fd1905a97187
95,153
def calc_p1(p, p_p, p_m, i, cd):
    """Calculate the control point p1 of the current cubic Bezier curve.

    NOTE(review): in the non-first-point branch the averaged tangent is
    divided by cd twice (once per component and once for the sum); this
    mirrors the original code exactly — confirm it is intended.
    """
    if not i:
        # First point: lean toward the following point only.
        x = p.x + ((p_p.x - p.x) / cd)
        y = p.y + ((p_p.y - p.y) / cd)
        return x, y
    dx_prev = -(p_m.x - p.x) / cd
    dy_prev = - (p_m.y - p.y) / cd
    dx_next = (p_p.x - p.x) / cd
    dy_next = (p_p.y - p.y) / cd
    x = p.x + (dx_prev + dx_next) / cd
    y = p.y + (dy_prev + dy_next) / cd
    return x, y
a44a3965b55252e6eb2411085a9421e5e5fa1a33
95,161
def calculate_overlap(a: str, b: str) -> int:
    """
    Calculate the length of the longest overlap where a suffix of ``a``
    equals a prefix of ``b``, using the Knuth–Morris–Pratt prefix function.
    """
    # Prefix-function of "b#a": its final value is the longest prefix of b
    # that is also a suffix of a ('#' acts as a separator).
    combined = b + '#' + a
    pi = [0] * len(combined)
    for idx in range(1, len(combined)):
        j = pi[idx - 1]
        while j > 0 and combined[idx] != combined[j]:
            j = pi[j - 1]
        if combined[idx] == combined[j]:
            j += 1
        pi[idx] = j
    return pi[-1]
3f2f902d8a9b8d23d69cfbd502cc4d75723e55c5
95,165
import hashlib


def sha1sum(filename: str) -> str:
    """
    Generate a SHA1 hash of the provided file.

    Args:
        filename: Absolute path to file to hash as `str`

    Returns:
        SHA-1 hash (hex digest)
    """
    digest = hashlib.sha1()
    # Read in fixed-size chunks so arbitrarily large files fit in memory.
    with open(filename, 'rb') as f:
        while True:
            chunk = f.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
f533acc0c89cfe7e5847963e7aad6e0119c4eb78
95,169
def is_s3_schema(s: str) -> bool:
    """Check whether a string represents an S3 schema."""
    s3_schema = "s3://"
    return s == s3_schema
3dea2c8f518bf844d4b341ee4a5b8283fd38747d
95,183
def _is_domain_2nd_level(hostname): """returns True if hostname is a 2nd level TLD. e.g. the 'elifesciences' in 'journal.elifesciences.org'. '.org' would be the first-level domain name, and 'journal' would be the third-level or 'sub' domain name.""" return hostname.count(".") == 1
3836089b06d0c40938c45a0044ce1464053aaaf9
95,184
def sort_data(angs, seqs, crds, ids):
    """Sort all four parallel lists by sequence length, shortest first.

    Length is taken from the first dimension of each entry in *angs*;
    the same permutation is applied to every list.
    """
    order = sorted(range(len(angs)), key=lambda idx: angs[idx].shape[0])
    angs = [angs[idx] for idx in order]
    seqs = [seqs[idx] for idx in order]
    crds = [crds[idx] for idx in order]
    ids = [ids[idx] for idx in order]
    return angs, seqs, crds, ids
3d15a0abd321b4570d142c65d5792fe60c6fe0dd
95,194
import re


def checkio(data):
    """
    Return True when *data* is a valid password.

    The pattern requires, over ASCII word characters only (flags=re.A):
    (?=.*[0-9])  - at least one digit,
    (?=.*[A-Z])  - at least one upper-case letter,
    (?=.*[a-z])  - at least one lower-case letter,
    (?=\\w{10})  - a minimum length of 10,
    ^...\\w+$    - the whole string consists of word characters.
    """
    match = re.match(r"^(?=.*[0-9])(?=.*[A-Z])(?=.*[a-z])(?=\w{10})\w+$",
                     data, flags=re.A)
    return match is not None
67b8ae5e6564d4e5e959ce7b4097960296cdd381
95,195
def get_bytes_asset_dict(
    data: bytes,
    target_location: str,
    target_name: str,
):
    """Build the asset dictionary describing where a binary blob is saved.

    Args:
        data: the bytes to save
        target_location: sub-directory (relative to the run-directory root)
            the file will be written to.
        target_name: filename the file will be written as.

    Returns:
        dict: an asset dictionary with 'bytes', 'target_location' and
        'target_name' keys.
    """
    asset = {
        "bytes": data,
        "target_location": target_location,
        "target_name": target_name,
    }
    return asset
16bfb7f3b675dc67688b1e06b5b1c34a98e01783
95,196
def get_accuracy_func(config):
    """Get the accuracy function for an Audioset task.

    Note: The accuracy is computed from the prediction in the **last
    timestep only**, where the last timestep refers to the **unpadded
    sequence**.

    Args:
        config (argparse.Namespace): The command line arguments.

    Returns:
        (func): An accuracy function handle.
    """
    def get_accuracy(logit_outputs, targets, data, batch_ids):
        """Compute the accuracy for an Audioset task.

        In the multi-head scenario the correct output head is assumed to
        have been selected already, i.e. ``logit_outputs`` are the outputs
        of the correct head.

        Args:
            (....) See docstring of function
                :func:`sequential.copy.train_utils_copy.get_accuracy`.

        Returns:
            (....) See docstring of function
                :func:`sequential.copy.train_utils_copy.get_accuracy`.
        """
        num_timesteps, num_samples = targets.shape[0], targets.shape[1]

        # Only the prediction at the final timestep counts.  The softmax is
        # skipped since it does not change the argmax.
        final_logits = logit_outputs[num_timesteps - 1, :, :]
        predicted = final_logits.argmax(dim=1)

        # Targets are identical across timesteps, so the first one suffices.
        label = targets[0, :, :].argmax(dim=1)

        num_correct = (predicted == label).sum().cpu().item()
        accuracy = 100. * num_correct / num_samples

        # Per-timestep accuracy is not implemented, hence the None.
        return accuracy, None

    return get_accuracy
9ef0307fcaf3d1cb179ff17c6c1264a219935fde
95,198
def duplicate_header(header, duplicate):
    """Create and return a 'Duplicate header' error string"""
    dup_ctx = duplicate.start_context()
    orig_ctx = header.start_context()
    errmsg = 'Duplicate header, {}{}'.format(header.name, dup_ctx)
    # Mention where the original lives, when it has a context.
    if len(orig_ctx) > 0:
        errmsg += ', original{}'.format(orig_ctx)
    return errmsg
00b9154dfc4267936a3d832f0a24aae74632d761
95,202
import collections


def group_by_dtype(tensors):
    """
    Group tensors by their dtype.

    Arguments:
        tensors (Iterable[Tensor]): list of tensors.

    Returns:
        A defaultdict mapping each dtype to the list of tensors of that
        dtype, preserving the original iteration order within each group.
    """
    grouped = collections.defaultdict(list)
    for t in tensors:
        grouped[t.dtype].append(t)
    return grouped
20e4270e376d77ed14620f883a553e42d7120507
95,208