Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def filter_values(dictionary):
    # type: (dict) -> dict
    """
    Remove `None` value entries in the dictionary.

    :param dictionary: input dictionary
    :return: a new dict without the `None`-valued entries
    """
    return {key: value for key, value in dictionary.items() if value is not None}
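As a quick, hypothetical sanity check of filter_values (only None is dropped; falsy values such as 0 are kept):

filter_values({"a": 1, "b": None, "c": 0})  # -> {"a": 1, "c": 0}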
614cec351146341c900b6e6ba1b5ba0bbdd67923
76,238
import uuid


def store_mail(content):
    """Saves mail as a file to send it to Cortex analyzers.

    arguments:
    - content: mail payload to write on disk

    returns:
    - eml_path: path on disk
    """
    eml_path = "/wip/{}.eml".format(uuid.uuid4())
    with open(eml_path, "wb") as f:
        f.write(content)
    return eml_path
92f7efd7288f145596832ab884da087453cc890a
76,240
def format_as_diagnostics(lines):
    """Format the lines as diagnostics output by prepending the diagnostic #.

    This function makes no assumptions about the line endings.
    """
    return ''.join(['# ' + line for line in lines])
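A small usage sketch with hypothetical input; each line keeps its own line ending, since the function makes no assumptions about them:

format_as_diagnostics(["first line\n", "second line\n"])  # -> '# first line\n# second line\n'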
533dd8b013ccd1aaa6ec3ce4ec4bfd19ca3fc165
76,242
from typing import List


def match_prompt_list(target_str: str, prompt_list: List[str]) -> bool:
    """
    Matches any of the prompt candidate strings.
    """
    for prompt_str in prompt_list:
        if target_str == prompt_str:
            return True
    return False
aa2b3369199d0d269d0eaf01c02b42cd346578af
76,244
def filter_intron(df_in, min_size):
    """Filter intron regions to remove those smaller than min_size."""
    # remove regions shorter than min_size
    df_out = df_in[df_in.end - df_in.start >= min_size].copy()
    return df_out
eb3a28be141d062f00c7c2b088d67c78107f4afa
76,247
def merge_feed_dict(*feed_dicts):
    """
    Merge all feed dicts into one.

    Args:
        \**feed_dicts: List of feed dicts. The later ones will override
            values specified in the previous ones. If a :obj:`None` is
            specified, it will be simply ignored.

    Returns:
        The merged feed dict.
    """
    ret = {}
    for feed_dict in feed_dicts:
        if feed_dict is not None:
            ret.update(feed_dict)
    return ret
9c8a0314dec75f484d86926737dd98ff7a54784d
76,249
def word_phone_split(line: list, corpus_name: str):
    """
    Splits the input list line into the word and phone entry.
    voxforge has a middle column that is not used.
    """
    if corpus_name == "voxforge":
        word, phones = line[0], line[2:]
    else:
        try:
            word, phones = line[0], line[1:]
        except IndexError:
            print(f"index error in line: {line}")
            # re-raise the original IndexError with its traceback intact
            raise
    return word, phones
c313439553728344ac8153b67076eacc04b20de0
76,250
def check_temperature(Tpp, Tmax=0.0, Tmin=-10.0):
    """
    Sets the precipitation temperature and snow temperature.

    Args:
        Tpp: A numpy array of temperature, use dew point temperature
            if available [degrees C].
        Tmax: Thresholds the max temperature of the snow [degrees C].
        Tmin: Minimum temperature allowed for the precipitation
            temperature [degrees C].

    Returns:
        tuple:
            - **Tpp** (*numpy.array*) - Modified precipitation temperature
              that is thresholded with a minimum set by Tmin.
            - **tsnow** (*numpy.array*) - Temperature of the surface of the
              snow set by the precipitation temperature and thresholded by
              Tmax where tsnow > Tmax = Tmax.
    """
    Tpp[Tpp < Tmin] = Tmin
    tsnow = Tpp.copy()
    tsnow[Tpp > Tmax] = Tmax
    return Tpp, tsnow
8d5ca6e1dffc1e159ed2e9576c14d9e958b37d2b
76,253
def ktmetric(kt2_i, kt2_j, dR2_ij, p=-1, R=1.0):
    """
    kt-algorithm type distance measure.

    Args:
        kt2_i  : Particle 1 pt squared
        kt2_j  : Particle 2 pt squared
        dR2_ij : Angular separation between particles squared (deta**2 + dphi**2)
        R      : Radius parameter
        p      : (p=1) kt-like, (p=0) Cambridge/Aachen, (p=-1) anti-kt like

    Returns:
        distance measure
    """
    a = kt2_i**(2*p)
    b = kt2_j**(2*p)
    c = (dR2_ij/R**2)
    return (a * c) if (a < b) else (b * c)
4a725910f39e136204a0d3fc3f7b1abcd69da0e4
76,254
def build_model(cfg):
    """
    Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
    Note that it does not load any weights from ``cfg``.
    """
    meta_arch = cfg.MODEL.META_ARCHITECTURE
    return globals()[meta_arch](cfg)
373fbbb9a08112215dc20f474296ec56062531f4
76,258
import re


def oneline(s):
    """Converts a multi-line string to one line and removes extra spaces"""
    # raw string avoids the invalid-escape-sequence warning on modern Python
    return re.sub(r"[\s]+", " ", s).strip()
18658d39f61ddf70d0688e407ce73484ae08fdde
76,261
import ast


def handle_operator_code(self, opcode):
    """
    Parses an operator code and returns its string representation.
    Returns an empty string on error.
    """
    if isinstance(opcode, ast.Add):
        op = "+"
    elif isinstance(opcode, ast.Sub):
        op = "-"
    elif isinstance(opcode, ast.Mult):
        op = "*"
    elif isinstance(opcode, ast.MatMult):
        op = "*"
    elif isinstance(opcode, ast.Div):
        op = "/"
    elif isinstance(opcode, ast.Mod):
        op = "%"
    elif isinstance(opcode, ast.Pow):
        op = "^"
    elif isinstance(opcode, ast.LShift):
        op = "<<"
    elif isinstance(opcode, ast.RShift):
        op = ">>"
    elif isinstance(opcode, ast.BitOr):
        op = "|"
    elif isinstance(opcode, ast.BitXor):
        op = "^"
    elif isinstance(opcode, ast.BitAnd):
        op = "&"
    elif isinstance(opcode, ast.FloorDiv):
        op = "//"
    else:
        self.log_with_loc(
            "Failed to identify the operator. Using an empty dummy.",
            loglevel="ERROR")
        op = ""
    return op
701cac1ad4485115b3aca94246e06bd1fee504dc
76,264
def flatten_list(to_flat):
    """
    Flattens 2D-list to 1D.

    :param to_flat: List to flatten
    :return: 1D list
    """
    return [row for rows in to_flat for row in rows]
00d1e0bae24da120d757518c30a06da9d5ccbe89
76,265
def entitydata_delete_confirm_form_data(entity_id=None, search=None):
    """Form data from entity deletion confirmation."""
    form_data = (
        {
            'entity_id': entity_id,
            'entity_delete': 'Delete'
        })
    if search:
        form_data['search'] = search
    return form_data
df473118ea31df991d65395f2e55dfdc350a24ed
76,267
def mag_to_flux(mag, zeropoint=27.0):
    """Convert magnitude into flux unit."""
    return 10.0 ** ((zeropoint - mag) / 2.5)
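A brief numeric check under the default zeropoint of 27.0: a magnitude equal to the zeropoint maps to a flux of 1, and each 2.5 magnitudes brighter multiplies the flux by 10.

mag_to_flux(27.0)  # -> 1.0
mag_to_flux(24.5)  # -> 10.0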
d2da31466b0141fb4a2eb7ec6a45b865bc122931
76,270
def split(range, upper):
    """
    >>> split((0, 127), False)
    (0, 63)
    >>> split((0, 63), True)
    (32, 63)
    >>> split((32, 63), False)
    (32, 47)
    >>> split((32, 47), True)
    (40, 47)
    >>> split((40, 47), True)
    (44, 47)
    >>> split((44, 47), False)
    (44, 45)
    >>> split((44, 45), False)
    (44, 44)
    >>> split((0, 7), True)
    (4, 7)
    >>> split((4, 7), False)
    (4, 5)
    >>> split((4, 5), True)
    (5, 5)
    """
    start, end = range
    length = (end - start + 1) // 2
    if upper:
        return (start + length, end)
    else:
        return (start, end - length)
926aae870f69dcd8215d2ae37f1a2998dc40de69
76,273
import json


def load_json_data(json_data, encoding='utf-8'):
    """Load JSON contents from binary data.

    Parameters
    ----------
    json_data : bytes
        Binary data encoding JSON contents.
    encoding : str (optional, default 'utf-8')
        Encoding that was used.

    Returns
    -------
    contents : dict
        JSON contents.
    """
    return json.loads(json_data.decode(encoding))
be1e9d9a1feab3d07247ab2990c9f4bbf898f1da
76,275
import unicodedata


def remove_accented_chars(text):
    """Removes accented characters from the text."""
    new_text = unicodedata.normalize('NFKD', text).encode(
        'ascii', 'ignore').decode('utf-8', 'ignore')
    return new_text
b3a4e5b2bed54b17ed1d406f37009cacfe029217
76,277
from typing import Tuple
import re


def split_line(s: str, *, pattern: str) -> Tuple[str, str]:
    """
    Splits a line into two parts and tests the first part against pattern.

    Splits a line on the first white space. If the first word matches the
    specified pattern, then it returns a tuple containing the first word
    and the remainder of the string. If there is no match, the tuple
    contains an empty string and the original string.
    """
    # Split into first word and remaining text
    parts = s.split(None, 1)
    if len(parts) == 0:
        return "", s
    match = re.match(pattern, parts[0])
    if match is None:
        # no match, return original string as remainder
        return "", s
    if len(parts) > 1:
        # string contains a match and a remainder
        return parts[0], parts[1]
    # string contains match only
    return parts[0], ""
4516fce664aef456a0596b4fc3bd76e14f2b93a1
76,281
def delete_session_mock(mocker):
    """Mock for patching DELETE request"""
    return mocker.patch("hydra_agent.agent.Session.delete")
26c3073e2d51dec1f3dcd9bfee3db311fdcf6a66
76,284
import base64


def decode_string(string: str) -> str:
    """Base64 decode a string.

    Args:
        string: String to decode

    Returns:
        Decoded string
    """
    decoded_bytes = base64.b64decode(string)
    return str(decoded_bytes, "utf-8")
0075efafbcd3c48f3586ba11132a8aa51056fbc1
76,287
def create_error_response(code, jrpc_id, msg):
    """
    Creates JSON RPC error response.

    Parameters:
        code: Error code
        jrpc_id: JSON RPC id
        msg: Error message

    Returns:
        JSON RPC error response as JSON object.
    """
    error_response = {}
    error_response["jsonrpc"] = "2.0"
    error_response["id"] = jrpc_id
    error_response["error"] = {}
    error_response["error"]["code"] = code
    error_response["error"]["message"] = msg
    return error_response
f5939b5ace7449d1f14321b1a902847d88f0d2db
76,288
from datetime import datetime


def generate_filename(instance, filename):
    """Generate the path of the tool-picture."""
    return 'users/{email}/tools/{stamp}-{file}'.format(
        email=instance.owner.email,
        stamp=datetime.now().timestamp(),
        file=filename)
597c96da2d55e5119f629938c67400cf916a33a6
76,290
from typing import Optional


def filter_dictionary(dictionary: dict, fields_to_keep: Optional[tuple],
                      sort_by_field_list: bool = False) -> dict:
    """
    Filters a dictionary and keeps only the keys that appear in the given list.

    :param dictionary: the origin dict
    :param fields_to_keep: the list which contains the wanted keys
    :param sort_by_field_list: whether to sort the dictionary keys according
        to the field list
    :return: the dictionary, with only fields that appeared in fields_to_keep.

    >>> filter_dictionary({1:2,3:4}, (3,))
    {3: 4}
    >>> filter_dictionary({1:2,3:4}, (3,1))
    {1: 2, 3: 4}
    >>> filter_dictionary({1:2,3:4}, (3,1), True)
    {3: 4, 1: 2}
    >>> filter_dictionary({1:2,3:4}, (), True)
    {}
    >>> filter_dictionary({1:2,3:4}, None)
    {}
    """
    filtered = {k: v for k, v in dictionary.items() if k in (fields_to_keep or [])}
    if sort_by_field_list:
        key_order = {v: i for i, v in enumerate(fields_to_keep or [])}
        filtered = dict(sorted(filtered.items(), key=lambda pair: key_order[pair[0]]))
    return filtered
c8196c43bd49a593060093fc49c668c47b950946
76,296
def NOT_IS(attribute, val):
    """Check if two values are not equal."""
    return attribute != val
3567a3789bc3ab9176ee9ba4a5d1eb0a4c4dec48
76,299
def _build_task_dependency(tasks):
    """
    Fill the task list with all the needed modules.

    Parameters
    ----------
    tasks : list
        List of strings, containing initially only the last module
        required. For instance, to recover all the modules, the input
        should be ``['fourier']``.

    Returns
    -------
    tasks : list
        Complete task list.
    """
    if not isinstance(tasks, (tuple, list)):
        tasks = [tasks]
    tasks = set(tasks)
    if 'thermodynamics' in tasks:
        tasks.discard('background')
    # if 'lensing' in tasks:
    #     tasks.add('harmonic')
    if 'harmonic' in tasks:
        tasks.add('fourier')
    if 'fourier' in tasks:
        tasks.add('transfer')
    return list(tasks)
6da74f01badfc395c3be7daf8ae30df9e2837c63
76,300
def volume_flow(p, G, R, T, Z):
    """
    :param p: Absolute pressure, Pa
    :param G: Mass flow, kg/s
    :param R: Gas constant, J/(kg*K)
    :param T: Gas temperature, K
    :param Z: Compressibility factor
    :return: Volume flow of the natural gas, m3/s
    """
    return Z * G * R * T / p
25fd7d076272f8747390824a922b5808e312e4ad
76,302
def level_from_severity(severity):
    """Converts tool's severity to the 4 levels suggested by SARIF."""
    if severity == "CRITICAL":
        return "error"
    elif severity == "HIGH":
        return "error"
    elif severity == "MEDIUM":
        return "warning"
    elif severity == "LOW":
        return "note"
    else:
        return "warning"
25fe348a39a7a26b507bde84fb94eba4c9d554bf
76,304
def get_paths(corpus, x, y, pattern):
    """
    Returns the paths between the (x, y) term-pair.

    :param corpus: the corpus resource object
    :param x: the X entity
    :param y: the Y entity
    :param pattern: path patterns to exclude (e.g. satellites)
    :return: all paths between (x, y) which do not match the pattern
    """
    x_to_y_paths = corpus.get_relations(x, y)
    # keep only the paths that do NOT match the exclusion pattern,
    # as the docstring describes
    paths = [path_id for path_id in x_to_y_paths.keys()
             if not pattern.match(corpus.get_path_by_id(path_id))]
    return paths
ede98e4aafdc17802298bf2f99de15fd871f4cab
76,305
def setCompare(iter1, iter2):
    """
    Compares two groups of objects, returning the sets:
    onlyIn1, inBoth, onlyIn2
    """
    s1 = set(iter1)
    s2 = set(iter2)
    intersect = s1 & s2
    return s1 - intersect, intersect, s2 - intersect
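A short, hypothetical example of the three returned sets:

setCompare([1, 2, 3], [2, 3, 4])  # -> ({1}, {2, 3}, {4})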
bca09656503a13e693a7f8c9b9eb0e11cb867d49
76,312
def consolidate_headers(header_offsets):
    """
    This takes the array of headers and returns just the starting character
    of each header. These character offsets are used to section the document.
    """
    return [x[0] for x in header_offsets]
4f0e080795eff168065304a35ea3836db798112b
76,315
def max_o(x, n, prevent=False):
    """
    Ensures that x fits on n bits (not bytes).

    - If prevent is True then an exception is raised
    - Otherwise just set all bits to 1

    Parameters
    ----------
    x
        Number to test
    n
        Number of bits available
    prevent
        If true then an exception will rise in case of overflow

    Returns
    -------
    A value which fits within the bit number constraint

    Raises
    ------
    ValueError
        If the value overflows and prevent is true
    """
    if x >= 1 << n:
        if prevent:
            raise ValueError
        else:
            return (1 << n) - 1
    return x
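A quick sketch of both the saturating and the raising behaviour (hypothetical values; 300 does not fit in 8 bits):

max_o(42, 8)                 # -> 42 (fits in 8 bits)
max_o(300, 8)                # -> 255, all 8 bits set
max_o(300, 8, prevent=True)  # raises ValueError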
c40f53f12a4c6301cd378c0c61375c376e5d53ae
76,316
def pingponglet(value):
    """Return a value unchanged"""
    return value
254330478eb52c9c498d4f716cf8c7c488b43358
76,320
import math


def get_chain_stats(embedding):
    """
    Embedded chains account for the number of qubits used per variable in
    the source.

    Args:
        embedding (dict): Mapping from source graph to target graph as a
            dict of form {s: {t, ...}, ...}, where s is a source-model
            variable and t is a target-model variable.

    Returns:
        *stats: Max, min, total, average, standard deviation
    """
    total = 0
    max_chain = 0
    N = len(embedding)
    min_chain = float('inf')

    # Get max, min and total
    for chain in embedding.values():
        chain_len = len(chain)
        total += chain_len
        if chain_len > max_chain:
            max_chain = chain_len
        if chain_len < min_chain:
            min_chain = chain_len

    # Get avg and standard deviation
    avg_chain = total / N
    sum_deviations = 0
    for chain in embedding.values():
        chain_len = len(chain)
        deviation = (chain_len - avg_chain)**2
        sum_deviations += deviation
    std_dev = math.sqrt(sum_deviations / N)

    return max_chain, min_chain, total, avg_chain, std_dev
7788f3103016aa6173a8bb4ef43ea87378d7ebb1
76,323
def get_db_img_name(img_name, processing):
    """Creates image name given processing type.

    Args:
        img_name (str): image name
        processing (str): processing applied

    Returns:
        str: Created image name
    """
    # split on the last dot so filenames containing extra dots still unpack
    img_name, filetype = img_name.rsplit('.', 1)
    return img_name + processing + "." + filetype
a2062110cb0356ae75e9ab87b1272849b1e6cf82
76,324
def find_3D(index, header, max_distance, quality=5):
    """
    Return the list of USArray 3-D EMTF keys (for repository :class:`Index`
    *index*) that are less than *max_distance* (in km) from the magnetometer
    location specified in *header*. Only include *quality* or greater sites.
    """
    lat = header['geodetic_latitude']
    lon = header['geodetic_longitude']
    return index.quality_subset(min_quality=quality).by_distance(lat, lon, max_distance)
28c4a5d29518cf129d502bb3090007b7884d34e0
76,325
def get_slot(grps, k):
    """
    Given array of match groups, return first key matching.

    :param grps: iterable of (key, value, x1, x2) match groups
    :param k: key to look up
    :return: tuple matching, or None if not found.
    """
    for g in grps:
        key, v, x1, x2 = g
        if key == k:
            return g
    return None
4fb55d4a6dbb5960bef1590a2cd2131f276507e0
76,326
from typing import List
from typing import Tuple


def _format_list(input_list: List, evals_per_gen: int) -> Tuple:
    """
    _format_list() takes as input data collected from multiple algorithm
    runs. Then, an average is computed and horizontal axis scaling is
    applied. The return value is a tuple of two lists, each corresponding
    to a plot axis, ready to be plotted.
    """
    run_count = len(input_list)
    run_length = len(input_list[0])
    for i in input_list:
        assert len(i) == run_length, "Runs are of different length, cannot take average"

    y_axis = []
    for i in range(run_length):
        y_axis.append(sum([sigmas[i] for sigmas in input_list]) / run_count)

    x_axis = [x * evals_per_gen for x in range(run_length)]
    return x_axis, y_axis
6ef9a11e13ceaeaae4fc9793aada1f43eb558659
76,329
import re


def testPasswordStrength(password):
    """
    Check for at least eight characters long, contains both uppercase and
    lowercase characters, and has at least one digit.

    Args:
        password (str): password as string

    Returns:
        strong (bool): True if password is strong, else False
    """
    eightCharsLongRegex = re.compile(r'[\w\d\s\W\D\S]{8,}')
    upperCaseRegex = re.compile(r'[A-Z]+')
    lowerCaseRegex = re.compile(r'[a-z]+')
    oneOrMoreDigitRegex = re.compile(r'\d+')

    if not eightCharsLongRegex.search(password):
        return False
    elif not upperCaseRegex.search(password):
        return False
    elif not lowerCaseRegex.search(password):
        return False
    elif not oneOrMoreDigitRegex.search(password):
        return False
    return True
3d743195fb585e6beec8d7c39696548393edf04a
76,330
from functools import reduce


def checksum(bytes):
    """Return the simple checksum of a sequence of bytes."""
    # parentheses matter: % binds tighter than +, so without them the
    # modulo would apply only to b (a no-op for byte values < 2**16)
    return reduce(lambda a, b: (a + b) % 2**16, bytes)
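A hypothetical check of the running 16-bit sum:

checksum(b"abc")  # -> 294, i.e. 97 + 98 + 99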
c44047a3a2e522d9ed8072fd703d939d5436ebd9
76,335
from typing import Callable
from typing import Any


def lazy_property(f: Callable[..., Any]) -> Any:
    """Decorator to make lazy-evaluated properties"""
    attribute = '_' + f.__name__

    @property  # type: ignore
    def _lazy_property(self: object) -> Any:
        if not hasattr(self, attribute):
            setattr(self, attribute, f(self))
        return getattr(self, attribute)

    return _lazy_property
dda3e142dd7d0414c604e114efb9c06632c4343a
76,336
def escape_like(v: str) -> str:
    """
    Escape a string for the use in `LIKE` condition.

    :param v: A string.
    :return: Escaped string.
    """
    def esc(c):
        if c == "\\":
            return r"\\\\"
        elif c == "%":
            return r"\%"
        elif c == "_":
            return r"\_"
        else:
            return c

    return ''.join(map(esc, v))
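A small illustration with a hypothetical input containing both LIKE wildcards:

escape_like("50%_off")  # -> '50\\%\\_off', i.e. the literal text 50\%\_off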
c657e8bc59f18ce1adb4991f7eefa8e84ae55195
76,339
import torch


def compute_logits(cluster_centers, data):
    """Computes the logits of being in one cluster, squared Euclidean.

    Args:
        cluster_centers: [K, D] Cluster center representation.
        data: [B, N, D] Data representation.

    Returns:
        log_prob: [B, N, K] logits.
    """
    k = cluster_centers.shape[0]
    b = data.shape[0]
    cluster_centers = cluster_centers.unsqueeze(dim=0)  # [1, K, D]
    data = data.contiguous().view(-1, data.shape[-1]).unsqueeze(dim=1)  # [N, 1, D]
    # neg_dist = -torch.sum(torch.pow(data - cluster_centers, 2), dim=-1)  # [N, K]
    neg_dist = -torch.mean(torch.pow(data - cluster_centers, 2), dim=-1)  # [N, K]
    neg_dist = neg_dist.view(b, -1, k)
    return neg_dist
ec0f7c69654b4e233a0d98582d06f9cf169f2378
76,342
from pathlib import Path


def list_files(path='.', extension=''):
    """Return list of files with specific extension in path.

    - by default, return all files with any extension.
    - directories are excluded
    - results are sorted by name
    """
    folder = Path(path)
    pattern = '*' + extension
    paths = folder.glob(pattern)
    files = [p for p in paths if p.is_file()]
    return sorted(files)
2852e4edd32858605df1bddf3d2716576ab69a7f
76,346
def get_info_file_path(path):
    """
    example:
        path=/000001/000001493.png
        returns /000001/000001493.gt_data.txt
    """
    return path[:16] + '.gt_data.txt'
5a97beb5e313730c6ad46a24af2aebf2e76e67c5
76,347
def largest_prime_factor_even_optimized(number):
    """
    We know that, excluding 2, there are no even prime numbers. So we can
    increase factor by 2 per iteration after having handled the factor 2
    separately.
    """
    factors = []
    factor = 2
    if number % factor == 0:
        number = number // factor
        factors.append(factor)
        while number % factor == 0:
            number = number // factor
    factor = 3
    while number > 1:
        if number % factor == 0:
            factors.append(factor)
            number = number // factor  # Remainder guaranteed to be zero
            while number % factor == 0:
                number = number // factor  # Remainder guaranteed to be zero
        factor += 2
    return factors
36269bc3fc68af504d85adae6069fb98469be9d3
76,348
from typing import Optional
from typing import Any


def save_file(
    file_path: Optional[str] = None,
    content: Any = None,
    mode: str = "w",
    encoding: str = "utf-8",
    newline: str = "\n",
    writer: Any = None,
) -> None:
    """
    Save a file.

    :param file_path: The path to the file to save.
    :type file_path: str
    :param content: The content to save.
    :type content: Any
    :param mode: The mode to use when opening the file, defaults to "w"
    :type mode: str, optional
    :param encoding: The encoding to use when writing the file, defaults to "utf-8"
    :type encoding: str, optional
    :param newline: The newline character to use when writing the file, defaults to "\\n"
    :type newline: str, optional
    """
    if not isinstance(file_path, str):
        return None
    if mode == "wb":
        with open(file_path, mode) as file:
            _ = file.write(content) if not writer else writer(content, file)
    else:
        with open(file_path, mode, encoding=encoding, newline=newline) as file:
            _ = file.write(content) if not writer else writer(content, file)
b1ff348e661485884f3a7a8cdb80766ea9241a19
76,355
def parse_events(artifact: dict):
    """
    Parses events from the compiled contract.

    :param json artifact: the artifact json object of the compiled contract
    :return: list of event names, and a map from event name to a tuple of
        its input types
    """
    events = []  # list of event names
    event_input_types = {}  # map event name to tuple of input types
    for entry in artifact['abi']:
        if entry['type'] == 'event':
            event_name = entry['name']
            events.append(event_name)
            # materialize as a tuple (a bare generator would be stored otherwise)
            event_input_types[event_name] = tuple(
                inp['type'] for inp in entry['inputs'])
    return events, event_input_types
159b81b8e55ecec055ee8f3da864c30412b8247e
76,356
def string_parse(input_str):
    """
    Converts passed string into *args, **kwargs.

    Args:
        input_str(str): input string in format -
            "1, 2, 3, a\\nvalue3=4, value1=arg1, value2=arg2"

    Returns:
        tuple(*args, **kwargs): parsed args, and kwargs values
    """
    args_str, kwargs_str = input_str.split('\n')
    args_raw = args_str.split(',')
    kwargs_raw = kwargs_str.split(', ')
    args = [item.strip() for item in args_raw]
    kwargs = dict((item.split('=') for item in kwargs_raw))
    return args, kwargs
3c1dc3d81c0539019c6095c00741f0daf0f5c188
76,360
def calc_tanimoto(Na, Nb):
    """Calculates the Tanimoto similarity coefficient between two sets Na and Nb."""
    Nab = len(set(Na).intersection(set(Nb)))
    return float(Nab) / (len(Na) + len(Nb) - Nab)
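A quick, hypothetical check: two sets sharing two of four distinct elements give a Tanimoto coefficient of 0.5.

calc_tanimoto({1, 2, 3}, {2, 3, 4})  # -> 0.5, i.e. 2 shared / (3 + 3 - 2)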
e76a77c8b1b72ecaba59fe575e6dab6be21bf1d5
76,366
def create_infertility_feature(diagnosis):
    """
    Creates the one-hot encoded infertility diagnosis feature from a given
    diagnosis.

    Parameters:
    -----------
    diagnosis : str,
        The infertility diagnosis, must be one of Tubal disease,
        Ovulatory disorder, Male factor, Endometriosis or Unexplained.

    Returns:
    -------
    infertility : list,
        The one-hot encoded infertility feature.
    """
    # column index in feature matrix of feature
    idx_dict = {'Tubal disease': 0,
                'Ovulatory disorder': 1,
                'Male factor': 2,
                'Endometriosis': 3,
                'Unexplained': 4}

    # create feature vector
    idx = idx_dict[diagnosis]
    infertility = [0, 0, 0, 0, 0]
    infertility[idx] = 1
    return infertility
746d80464f5951de1376ade1d3595f8310a58a2c
76,372
def get_clean_text(html):
    """Removes extra blank spaces and nbsp from html text."""
    return " ".join(html.text_content().split())
df0f789ddbbaee51ea807465bf7e286b2a2a4096
76,375
from datetime import datetime


def convert_time(time):
    """Convert a time string into 24-hour time."""
    split_time = time.split()
    try:
        # Get rid of period in a.m./p.m.
        am_pm = split_time[1].replace('.', '')
        time_str = '{0} {1}'.format(split_time[0], am_pm)
    except IndexError:
        return time
    try:
        time_obj = datetime.strptime(time_str, '%I:%M %p')
    except ValueError:
        time_obj = datetime.strptime(time_str, '%I %p')
    # %H:%M is already 24-hour; appending %p would re-add the am/pm marker
    return time_obj.strftime('%H:%M')
4bd59b2e10f4a15260dd2cef840dd6cea97475f6
76,376
def pad(data: bytes, size: int, value: int = 0) -> bytes:
    """Pads given data with given value until its length is a multiple of size.

    Parameters:
        data: bytes
            Data to pad
        size: int
            Required size the data length must be a multiple of
        value: int
            Value to append to data. 0 by default
    """
    value = (value & 0xff)
    while len(data) % size:
        data += bytes([value])
    return data
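A short usage sketch with hypothetical values; data already at a multiple of size is returned unchanged:

pad(b"abcde", 4)           # -> b'abcde\x00\x00\x00'
pad(b"abcd", 4)            # -> b'abcd'
pad(b"ab", 4, value=0xff)  # -> b'ab\xff\xff'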
3879930d6516daa751dcd310c93055b30c36b852
76,377
import collections


def parse_chrom_size(path, remove_chr_list=None):
    """
    Support a simple UCSC chrom size file, or .fai format (1st and 2nd
    columns same as a chrom size file).
    Return a chrom:length dict.
    """
    if remove_chr_list is None:
        remove_chr_list = []

    with open(path) as f:
        chrom_dict = collections.OrderedDict()
        for line in f:
            # *_ for other formats like faidx files
            chrom, length, *_ = line.strip('\n').split('\t')
            if chrom in remove_chr_list:
                continue
            chrom_dict[chrom] = int(length)
    return chrom_dict
97770cc856d9dc1e6c90c8dda4caf9abac87e893
76,378
def split_data(X, Y):
    """Splits data into 80% training and 20% test data.

    Note: the hard-coded index assumes exactly 500 samples.
    """
    return X[:400], X[400:], Y[:400], Y[400:]
393430717026706a68d98dc6955d7902c3833ad1
76,380
def write_ps_hdf5(file, spec_name, l, ps, spectra=None):
    """Write down the power spectra in a hdf5 file.

    Parameters
    ----------
    file: hdf5
        the name of the hdf5 file
    spec_name: string
        the name of the group in the hdf5 file
    l: 1d array
        the multipoles (or binned multipoles)
    ps: 1d array or dict of 1d array
        the power spectrum; if spectra is not None, expect a dictionary
        with entry spectra
    spectra: list of strings
        needed for spin0 and spin2 cross correlation, the arrangement
        of the spectra
    """
    def array_from_dict(l, ps, spectra=None):
        array = []
        array += [l]
        if spectra is None:
            array += [ps]
        else:
            for spec in spectra:
                array += [ps[spec]]
        return array

    group = file.create_group(spec_name)
    array = array_from_dict(l, ps, spectra=spectra)
    group.create_dataset(name="data", data=array, dtype="float")
232b6d539d1d2420e7d785713efe7ec8a50f882b
76,383
def calc_resize_with_apect(size, min_dimension):
    """Calculate the dimensions needed to resize an image with the minimum
    dimension on one side while preserving the aspect ratio.

    size: tuple containing the original image size in pixels (w, h)
    min_dimension: min pixel size on one side
    """
    w = size[0]
    h = size[1]
    new_w = (w / min(size)) * min_dimension
    new_h = (h / min(size)) * min_dimension
    new_size = (int(new_w), int(new_h))
    return new_size
fd52f50a2ce759fe4d510f6ae6b403ab2cb523e3
76,388
def find_text_idx(sentence):
    """Return the index of the # text line or -1."""
    for idx, line in enumerate(sentence):
        if line.startswith("# text"):
            return idx
    return -1
f3edb255e715ab7607802448ef750318d7b79e29
76,393
def parseLecturer(line):
    """Parses the line with lecturer's information.

    Splits the line appropriately to firstName, lastName, specialization,
    preferedDates and residence.
    """
    logicalChunks = line.split(':')
    name = logicalChunks[0].split(' ')
    subjects = logicalChunks[1].split(',')
    preferedDates = logicalChunks[2].split(',')
    parsedLine = [name[0], name[1], subjects, preferedDates, logicalChunks[3].strip()]
    return parsedLine
7bc1f01b0656209d08ea08277745d7aa9b55fde2
76,398
from typing import Union


def index_for_wavelength(wls, w) -> Union[None, int]:
    """Return the index for a particular wavelength or None if not present."""
    try:
        idx = wls.index(w)
    except ValueError:
        return None
    return idx
3c8e8a921609151acd671fd0d9c47732a7425ffd
76,399
def get_inner_text(element):
    """Get the inner text of the specified XML element."""
    buffer = []
    for node in element.childNodes:
        if node.nodeType == node.TEXT_NODE:
            buffer.append(node.data)
        elif node.nodeType == node.ELEMENT_NODE:
            buffer.append(get_inner_text(node))
    return "".join(buffer).strip()
f40af274dbb4cf5e78f5db77082abbd0dd618e4b
76,401
def _uniq(iterable):
    """Returns a list of unique elements in `iterable`.

    Requires all the elements to be hashable.

    Args:
        iterable: An iterable to filter.

    Returns:
        A new list with all unique elements from `iterable`.
    """
    unique_elements = {element: None for element in iterable}
    return list(unique_elements.keys())
64a18b7865ac6d709e0732e5536eac7b7b08b433
76,403
def is_sub_seq(full_seq, sub_seq):
    """Return true if sub_seq is a sub-sequence of full_seq."""
    if len(sub_seq) == 0:
        return True
    for idx0, x0 in enumerate(full_seq):
        if x0 == sub_seq[0]:
            if is_sub_seq(full_seq[idx0+1:], sub_seq[1:]):
                return True
    return False
fded0c65cdc7a817fdbf34a53469fc540f16d9b0
76,405
def aggregate_log_dict(agg_dict, new_dict) -> dict:
    """
    Aggregate the statistics of a log dict.

    :param agg_dict: aggregation dictionary
    :param new_dict: dict with new stats
    :return: new aggregation dict with aggregated stats
    """
    for k in new_dict:
        # init new if not present
        if k not in agg_dict:
            agg_dict[k] = {
                'n': 0,
                'sum': 0.0,
                'max': new_dict[k],
                'min': new_dict[k],
            }
        # aggregate
        agg_dict[k]['n'] += 1
        agg_dict[k]['sum'] += new_dict[k]
        agg_dict[k]['max'] = max(new_dict[k], agg_dict[k]['max'])
        agg_dict[k]['min'] = min(new_dict[k], agg_dict[k]['min'])
    return agg_dict
206c1ed2b2bf5218e42b97ad57583c61e72319f3
76,407
def read_file(filename, mode='r', stripChar='\n', replaceChar='', lines=False):
    """
    Basic implementation of reading a text file into a list.
    Default is to read one line at a time.

    If lines=True, uses f.readlines() and returns each line of the file
    as is (includes all whitespace and newline chars).
    If lines=False, removes all characters specified in stripChar and
    replaces them with replaceChar. Defaults of those settings are "\\n"
    and "" respectively.

    @return: contents of the file
    """
    # 'U' (universal newlines) is the default in Python 3, so plain 'r' suffices
    with open(filename, mode) as f:
        if lines:
            file_contents = f.readlines()
        else:
            file_contents = []
            for line in f:
                file_contents.append(line.replace(stripChar, replaceChar).strip())
    return file_contents
b3d0ef838f97ba6a38f0d056f27ac98bc5a38e26
76,415
def RGBToHTMLColor(rgb):
    """
    Convert an [R, G, B] list to #RRGGBB.

    :param: rgb - The elements of the array rgb are unsigned chars (0..255).
    :return: The html color.
    """
    hexcolor = "#" + ''.join(['{:02x}'.format(x) for x in rgb])
    return hexcolor
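A quick, hypothetical check of the conversion:

RGBToHTMLColor([255, 0, 128])  # -> '#ff0080'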
2b19dd68f8e23a095c5244eb2a1bdf59a932df97
76,418
from datetime import datetime


def format_date_for_mongo(time_str):
    """Reformat a time string into YYYY-MM-ddTHH:mm:ss."""
    return datetime.strptime(time_str, '%y_%m_%d_%H_%M_%S')\
        .strftime('%Y-%m-%dT%H:%M:%S')
382d4236eaa6d2057bb16284c9c863eae12afe49
76,421
def _process_y(dtype, data):
    """Returns the MPL encoding equivalent for Altair y channel."""
    return ('y', data)
cf72825eebe3f5ab0c79f7b7fa47d4b40732c895
76,427
def distribute_particles_across_tiles(particle_lons, particle_lats, tiles):
    """
    Splits a list of particle longitudes and a list of particle latitudes
    into `tiles` equally sized lists.

    Args:
        particle_lons: List of particle longitudes.
        particle_lats: List of particle latitudes.
        tiles: Number of tiles or processors to split the particles into.

    Returns:
        particle_lons_tiled: A list containing `tiles` lists of particle
            longitudes for each processor.
        particle_lats_tiled: A list containing `tiles` lists of particle
            latitudes for each processor.
    """
    assert particle_lons.size == particle_lats.size
    N_particles = particle_lons.size
    assert (N_particles / tiles).is_integer()

    particles_per_tile = N_particles // tiles
    particle_lons_tiled = tiles * [None]
    particle_lats_tiled = tiles * [None]

    for i in range(tiles):
        particle_idx_start, particle_idx_end = i*particles_per_tile, (i+1)*particles_per_tile
        particle_lons_tiled[i] = particle_lons[particle_idx_start:particle_idx_end]
        particle_lats_tiled[i] = particle_lats[particle_idx_start:particle_idx_end]

    return particle_lons_tiled, particle_lats_tiled
d887e622d18cbaadee3adce542ae80bc5a4da4a3
76,433
def _extract_element(xml, element_name, namespace):
    """
    An internal method provided to extract an element from the given XML.

    :type xml: :class:`str`
    :param xml: The XML string from which the element will be extracted.
    :type element_name: :class:`str`
    :param element_name: The element that needs to be extracted from the XML.
    :type namespace: :class:`dict`
    :param namespace: A dict containing the namespace of the element to be
        extracted.
    :rtype: etree element.
    :return: The extracted element.
    """
    assert len(namespace) == 1
    result = xml.xpath("//%s:%s" % (list(namespace.keys())[0], element_name),
                       namespaces=namespace)
    if result and len(result) == 1:
        return result[0]
    else:
        raise KeyError("%s does not seem to be present or valid in the XML."
                       % element_name)
83feee17192407d25f77aedff475843e5df3ee33
76,435
def smart_encode(content: str, encoding: str) -> bytes:
    """Encode `content` using the given `encoding`.

    Unicode errors are replaced.
    """
    return content.encode(encoding, 'replace')
4e51aee004b2646f6f907335b883eb2fa5acbd59
76,436
def somatorio(n):
    """
    Recursive function that takes an integer value n >= 0 and returns the
    sum of the integers from 0 to n.

    Examples:
    ------------
    somatorio(0) must return 0;
    somatorio(2) must return 3, since 0+1+2 = 3;
    somatorio(5) must return 15, since 0+1+2+3+4+5 = 15;

    Returns:
    -----------
    int: sum of the integers from 0 to n.
    """
    if n == 0:
        return 0
    else:
        return n + somatorio(n - 1)
4a3852afa1f04d77fbd6b1581802ed47a9ff1950
76,438
import re


def parse_env(env_in):
    """Parses all environment variables into a map."""
    env_out = {}
    reg = re.compile(r'^(?P<var_name>[A-Z0-9_\-]+)=(?P<var_value>.+)$')
    env_all = env_in.split(" ")
    for e in env_all:
        if e == "":
            continue
        match = reg.match(e.lstrip())
        if not match:
            raise ValueError("Invalid Environment variable supplied.")
        var_name = match.group("var_name")
        var_value = match.group("var_value")
        env_out[var_name] = var_value
    return env_out
317319048db163f2352324db20622e9491fdc6fd
76,439
def _is_str(text):
    """Checks whether `text` is a non-empty str"""
    return isinstance(text, str) and text != ""
0ad979fd01f241bcb521f095061d7739f85cdfb4
76,440
def f_score(precision, recall, beta=1):
    """Compute F beta score.

    Args:
        precision (float): precision
        recall (float): recall
        beta (float): the weight of recall, default=1

    Returns:
        float: f score
    """
    if recall + precision*beta**2 == 0:
        return 0
    else:
        return (1 + beta**2)*precision*recall / (recall + precision*beta**2)
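A couple of hypothetical spot checks: with beta=1 this reduces to the harmonic mean of precision and recall, and the zero-denominator guard returns 0.

f_score(0.5, 0.5)  # -> 0.5
f_score(1.0, 0.5)  # -> 0.666...
f_score(0.0, 0.0)  # -> 0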
5a213cf0c8e23d6f43e25ad63cc099d64ff36341
76,445
import torch
import math


def bbox_iou(box1, box2, x1y1x2y2=True, giou=False, diou=False, ciou=False, eps=1e-9):
    """Compute the IoU of box1 to box2. box1 is 4, box2 is nx4.

    :param box1: box1
    :param box2: box2
    :param x1y1x2y2: xyxy or xywh
    :param giou: giou
    :param diou: diou
    :param ciou: ciou
    :param eps: 1e-9
    :return: iou or giou, diou, ciou
    """
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:
        # x1, y1, x2, y2 = box1
        box1_x1, box1_y1, box1_x2, box1_y2 = box1[0], box1[1], box1[2], box1[3]
        box2_x1, box2_y1, box2_x2, box2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:
        # transform from xywh to xyxy
        box1_x1, box1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        box1_y1, box1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        box2_x1, box2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        box2_y1, box2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(box1_x2, box2_x2) - torch.max(box1_x1, box2_x1)).clamp(0) * \
            (torch.min(box1_y2, box2_y2) - torch.max(box1_y1, box2_y1)).clamp(0)

    # Union area
    box1_w, box1_h = box1_x2 - box1_x1, box1_y2 - box1_y1 + eps
    box2_w, box2_h = box2_x2 - box2_x1, box2_y2 - box2_y1 + eps
    union = box1_w * box1_h + box2_w * box2_h - inter + eps

    iou = inter / union
    if giou or diou or ciou:
        convex_width = torch.max(box1_x2, box2_x2) - torch.min(box1_x1, box2_x1)  # convex width
        convex_height = torch.max(box1_y2, box2_y2) - torch.min(box1_y1, box2_y1)  # convex height
        if ciou or diou:
            # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            convex_diagonal_squared = convex_width ** 2 + convex_height ** 2 + eps
            center_distance_squared = ((box2_x1 + box2_x2 - box1_x1 - box1_x2) ** 2 +
                                       (box2_y1 + box2_y2 - box1_y1 - box1_y2) ** 2) / 4
            if diou:
                return iou - center_distance_squared / convex_diagonal_squared
            if ciou:
                # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                upsilon = (4 / math.pi ** 2) * torch.pow(
                    torch.atan(box2_w / box2_h) - torch.atan(box1_w / box1_h), 2)
                with torch.no_grad():
                    alpha = upsilon / ((1 + eps) - iou + upsilon)
                return iou - (center_distance_squared / convex_diagonal_squared + upsilon * alpha)
        else:
            # giou https://arxiv.org/pdf/1902.09630.pdf
            convex_area = convex_width * convex_height + eps
            return iou - (convex_area - union) / convex_area
    return iou
3a53910414f2352a0953446603b4fed071f44edd
76,447
def convertHLABack(h):
    """Takes format HLA-A*12:34 and returns A_1234."""
    return h[4:].replace('*', '_').replace(':', '')
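A hypothetical example matching the docstring:

convertHLABack("HLA-A*12:34")  # -> 'A_1234'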
5da352ff4dd740d3e6813db3883fc36c3effdd57
76,448
import math


def split_list_by_percentage(
    list_to_split: list, percentage_to_split: float
) -> "tuple[list, list]":
    """
    Split a list into two sub-lists; the second sub-list has length
    floor(len(list) * percentage_to_split).
    """
    split_index = math.floor(len(list_to_split) * percentage_to_split)
    return list_to_split[split_index:], list_to_split[:split_index]
29be2e6c3af170c36ac8763af28d882a511cc8bf
76,453
import six


def _downsample(rng, xs, k):
    """Uniformly choose a maximal at-most-`k`-subsequence of `xs`.

    If `k` is larger than `xs`, then the contents of `xs` itself will be
    returned. This differs from `random.sample` in that it returns a
    subsequence (i.e., order is preserved) and that it permits
    `k > len(xs)`.

    Args:
        rng: A `random` interface.
        xs: A sequence (`collections.abc.Sequence`).
        k: A non-negative integer.

    Returns:
        A new list whose elements are a subsequence of `xs` of length
        `min(k, len(xs))`, uniformly selected among such subsequences.
    """
    if k > len(xs):
        return list(xs)
    indices = rng.sample(six.moves.xrange(len(xs)), k)
    indices.sort()
    return [xs[i] for i in indices]
c9156a1fec1ebffcffcd1e83f4891b4afd695170
76,461
from datetime import datetime


def make_test_run(
    testinstance, exec_date='', exec_time='', duration='0', status='Passed'
):
    """Create a RunInstance in QC."""
    run = testinstance.RunFactory.AddItem("Run {}".format(datetime.now()))
    run.Status = status
    run.SetField('RN_DURATION', duration)
    run.SetField('RN_EXECUTION_DATE', exec_date)
    run.SetField('RN_EXECUTION_TIME', exec_time)
    run.Post()
    run.Refresh()
    # do again, otherwise not showing in QC
    run.SetField('RN_EXECUTION_DATE', exec_date)
    run.SetField('RN_EXECUTION_TIME', exec_time)
    run.Post()
    run.Refresh()
    return run
ec8f747fc2c2fcfff8e86b4402f016de5bb9f4ad
76,462
import pkg_resources


def parse(version):
    """Parses a ``version`` string.

    Currently a simple wrapper around ``pkg_resources.parse_version()``,
    for API purpose. Parsing could change later.
    """
    return pkg_resources.parse_version(version)
54db2805667b946d719536b64dc4733806261e00
76,463
import json


def load_json(file_path):
    """
    Loads in a JSON file as a dict.

    Args:
        file_path (str): path to JSON file

    Returns:
        dict: Data in file
    """
    data = {}
    with open(file_path, 'r') as file:
        data = json.load(file)
    return data
24da94219f22240c043eff19ff68d4585306288f
76,468
from typing import TextIO
import json


def load_mb_tags(fhandle: TextIO) -> dict:
    """Load track metadata.

    Args:
        fhandle (str or file-like): path or file-like object pointing to
            musicbrainz metadata file

    Returns:
        Dict: metadata of the track
    """
    return json.load(fhandle)
2e53983bf743fd284095a017b1d6e277db80e69c
76,469
def norm1(m):
    """Return the L1-norm of the point m."""
    s = 0.0
    for value in m.values():
        s += abs(value)
    return s
a6a74882cc86a8ca1b1a0e6dfbcdf5b9c3b11569
76,471
def is_isogram(string):
    """
    Check if the text passed in is an isogram.

    :param string string - Text to check
    :return bool - Isogram, or not.

    - filter + list will convert the string to a list of alphabetic characters
    - Count occurrences of each character; if there's more than 1 then it's
      not an isogram
    * string is lowercased while filtering to facilitate comparison (count)
    """
    text = list(filter(lambda c: c.isalpha(), string.lower()))
    for c in text:
        if text.count(c) > 1:
            return False
    return True
185c66c4f3da9eeaf71c3b5089931fbc60ffd403
76,473
def entity_resource_file(entity, resource_info):
    """
    Return a file object that reads out the content of a resource attached
    to a specified entity.
    """
    return entity.resource_file(resource_info["resource_path"])
b126fbbad79e87c9c20390e02e2c134a4bfb539c
76,474
import itertools


def _split_into_words(sentences):
    """Splits multiple sentences into words and flattens the result"""
    return list(itertools.chain(*[_.split(" ") for _ in sentences]))
458e1eeac7d05c1646057af9ab8a039b22568b87
76,477
import itertools


def peek_next(iterable):
    """
    Peek next element of iterable.

    Parameters
    ----------
    iterable
        Iterable to peek the next element from.

    Returns
    -------
    next_item
        Element peeked from `iterable`.
    new_iterable
        Iterable behaving like if the original `iterable` was untouched.
    """
    next_item = next(iterable)
    return next_item, itertools.chain([next_item], iterable)
8b249a5159c2c88188c3daa75c9ad37f2a4cbf2b
76,481
def gen_rel_xsd_path(branch_path, xsd_path):
    """Generate the relative part of the XSD path that follows under `branch_path`.

    Args:
        branch_path: str
            Absolute path to a branch holding all the XSD files for a single formatId.
        xsd_path: str
            Absolute path to an XSD file under the ``branch_path``.

    Returns:
        path: str
            E.g.:
            branch_path = /schema/isotc211/
            xsd_path = /schema/isotc211/gmd/applicationSchema.xsd
            -> gmd/applicationSchema.xsd
    """
    assert xsd_path.startswith(branch_path)
    return xsd_path[len(branch_path):]
50730daa795d44f3b9ad887c50cb2297dd24ebeb
76,485
import inspect


def for_own_methods(method_decorator):
    """
    Decorates all the methods in a class.

    This function takes a function decorator and returns a class decorator
    that applies the function decorator to all of the methods in a class.
    The function decorator will only be applied to methods that are
    explicitly defined on the class. Any inherited methods that aren't
    overridden or altered will not be decorated.

    Args:
        method_decorator (function): Method decorator to be applied to each
            method of the class

    Returns:
        function: A class decorator
    """
    def decorate(cls):
        def predicate(member):
            return inspect.ismethod(member) and member.__name__ in cls.__dict__

        for name, method in inspect.getmembers(cls, predicate):
            setattr(cls, name, method_decorator(method))
        return cls

    return decorate
e5458a9d304f02598ab6ac51edd967e97c6af4b0
76,486
def gen_DroneLog(drone_id, log_string):
    """Generate a DroneLog object from a log string."""
    dronelog = {
        "@type": "DroneLog",
        "DroneID": drone_id,
        "LogString": log_string
    }
    return dronelog
806efd33b5cca988cf0ef78d65553a3a49d8fe58
76,489
from typing import Optional
import random


def random_product(
    *iterables,
    repeat: int = 1,
    rng: Optional[random.Random] = None,
):
    """
    >>> from random import Random
    >>> random = Random(1)
    >>> suits = 'CDHS'
    >>> ranks = list(range(2, 11)) + ['J', 'Q', 'K', 'A']
    >>> card = random_product(ranks, suits, rng=random)
    >>> card
    (4, 'C')
    >>> random_product(ranks, suits, repeat=5, rng=random)
    (6, 'C', 9, 'S', 9, 'S', 'A', 'D', 3, 'S')
    """
    choice = random.choice if rng is None else rng.choice
    space = [tuple(value) for value in iterables] * repeat
    return tuple(choice(item) for item in space)
3bfa4e6cd7942da29cae396865547946bb4fc175
76,491
from urllib.parse import quote as urlquote
import re
import string


def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """Converts any URLs in text into clickable links.

    If trim_url_limit is not None, the URLs in link text longer than this
    limit will be truncated to trim_url_limit-3 characters and appended
    with an ellipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.

    *Modified from Django*
    """
    LEADING_PUNCTUATION = ['(', '<', '&lt;']
    TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;']

    word_split_re = re.compile(r'([\s\xa0]+|&nbsp;)')  # a0 == NBSP
    punctuation_re = re.compile(
        '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' %
        ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
         '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
    simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
    # del x  # Temporary variable

    def escape(html):
        return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')

    trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x

    words = word_split_re.split(text)
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = None
        if '.' in word or '@' in word or ':' in word:
            match = punctuation_re.match(word.replace('\u2019', "'"))
        if match:
            lead, middle, trail = match.groups()
            middle = middle.encode('utf-8')
            middle = middle.decode('utf-8')  # Bytes to str
            # Make URL we want to point to.
            url = None
            if middle.startswith('http://') or middle.startswith('https://'):
                url = urlquote(middle, safe='%/&=:;#?+*')
            elif middle.startswith('www.') or ('@' not in middle and
                    middle and middle[0] in string.ascii_letters + string.digits and
                    (middle.endswith('.org') or
                     middle.endswith('.net') or
                     middle.endswith('.com'))):
                url = urlquote('http://%s' % middle, safe='%/&=:;#?+*')
            elif '@' in middle and ':' not in middle and simple_email_re.match(middle):
                url = 'mailto:%s' % middle
                nofollow_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = '%s%s%s' % (lead, middle, trail)
            elif autoescape:
                words[i] = escape(word)
        elif autoescape:
            words[i] = escape(word)
    return "".join(words)
6e69864ab2fd7c3f7df23b3266ee9f497b6126d8
76,493
def to_dict_with_custom_fields(instance, custom_fields):
    """
    Convert an object to a dictionary with only some of the fields that
    exist on the object. Some fields also require some manual handling.

    :param instance: Object to be converted.
    :param custom_fields: List of fields to include in dict.
    :return: Dictionary with custom fields.
    """
    result = {}
    for field in instance._meta.fields:
        if field.name in custom_fields:
            if field.name == "avatar":
                # CoreUser field
                result[field.name] = instance.avatar.path if instance.avatar else None
            elif field.name == "logo":
                # Client field
                result[field.name] = instance.logo.path if instance.logo else None
            elif field.name == "country":
                # User field
                result[field.name] = instance.country.code if instance.country else None
            elif field.name == "organisation":
                # User field
                result["organisation_id"] = \
                    instance.organisation.id if instance.organisation else None
            else:
                result[field.name] = getattr(instance, field.name)
    return result
aba609da188821e47cccb5eebb1d936d0960ba5e
76,497
def get_if_exist(data, keys):
    """
    Recursively get a value from a nested dictionary.

    Parameters
    ----------
    data : dict
        The (nested) dictionary
    keys : list
        The list of keys to fetch

    Returns
    -------
    any or None
        The value at data[keys[0]][keys[1]] etc., or None if a key is not
        found.
    """
    if keys[0] in data:
        if len(keys) == 1:
            return data[keys[0]]
        else:
            return get_if_exist(data[keys[0]], keys[1:])
    else:
        return None
35e463473a4f85abe63b23876c1fc372e13f2072
76,499
import json


def build_keyboard(items):
    """
    Build a keyboard from the notes showing each note id in a row, and
    return the required json to display it.

    @items: dictionary with notes to display
    """
    keyboard = [[str(key)] for key in items.keys()]
    reply_markup = {"keyboard": keyboard, "one_time_keyboard": True}
    return json.dumps(reply_markup)
28b37fb501169fa4dfb9aa6462c5f33d52451f69
76,500
def readFile(filename):
    """Read a file into memory.

    Args:
        filename: Name of the file to be read

    Returns:
        Variable wholeFile containing the entire file
    """
    # home = expanduser("~")
    # cwd = os.getcwd()
    # print ("$HOME:%s | CWD:%s" % (home,cwd))
    f = open(filename, 'r')
    wholeFile = f.readlines()
    f.close()
    for i in range(len(wholeFile)):
        wholeFile[i] = wholeFile[i].replace('\r', '')
        wholeFile[i] = wholeFile[i].replace('\n', '')
    return wholeFile
9d3620525a35882c30abb088e4b3b3daad00ef94
76,501
import torch


def multi_quantile_huber_loss(quantiles: torch.Tensor,
                              target: torch.Tensor,
                              delta: float = 0.1) -> torch.Tensor:
    """Multi-quantile Huber loss

    The loss for simultaneous multiple quantile regression. The number of
    quantiles n is ``quantiles.shape[-1]``. ``quantiles[..., k]`` is the
    quantile value estimation for quantile :math:`(k + 0.5) / n`. For each
    prediction, there can be one or multiple target values.

    This loss is described in the following paper:

    `Dabney et. al. Distributional Reinforcement Learning with Quantile
    Regression
    <https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewFile/17184/16590>`_

    Args:
        quantiles: batch_shape + [num_quantiles,]
        target: batch_shape or batch_shape + [num_targets, ]
        delta: the smoothness parameter for huber loss (larger means
            smoother). Note that the quantile estimation with delta > 0 is
            biased. You should use a small value for ``delta`` if you want
            the quantile estimation to be less biased (so that the mean of
            the quantile will be close to mean of the samples).

    Returns:
        loss of batch_shape
    """
    num_quantiles = quantiles.shape[-1]
    t = torch.arange(0.5 / num_quantiles, 1., 1. / num_quantiles)
    if target.ndim == quantiles.ndim - 1:
        target = target.unsqueeze(-1)
    assert quantiles.shape[:-1] == target.shape[:-1]
    # [B, num_quantiles, num_samples]
    d = target[..., :, None] - quantiles[..., None, :]
    if delta == 0.0:
        loss = (t - (d < 0).float()) * d
    else:
        c = (t - (d < 0).float()).abs()
        d_abs = d.abs()
        loss = c * torch.where(d_abs < delta, (0.5 / delta) * d**2,
                               d_abs - 0.5 * delta)
    return loss.mean(dim=(-2, -1))
b4385668e83ce5bb424cea07f38253042a397a95
76,504
def pair_hexvalue(value, delimiter=":"):
    """
    Pair hex values (string) using delimiter.
    e.g. abcdef -> ab:cd:ef

    :param value: hex string to pair
    :param delimiter: separator inserted between pairs
    :return: delimited string of hex pairs
    """
    return delimiter.join(
        ["{}{}".format(a, b) for a, b in zip(value[::2], value[1::2])]
    )
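A brief usage sketch, e.g. for formatting a MAC-like hex string (hypothetical input):

pair_hexvalue("aabbccddeeff")           # -> 'aa:bb:cc:dd:ee:ff'
pair_hexvalue("abcdef", delimiter="-")  # -> 'ab-cd-ef'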
1cd1349bd9cc419e15e365bd243a9a5b46d5470c
76,507