content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import random


def generate_random_numbers(n=1, bits=32):
    """Generate ``n`` random integers.

    :param n: number of random numbers to generate
    :param bits: number of bits each random number should contain
    :return: list of integer random numbers, each in ``[0, 2**bits)``
    """
    # range(n) is the idiomatic form of range(0, n)
    return [random.getrandbits(bits) for _ in range(n)]
7bd2e1018ef06a47f5ff5efd044abb3a8115999f
84,375
def row_as_json(sqlite_row):
    """Convert a sqlite row (any mapping exposing ``keys()``) to a plain dict."""
    result = {}
    for key in sqlite_row.keys():
        result[key] = sqlite_row[key]
    return result
828e130cc88b0a020702eec95eca4ae95bf98644
84,376
from typing import Dict from typing import Set def _indicate_uncalled_modules( statistics: Dict[str, Dict[str, str]], stat_name: str, uncalled_modules: Set[str], uncalled_indicator: str = "N/A", ) -> Dict[str, Dict[str, str]]: """ If a module is in the set of uncalled modules, replace its statistics with the specified indicator, instead of using the existing string. Assumes the statistic is already formatting in string form. Args: statistics (dict(str, dict(str, str))) : the statistics to format. Organized as a dictionary over modules, which are each a dictionary over statistic types. Expects statistics have already been converted to strings. stat_name (str) : the name of the statistic being modified uncalled_modules set(str) : a set of names of uncalled modules. indicator (str) : the string that will be used to indicate unused modules. Defaults to 'N/A'. Returns: dict(str, dict(str, str)) : the modified statistics """ stats_out = {mod: stats.copy() for mod, stats in statistics.items()} for mod in uncalled_modules: if mod not in stats_out: stats_out[mod] = {} stats_out[mod][stat_name] = uncalled_indicator return stats_out
56592b113019b6d262a7540a2590397afa429cb0
84,377
import statistics


def find_heavy_genes(weight_matrix):
    """Return the heavy genes based on the input weight_matrix.

    The result maps positive/negative signature names ("<node>pos" /
    "<node>neg") to dicts of gene name -> weight, keeping only genes whose
    weight lies more than 2.5 sample standard deviations above (pos) or
    below (neg) the node's mean weight.
    """
    heavy_genes = {}
    for node_name, genes_weights in weight_matrix.items():
        values = list(genes_weights.values())
        mean = statistics.mean(values)
        std_dev = statistics.stdev(values)
        upper = mean + 2.5 * std_dev
        lower = mean - 2.5 * std_dev
        pos_sig, neg_sig = node_name + "pos", node_name + "neg"
        heavy_genes[pos_sig] = {}
        heavy_genes[neg_sig] = {}
        for gene, weight in genes_weights.items():
            if weight > upper:
                heavy_genes[pos_sig][gene] = weight
            elif weight < lower:
                heavy_genes[neg_sig][gene] = weight
    return heavy_genes
5e02ccc7c87ed134f574e5d392543d32b00e7327
84,378
def format_repo_info(vcs_name, vcs_path, vcs_type, integration_status):
    """Build the dict describing one repository.

    :param str vcs_name: name of the repository
    :param str vcs_path: absolute path to the repository
    :param str vcs_type: type of the underlying vcs
    :param str integration_status: status of perun integration into the vcs
    :return: dict with 'name', 'path', 'vcs_type' and 'integration' keys
    """
    info = {}
    info['name'] = vcs_name
    info['path'] = vcs_path
    info['vcs_type'] = vcs_type
    info['integration'] = integration_status
    return info
6d80ed0991f3b5b84716a2ff80ccedbedc7cbb73
84,380
def _s_dist(circ_val1: float, circ_val2: float, r: float) -> float: """ Returns the length of the directed walk from circ_val1 to circ_val2, with the lowest value length as distance. based on https://www.codeproject.com/Articles/190833/Circular-Values-Math-and-Statistics-with-Cplusplus :param circ_val1: the starting circular value. :param circ_val2: the ending circular value. :param r: max angle. :return: the directed walk. """ distance = circ_val2 - circ_val1 if distance < -r / 2: return distance + r if distance >= r / 2: return distance - r return distance
b70800193e2bf7a83302ef80c0adeaa92bfba527
84,386
import re


def count_occurrencies(word: str, input_string: str) -> int:
    """Count whole-word occurrences of *word* in *input_string*.

    :param word: the word to find (escaped, matched between word boundaries)
    :param input_string: the string searched
    :return: number of occurrences
    """
    pattern = re.compile(r"\b%s\b" % re.escape(word))
    return len(pattern.findall(input_string))
75cd1b34f8a0c65ac5f03dcdb6036021a2c6d89e
84,387
def _get_basket(skus): """ Count the elements of the skus """ basket = {} for item in skus: if item not in basket: basket[item] = 0 basket[item] += 1 return basket
63ebe5a10dbd69548a47d1bada4d7754d07e7d4d
84,388
def compute_patch_sizes(g_w=8, k=3, s=2):
    """Compute the sizes of the concentric square patches of a glimpse.

    Patch *n* (0-based) has side ``g_w * s**n``; patches are square so each
    entry is a ``(side, side)`` tuple.

    Parameters
    ----------
    g_w : int
        Width in pixels of the innermost patch. Default is 8.
    k : int
        Number of concentric patches in a glimpse. Default is 3.
    s : int
        Scaling factor between consecutive patches. Default is 2.

    Returns
    -------
    list of tuple
        k (width, height) pairs; defaults give [(8, 8), (16, 16), (32, 32)].
    """
    return [(g_w * s ** patch_num,) * 2 for patch_num in range(k)]
a4b3e3cf5dd7b0e14e0b9a67b8b97f506be77e68
84,394
def filter_multiline_string_by_includes_excludes(multiline_string, includes=None,
                                                 excludes=None,
                                                 mkdwn_line_endings=False):
    """Filter a multiline string by include/exclude substrings.

    Args:
        multiline_string (): String to filter.
        includes (): If non-empty, only lines containing at least one of
            these substrings are kept: ['important word', 'important word2']
        excludes (): Lines containing any of these substrings are dropped:
            ['undesired word', 'unimportant word2']
        mkdwn_line_endings (): When True, end each line with two spaces
            before the newline (Markdown hard line breaks).

    Returns:
        Filtered string; blank/whitespace-only lines are removed and every
        kept line ends with a newline.
    """
    if includes is None:
        includes = []
    if excludes is None:
        excludes = []

    def _wanted(line):
        # Excluded words always drop the line; includes (when given)
        # additionally require at least one match.
        if any(word in line for word in excludes):
            return False
        if len(includes) > 0:
            return any(word in line for word in includes)
        return True

    kept = [line for line in multiline_string.splitlines()
            if _wanted(line) and line.strip() != ""]

    ending = "  \n" if mkdwn_line_endings is True else "\n"
    return "".join(line + ending for line in kept)
33efc2825336a973f8f535c149e578bb8c074ae5
84,395
def color_array_by_value(value, palette, denom, mask_zeros):
    """Map a numeric value to an RGB/RGBA color.

    A *palette* that is a list is treated as discrete colors indexed by
    value; anything else is treated as a callable colormap applied to
    value/denom. -1 is a sentinel and is returned unchanged; zeros are
    optionally masked to white.
    """
    if value == -1:  # sentinel value, passed through untouched
        return -1
    is_discrete = type(palette) is list
    if value == 0 and mask_zeros:
        # Masked value: white, with an alpha channel for colormap output
        return (1, 1, 1) if is_discrete else (1, 1, 1, 1)
    if is_discrete:
        return palette[value]
    # Continuous data: the palette is actually a colormap callable
    return palette(float(value) / float(denom))
441c2f1111974f39eddcf9112220a9c48d89b0d4
84,396
def int_to_str_digit(n):
    """Convert a non-negative integer digit to a single character.

    9 -> "9", 10 -> "A", 11 -> "B", ..., 35 -> "Z", 36 -> "a", ...

    Args:
        n(int): A positive integer number.

    Returns:
        The character representation of the input digit of value n (str).
    """
    if n < 10:
        return str(n)                      # 0 - 9
    if n < 36:
        return chr(ord('A') + n - 10)      # A - Z
    return chr(ord('a') + n - 36)          # a - z or higher
629f109275d61e5c17691447cdfe51ced4a6ab5a
84,398
def verify_ico(bin: str) -> bool:
    """Check whether an IČO (Czech business ID) number is valid.

    https://phpfashion.com/jak-overit-platne-ic-a-rodne-cislo

    :param bin: string with the BIN (IČO) number
    :return: True when the weighted-checksum digit is valid, False otherwise
    :raises ValueError: when *bin* is not a string
    """
    if not isinstance(bin, str):
        raise ValueError(f"'BIN' must be an instance of 'str', not '{type(bin)}'")
    if not bin.isnumeric() or len(bin) > 8:
        return False
    # Pad with zeros from left to length 8 (123456 -> 00123456).
    # BUG FIX: the original discarded the rjust() result (strings are
    # immutable), so inputs shorter than 8 crashed with IndexError below.
    bin = bin.rjust(8, "0")
    a = sum(int(bin[i]) * (8 - i) for i in range(7)) % 11
    if a == 0:
        c = 1
    elif a == 1:
        c = 0
    else:
        c = 11 - a
    return int(bin[7]) == c
5810c8f65fbbf541faf2d811f808ad44652b6693
84,404
def rev_comp(seq):
    """Generate the reverse complement of a DNA sequence.

    :param seq: string over the alphabet A, T, C, G, N
    :return: reverse-complement string
    :raises KeyError: if *seq* contains any other character
    """
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
    # join() avoids the quadratic cost of repeated string concatenation
    return ''.join(complement[base] for base in reversed(seq))
51bf9911a2b94c452e3400e2c360cbe09f0e73cf
84,405
def coulomb_force(q1, q2, r):
    """Force between two point charges via Coulomb's law F = k*q1*q2/r**2.

    :param q1: Scalar: charge given in Coulombs
    :param q2: Scalar: charge given in Coulombs
    :param r: Scalar: distance between the two charges given in meters
    :return: Scalar: force between the charges in Newtons
    """
    COULOMB_CONSTANT = 8.987e9  # N * m ** 2 / C ** 2
    return COULOMB_CONSTANT * (q1 * q2) / r ** 2
8d6f46a652d423a278ffff51da39cb88d45c5fd1
84,413
from typing import Dict from typing import List from typing import Tuple def parse_flight_data() -> Dict[str, List[Tuple[str, str, int]]]: """Parse flight data and returns dictionary of flights by month/year Returns: Dict[str, List[str, str, int]]: d['mm-yy'] = [(origin_code, dest_code, aircraft_code), ...] """ data_paths = { 'test': 'data/test_flight_data.csv', '11-19': 'data/Nov2019_flight_data.csv', '12-19': 'data/Dec2019_flight_data.csv', '01-20': 'data/Jan2020_flight_data.csv', '02-19': 'data/Feb2019_flight_data.csv', '03-19': 'data/Mar2019_flight_data.csv', '04-19': 'data/Apr2019_flight_data.csv', '05-19': 'data/May2019_flight_data.csv', '06-19': 'data/Jun2019_flight_data.csv', '07-19': 'data/Jul2019_flight_data.csv', '08-19': 'data/Aug2019_flight_data.csv', } flight_data = dict() for month_year, data_path in data_paths.items(): flight_data[month_year] = list() with open(data_path) as f: lines_to_skip = 1 i = 0 for line in f: if i < lines_to_skip: i += 1 continue split_line = line.rstrip().split(',') try: int(split_line[11]) except: raise Exception('Bad line: {}'.format(line)) flight_data[month_year].append((split_line[4], split_line[8], int(split_line[11]))) return flight_data
c9dbcdb70d5097b34c1e9c9f76128f583eb63102
84,417
def timestep_to_years(init_year, timestep):
    """Convert simulation timesteps (months) to (possibly fractional) years.

    Parameters
    ----------
    init_year: int
        initial year in simulation
    timestep: np.array
        timestep of simulation (months)

    Returns
    -------
    array of years
    """
    MONTHS_PER_YEAR = 12
    return init_year + timestep / MONTHS_PER_YEAR
a8522751ef9e0f8f8b465074ed3ad9334ce05749
84,420
def contains_negative(collection):
    """Return True if any negative value exists in the collection."""
    for item in collection:
        if item < 0:
            return True
    return False
266c4857bed9f55cf4504ee7e3728b33e03adc8b
84,423
import sqlite3


def connect_sqlite(db_file):
    """Open *db_file* and return a (connection, cursor) pair."""
    conn = sqlite3.connect(db_file, timeout=30)
    return conn, conn.cursor()
d19d2330f06b91e045dcd8afc33c03b0db1ccc7c
84,425
def count_inversions(array, blank):
    """Return the number of inversions in *array*, ignoring the blank tile.

    The blank is matched by identity (``is``), as in the original; an
    inversion is a pair of non-blank tiles that appear out of order.
    """
    tiles = [tile for tile in array if tile is not blank]
    return sum(
        1
        for i, left in enumerate(tiles)
        for right in tiles[i + 1:]
        if left > right
    )
7f70df885e39f10ad70343c302756e25f1573305
84,426
def _format_csv(record: dict) -> str: """formats the values of the record as comma separated values """ return ','.join([str(val) for val in record.values()])
8c38cbf4d253af137b0d19e556c078ca163cb3e1
84,429
def parse_csv_string(s):
    """Parse simple comma-separated tokens into a list of stripped strings,
    dropping empty tokens (as used by the Digest-auth ``qop`` parameter).
    """
    tokens = []
    for raw in s.split(','):
        token = raw.strip()
        if token != '':
            tokens.append(token)
    return tokens
a6a01ff5a3ec843a6a4eb5d918a2dc592f19135b
84,430
def bp_from_digit(digit):
    """
    Inverse of dna_digit.
    >>> bp_from_digit(3)
    't'
    >>> bp_from_digit(7)
    Traceback (most recent call last):
    ...
    ValueError: only 0, 1, 2, or 3 accepted
    """
    # You must use the following dictionary:
    bp_map = {0: 'a', 1: 'c', 2: 'g', 3: 't'}
    try:
        return bp_map[digit]
    except KeyError:
        raise ValueError("only 0, 1, 2, or 3 accepted")
340ba53cebb4dddd6e15b8a43c7e2d2133414e9c
84,438
import json


def read_secret_file(file_path):
    """Read the JSON secret file and return its 'secret_key' value.

    :param file_path: path to a JSON file containing a "secret_key" entry
    :return: the secret key string
    """
    # The original had unreachable code after this return (a print and a
    # fallback `return ''` that could never execute); removed.
    with open(file_path, 'r') as secret_file:
        obj = json.loads(secret_file.read())
    return obj['secret_key']
e0a0cc0ad3c9c6342cc157894de5176129aac475
84,442
def _is_world_over(info): """Return a boolean determining if the world is over.""" # 0x0770 contains GamePlay mode: # 0 => Demo # 1 => Standard # 2 => End of world return info["gameMode"] == 2
dec1da5d4d41710b3d4f4872042d49fde0936bbc
84,445
def c_to_kelvin(temp_c: float) -> float:
    """Convert a temperature from Celsius to Kelvin.

    :param temp_c: temperature in Celsius
    :return: temperature in Kelvin
    """
    CELSIUS_OFFSET = 273.15
    return CELSIUS_OFFSET + temp_c
ec0286a7816ea1fe5e0e5df8e5c96a84434c28cf
84,447
def get_maintenance_status(code):
    """Translate a maintenance status code into a human-readable label."""
    labels = {0: "No maintenance", 1: "In progress"}
    label = labels.get(code)
    if label is None:
        return "Unknown ({})".format(str(code))
    return label + " (" + str(code) + ")"
afb568bffe30fbea6e5c6ab40a5826611d762822
84,448
def main_DNA(dna):
    """Count occurrences of 'A', 'C', 'G', 'T' in *dna* and return them
    space-separated, in that order. Raises for empty or over-long input."""
    if not 0 < len(dna) <= 1000:
        raise Exception('Input Error')
    counts = [str(dna.count(base)) for base in ('A', 'C', 'G', 'T')]
    return ' '.join(counts)
95700d7e18d83edb146dcb56043eeb0a83f882fb
84,453
def namelist_block(config, namelist):
    """Render one namelist block from *config* as a string.

    Keys with falsy values are skipped; values are stripped of surrounding
    whitespace. Returns '' when the namelist is absent.
    """
    if namelist not in config:
        return ''
    lines = [namelist]
    for key, raw_value in config[namelist].items():
        if raw_value:
            lines.append('{} = {}'.format(key, raw_value.strip()))
    lines.append('/')
    return '\n'.join(lines) + '\n'
99a286935210e85e97d8d71b1a86730afe5465c0
84,463
def get_aws_account_id_from_arn(lambda_arn):
    """Extract the AWS account id from an ARN.

    :param lambda_arn: arn of a calling lambda
    :type lambda_arn: string
    :returns: aws account id
    :rtype: string
    """
    # ARN layout: arn:partition:service:region:account-id:resource...
    ACCOUNT_ID_FIELD = 4
    return lambda_arn.split(':')[ACCOUNT_ID_FIELD]
2cbee9e88a7ec82bb03c4801369e3efe0baafce4
84,468
def get_symbol_module(sym):
    """Return the module a symbol belongs to: the first component of its
    namespace name."""
    namespace_path = sym.namespace.name
    return namespace_path[0]
542311fd785df4474df6012576d95278e6805b2d
84,471
def get_cross_correlation(dLT_dTAU, dLT_dLU, dLT_dLD, S_TAU, S_LU, S_LD):
    """Cross-correlation term of the surface-temperature uncertainty estimate.

    Args:
        dLT_dTAU: transmission partial
        dLT_dLU: upwelled partial
        dLT_dLD: downwelled partial
        S_TAU: transmission uncertainty
        S_LU: upwelled uncertainty
        S_LD: downwelled uncertainty

    Returns:
        The summed cross-correlation term (same array/scalar type as inputs).
    """
    # Correlation coefficients from MODTRAN simulations using MERRA.
    CORR_TAU_LU = -0.9899
    CORR_TAU_LD = -0.9857
    CORR_LU_LD = 0.9965
    return (2 * CORR_TAU_LU * dLT_dTAU * dLT_dLU * S_TAU * S_LU
            + 2 * CORR_TAU_LD * dLT_dTAU * dLT_dLD * S_TAU * S_LD
            + 2 * CORR_LU_LD * dLT_dLU * dLT_dLD * S_LU * S_LD)
08cbc2370b106e68c945cc384418d679806e8eb5
84,476
import copy


def expand_list_list(mylist):
    """Recursive function. Takes a list of lists and lists of lists and
    returns a list of flat lists.

    Example: [[1,2],[[4,5],[6,7]]] -> [[1,2,4,5], [1,2,6,7]]

    Each element of *mylist* is either a flat "segment" (list of scalars)
    or a list of alternative segments; the result is the cartesian
    expansion of the alternatives, with flat segments shared by every
    combination. Input sub-lists are shallow-copied, never mutated.
    """
    res = []
    # Empty input, or a single empty element: one empty combination.
    if not mylist or len(mylist) == 1 and not mylist[0]:
        return [[]]
    # Check the first element is at least a list
    assert isinstance(mylist[0], list), \
        "Expand_list_list needs a list of lists and lists of lists"
    # Recursion stop condition, one single element:
    # a list of alternatives is returned as-is; a flat segment is wrapped.
    if len(mylist) == 1:
        if isinstance(mylist[0][0], list):
            return mylist[0]
        else:
            return mylist
    if isinstance(mylist[0][0], list):
        # First element is a set of alternatives.
        for item in mylist[0]:
            # Here the recursion happens: create lists starting with each
            # alternative of the first element and completed with the rest
            # expanded.
            for rest in expand_list_list(mylist[1:]):
                reslist = copy.copy(item)
                reslist.extend(rest)
                res.append(reslist)
    else:
        # First element is a single flat segment shared by all expansions.
        for rest in expand_list_list(mylist[1:]):
            reslist = copy.copy(mylist[0])
            reslist.extend(rest)
            res.append(reslist)
    return res
0e26f3ebc441b1b9fad9f13695ec490e26cfb683
84,486
def election_slug(state, start_date, race_type, special=False, **kwargs):
    """Generate a standardized election identifier string.

    Args:
        state: Lowercase state postal abbreviation, e.g. "md".
        start_date: Start date of election as YYYY-MM-DD. Required.
        race_type: Race type, e.g. "general" or "primary". Required.
        special: Whether the election is a special election. Default False.

    Returns:
        String formatted like
        ``{state_abbrev}-YYYY-MM-DD-(special)-{race_type}``,
        e.g. "ar-2012-05-22-primary".
    """
    parts = [state.lower(), start_date]
    if special:
        parts.append('special')
    parts.append(race_type.lower())
    return "-".join(parts)
b29adec38b3168e0a02038162034c9bfef0d931b
84,487
def set_public_or_private(objectname):
    """Return 0 (private) when the file/folder name starts with '_',
    otherwise 1 (public)."""
    PRIVATE_PREFIX = "_"
    return 0 if objectname.startswith(PRIVATE_PREFIX) else 1
e94dc8552caefc17f35d17d022ad1d81b7a72a9e
84,490
from typing import Tuple
import asyncio
import threading


def setup_loop_in_thread() -> Tuple[asyncio.AbstractEventLoop, threading.Thread]:
    """Create a fresh asyncio event loop and run it forever on a daemon thread.

    Returns:
        The (event loop, thread) pair.
    """
    new_loop = asyncio.new_event_loop()
    runner = threading.Thread(target=new_loop.run_forever, daemon=True)
    runner.start()
    return new_loop, runner
cd8e3c882cd50ddb7fbee4b0653c981c1c3bdb8a
84,491
def _get_pd_fields(d, prefix, id):
    """Retrieve form fields representing a product.

    Looks up keys of the form "<prefix><id>-<field>" in the form-data
    mapping *d* and converts them to typed values. If every field is
    missing/empty the product is considered deleted.

    NOTE(review): `id` shadows the builtin; kept for interface
    compatibility. Falsy-but-present values like "0" are truthy strings
    here, so they are converted, not treated as missing.
    """
    fields = ['name', 'price', 'quantity_per_package', 'unit',
              'quantity_limit', 'quantum', 'unit_weight']
    raw = {f: d.get("%s%d-%s" % (prefix, id, f), None) for f in fields}
    if not any(f for f in raw.values()):
        return {'deleted': True}  # All fields empty means deleted
    qpp = raw['quantity_per_package']
    quota = raw['quantity_limit']
    quantum = raw['quantum']
    weight = raw['unit_weight']
    # Empty/None numeric fields become None; 'unit' falls back to 'pièce'.
    # 'deleted' also reflects an explicit "<prefix><id>-deleted" key.
    return {'name': raw['name'],
            'price': float(raw['price']),
            'quantity_per_package': int(qpp) if qpp else None,
            'unit': raw['unit'] or u'pièce',
            'quantity_limit': int(quota) if quota else None,
            'quantum': float(quantum) if quantum else None,
            'unit_weight': float(weight) if weight else None,
            'deleted': "%s%d-deleted" % (prefix, id) in d}
c7160759c43fbf9f9a7c94af4b3cf811eb3909e6
84,492
import re


def sanitize_filename(input_filename):
    """Remove characters from a putative filename that might be problematic
    for filesystems.

    Every run of non-word characters (anything that is not alphanumeric or
    underscore) is collapsed into a single underscore.

    Parameters
    ------------
    input_filename : str
        Proposed filename

    Returns
    ---------
    str
        The nicely formatted filename
    """
    return re.sub(r'\W+', '_', input_filename)
d4548befaa7b21edde35221f52f9f03f89aaa1e8
84,497
import re


def is_date(token):
    """Is the string is a date.

    :param token: string
    :return: Boolean
    """
    # NOTE(review): the pattern appears to validate US-style m/d/yyyy dates
    # (separators '/', '-' or '.'), including month lengths and the Feb-29
    # leap-year rule — verify against the caller's expected date format.
    # Backreferences \1..\4 force the same separator within one alternative.
    regexp = re.compile(r'^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))'
                        r'(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|'
                        r'[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|'
                        r'(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$')
    if regexp.search(token) is not None:
        return True
    else:
        return False
fdf1d5a88cb76a884a5a85b94cf82833f1a73249
84,501
from typing import Dict
from typing import Any
from pathlib import Path
import yaml


def load_config(fpath: str) -> Dict[str, Any]:
    """Loads a configuration from a path to a YAML file, allows for
    inheritance between files using the ``base_config`` key.

    Parameters
    ----------
    fpath : str
        Path to the (YAML) config file. A ``base_config`` entry, if present,
        is resolved relative to this file's directory and loaded recursively;
        the current file's keys override the base's (shallow dict.update).
    """
    path = Path(fpath)
    with open(path) as handle:
        config = yaml.load(handle, Loader=yaml.FullLoader)
    if 'base_config' in config:
        # Recursive inheritance: load the parent config, then overlay this
        # file's keys on top of it (top-level keys only, not deep-merged).
        base_config = load_config(path.parent / config['base_config'])
        base_config.update(config)
        return base_config
    else:
        return config
8b3d708f4ec441dc5be661efb5fc243d7d4d57fd
84,505
def pH1_with_apriori(alpha, pi=0.5, beta=.8):
    """Posterior probability of H1 given significance level *alpha*,
    prior *pi* and type-II error rate *beta* (power = 1 - beta)."""
    true_positive = (1 - beta) * pi
    false_positive = alpha * (1 - pi)
    return true_positive / (true_positive + false_positive)
fe7b296b4d8dfbb4a2b0813b7589dcd34b4a030e
84,508
def conf_interval(lb_95, ub_95, rounding=4):
    """Format a 95% confidence interval as ``"<lower>; <upper>"``, with both
    bounds rounded to *rounding* decimal places."""
    lower = round(lb_95, rounding)
    upper = round(ub_95, rounding)
    return f"{lower}; {upper}"
c33165e59f3f07fd85b4b336d536d5ff04b76101
84,511
import re


def tp_key_value(str_tag):
    """Extract a key and a value from a string assumed to be a TaskPaper tag.

    Splits on the delimiters @ ( ) { }: the first piece after the split is
    the key, and the value is the first non-empty piece after that.
    Returns ('', '') for input with no delimiters.
    """
    rgx_split = re.compile(r'[\@\(\)\{\}]')
    str_key, str_value = '', ''
    # count the pieces
    lst_parts = rgx_split.split(str_tag)
    lng_parts = len(lst_parts)
    # and winnow the noise
    if lng_parts > 1:
        str_key = lst_parts[1]
    if lng_parts > 2:
        # NOTE: relies on the loop variable keeping its last value after
        # the loop — str_value ends up as the first non-empty trailing
        # piece, or the last (empty) piece if all are empty.
        for str_value in lst_parts[2:]:
            if str_value != '':
                break
    return (str_key, str_value)
1121999e697924896a97e1fe8ffb225b71d36dad
84,512
def calc_propositions(word_list):
    """Return the number of words in *word_list* flagged as propositions
    (i.e. whose ``isprop`` attribute is truthy)."""
    return sum(1 for word in word_list if word.isprop)
cc744c49fa0ba52d02720645311bf5d983e531b4
84,519
def take_data(data, mask):
    """Select correlation(difference) data at the masked indices.

    Every element of *data* except the one at index 1 is indexed with
    ``[..., mask, :]``; the element at index 1 and ``None`` entries are
    passed through unchanged.

    Parameters
    ----------
    data : tuple of ndarrays
        Data tuple as returned by `ccorr` and `acorr` functions
    mask : ndarray
        A boolean frame mask array

    Returns
    -------
    out : tuple
        Same structure as *data*, with the arrays masked.
    """
    masked = []
    for index, item in enumerate(data):
        if index == 1 or item is None:
            masked.append(item)
        else:
            masked.append(item[..., mask, :])
    return tuple(masked)
907b23ca8c15f254d65dad94c789143ceea234fa
84,522
def parse_ignore_classifiers(value):
    """Extract flag ignore classifiers value from the param for validation.

    :param value: Input ignore_classifiers value.
    :return: True for 'true', False for 'false', otherwise ``None``.
    """
    for literal, parsed in (('true', True), ('false', False)):
        if value == literal:
            return parsed
    return None
85896c96f5e262bae302ee78314959590d68d87c
84,523
def width_def(backend):
    """
    Function to extract the width of a CNOT pulse

    Parameters
    ----------
    backend : qiskit backend
        Backend whose pulse defaults are inspected.

    Returns
    -------
    width : int
        samples of the duration of the pulse, in terms of time it will be
        multiplied by 0.22ns.
    """
    # Pulse schedule of the cx gate on qubits (0, 1).
    cx_def = backend.defaults().instruction_schedule_map.get("cx", [0, 1])
    # Scan for the last cross-resonance ("CR") pulse with nonzero duration.
    # NOTE(review): if no instruction matches, `width` is never bound and
    # the final return raises UnboundLocalError — confirm callers only pass
    # backends whose cx schedule contains a CR pulse.
    for inst in cx_def.instructions:
        if (inst[1].duration > 0) and ("CR" in inst[1].name):
            width = inst[1].pulse.width
    return width
d76778f1378c95c7911feed0a537ebdb4dc78815
84,525
import re def get_cases(test_suite, test_name_regex): """ Return case list which name matched regex. """ cases = [] for test_case_name in dir(test_suite): test_case = getattr(test_suite, test_case_name) if callable(test_case) and re.match(test_name_regex, test_case_name): cases.append(test_case_name) return cases
eee9696b080910b629a035f028167a214ad98191
84,527
def get_email_headers(email) -> dict:
    """Extract the 'From' and 'Subject' headers from a Gmail-style email
    payload dict; missing headers default to "N/A".

    :param email: mapping with email["payload"]["headers"] entries of
        {"name": ..., "value": ...}
    :return: {"sender": ..., "subject": ...}
    """
    name_to_key = {"From": "sender", "Subject": "subject"}
    headers = {"sender": "N/A", "subject": "N/A"}
    for header in email["payload"]["headers"]:
        key = name_to_key.get(header["name"])
        if key is not None:
            headers[key] = header["value"]
    return headers
22874a9b7cd39dbdb88aeb6ea0e6f7d9c9ce71f6
84,528
from typing import Coroutine
import asyncio


def run_coro(coro: Coroutine):
    """Run an async coroutine to completion and return its result.

    Reuses the current event loop when one exists; otherwise creates and
    installs a new one.

    Args:
        coro (CoroutineType): The coroutine to execute

    Returns:
        The result of the coroutine
    """
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(coro)
87e7b5a9a7d5a61b90196537663766e3e58937ba
84,529
def validate_dataset(columns, rows):
    """Validate that (i) each column has a unique non-negative identifier,
    (ii) each row has a unique non-negative identifier, and (iii) each row
    has exactly one value per column.

    Parameters
    ----------
    columns: list(vizier.datastore.dataset.DatasetColumn)
        List of columns; each is expected to have a unique identifier.
    rows: list(vizier.datastore.dataset.DatasetRow)
        List of dataset rows.

    Returns
    -------
    int, int
        Maximum column identifier and maximum row identifier (-1 when the
        respective list is empty).

    Raises
    ------
    ValueError
        On any schema violation.
    """
    col_ids = set()
    for col in columns:
        col_id = col.identifier
        if col_id < 0:
            raise ValueError('negative identifier %d' % col_id)
        if col_id in col_ids:
            raise ValueError('duplicate identifier %d' % col_id)
        col_ids.add(col_id)
    row_ids = set()
    for row in rows:
        row_id = row.identifier
        # BUG FIX: the original used "'%d' % str(row_id)" in the three row
        # error messages, which itself raised TypeError instead of the
        # intended ValueError. Use %s so the ValueError is actually raised.
        if len(row.values) != len(columns):
            raise ValueError('schema violation for row %s' % str(row_id))
        if row_id < 0:
            raise ValueError('negative row identifier %s' % str(row_id))
        if row_id in row_ids:
            raise ValueError('duplicate row identifier %s' % str(row_id))
        row_ids.add(row_id)
    max_colid = max(col_ids) if col_ids else -1
    max_rowid = max(row_ids) if row_ids else -1
    return max_colid, max_rowid
890681e0ec7ac5c353348d57a6709db0e7c60db7
84,530
def clean_file_record(raw_record_string):
    # type: (str) -> str
    """Strip NUL ('\\x00') characters out of the provided record string."""
    NUL = '\x00'
    return raw_record_string.replace(NUL, '')
32f862ba9bd51929b6485113d9e629a60874a12b
84,531
def gdk(dd, n=0):
    """Get-dict-key; return the n-th key (insertion order) of dict *dd*.
    Negative indices count from the end; out-of-range raises IndexError."""
    keys = list(dd)
    return keys[n]
67a458d9722af9fa3ba4841ab5d05bdec68c2498
84,532
def string_between(
        text, begin_str, end_str,
        incl_begin=False, incl_end=False,
        greedy=True):
    """
    Isolate the string contained between two tokens

    Args:
        text (str): String to parse
        begin_str (str): Token at the beginning
        end_str (str): Token at the ending
        incl_begin (bool): Include 'begin_string' in the result
        incl_end (bool): Include 'end_str' in the result.
        greedy (bool): Output the largest possible string.

    Returns:
        text (str): The string contained between the specified tokens (if any)

    Examples:
        >>> string_between('roses are red violets are blue', 'ses', 'lets')
        ' are red vio'
        >>> string_between('roses are red, or not?', 'a', 'd')
        're re'
        >>> string_between('roses are red, or not?', ' ', ' ')
        'are red, or'
        >>> string_between('roses are red, or not?', ' ', ' ', greedy=False)
        'are'
        >>> string_between('roses are red, or not?', 'r', 'r')
        'oses are red, o'
        >>> string_between('roses are red, or not?', 'r', 'r', greedy=False)
        'oses a'
        >>> string_between('roses are red, or not?', 'r', 's', True, False)
        'rose'
        >>> string_between('roses are red violets are blue', 'x', 'y')
        ''
    """
    # The booleans are repurposed as character offsets: when the token is
    # NOT included we skip its length past find()'s position (begin) or add
    # nothing to it (end); note the inverted condition for incl_begin.
    incl_begin = len(begin_str) if not incl_begin else 0
    incl_end = len(end_str) if incl_end else 0
    if begin_str in text and end_str in text:
        if greedy:
            # Widest span: first begin token to last end token.
            begin = text.find(begin_str) + incl_begin
            end = text.rfind(end_str) + incl_end
        else:
            # Narrowest span: first end token occurring after begin.
            begin = text.find(begin_str) + incl_begin
            end = text[begin:].find(end_str) + incl_end + begin
        text = text[begin:end]
    else:
        text = ''
    return text
7d486103e6d0ca0436efa7b908cd3c0f6f071785
84,534
from typing import List


def assign_non_encoder_layer_ids(
    non_encoder_names: List[str],
    layer_id: int,
):
    """
    Assign the provided id to non-encoder layers.

    Parameters
    ----------
    non_encoder_names
        Names of layers not belonging to an encoder.
    layer_id
        The id to assign to every one of them.

    Returns
    -------
    A dictionary mapping the layer names (keys) to their ids (values).
    """
    return {name: layer_id for name in non_encoder_names}
ae73a009ad22ae337c9d02403849c448a5bd5d56
84,535
def read_exclude_words(f_words):
    """Get the words to exclude from the computation of mentions.

    :param f_words: path to a UTF-8 text file, one word per line
    :return: list of words with trailing whitespace/newlines stripped
    """
    # 'with' guarantees the file is closed even if reading raises,
    # replacing the original manual open()/close() pair.
    with open(f_words, mode='r', encoding='utf-8') as fi:
        return [line.rstrip() for line in fi]
62ce9675cb39c24509e75b2bc729aaf1dd06266a
84,536
import itertools


def ensure_unique(qs, field_name, value, exclude_id=None):
    """
    Makes sure that `value` is unique on model.fieldname. And nonempty.

    Probes the queryset for collisions, appending '-1', '-2', ... to the
    original value (or using bare counters when the value was empty) until
    a free value is found.

    :param qs: Django-style queryset supporting .exclude/.filter/.exists
    :param field_name: model field checked for uniqueness
    :param value: candidate value; falsy values become the string "None"
    :param exclude_id: primary key excluded from the collision check
        (typically the record being updated)
    :return: a value unique for field_name within qs
    """
    orig = value
    if not value:
        value = "None"
    # itertools.count(1) gives an unbounded suffix sequence; the loop exits
    # as soon as no other record holds the candidate value.
    for x in itertools.count(1):
        if not qs.exclude(id=exclude_id).filter(**{field_name: value}).exists():
            break
        if orig:
            value = '%s-%d' % (orig, x)
        else:
            value = '%d' % x
    return value
24b46c43ade8d33cc66cbaa5513bb40ed85cfb4b
84,537
def check_attendance(registrants, attendees, config):
    """Mark attendance on registration records and back-fill unregistered
    attendees.

    Iterates through the list of registrants and checks each registrant
    against the collection of emails of the actual attendees. If the
    registrant's email address is present in the collection of attendees,
    the ATTENDED_FIELD field is set to true for the registrant.

    This function maintains a set of registered attendee email addresses.
    The difference between the original attendee set and the set of
    registered attendees is treated as the collection of attendees that did
    not register. For each attendee that did not register, a corresponding
    registration record is created (mutating `registrants` in place). Note
    that these new records will not contain most of the registration
    information.

    Args:
        registrants - list of all registration records (list of dicts);
            mutated in place: attendance fields are set and new records
            are appended for unregistered attendees
        attendees - dictionary containing the actual attendee records, with
            email addresses forced to lowercase as the keys
        config - ConfigParser object containing the configuration data
            (sections 'REGISTRANTS' and 'ATTENDEES' supply the field names)

    Returns:
        Dictionary containing the attendance counts ('registrants',
        'attendees', 'reg_no_attend', 'attend_no_reg').
    """
    # Field name mappings, note that the "_att" suffixes apply to the
    # attendee file, while "_reg" applies to the registration file
    email_field_reg = config['REGISTRANTS']['EMAIL_FIELD']
    email_field_att = config['ATTENDEES']['EMAIL_FIELD']
    attended_field = config['REGISTRANTS']['ATTENDED_FIELD']
    first_nm_field_reg = config['REGISTRANTS']['FIRST_NM_FIELD']
    first_nm_field_att = config['ATTENDEES']['FIRST_NM_FIELD']
    last_nm_field_reg = config['REGISTRANTS']['LAST_NM_FIELD']
    last_nm_field_att = config['ATTENDEES']['LAST_NM_FIELD']
    attend_dur_reg = config['REGISTRANTS']['ATTEND_DUR_FIELD']
    attend_dur_att = config['ATTENDEES']['ATTEND_DUR_FIELD']
    na_val = config['REGISTRANTS']['NOT_AVAIL']

    def proc_unreg(unregistered, reg_fields):
        """Create and append a registration record for an unregistered
        attendee.

        NOTE: the default value for all fields is specified in this method
        (`na_val`); only the name, email, duration and attended fields are
        copied from the attendee record.

        Args:
            unregistered - dictionary containing the information of an
                individual that attended the event. Will be used for the
                new registration record
            reg_fields - list of strings identifying the field names (used
                as keys in this implementation) for the registration record
        """
        # Initialize the dictionary representing the new record with the
        # default field value, then set the ATTENDED_FIELD field to True,
        # finally add the new row to the collection of registrants.
        new_reg = {fld:na_val for fld in reg_fields}
        new_reg[email_field_reg] = unregistered[email_field_att]
        new_reg[last_nm_field_reg] = unregistered[last_nm_field_att]
        new_reg[first_nm_field_reg] = unregistered[first_nm_field_att]
        new_reg[attend_dur_reg] = unregistered[attend_dur_att]
        new_reg[attended_field] = True
        registrants.append(new_reg)
        print("Unregistered attendee: " + repr(unregistered[email_field_att]))

    # Keeps the counts of registrants, attendees, etc.
    counts = {'registrants':len(registrants)
              ,'attendees':len(attendees)
              ,'reg_no_attend':0
              ,'attend_no_reg':0
             }
    # We'll keep a list of the attendees that were also registered. This list
    # will be used to deduce the list of attendees that weren't registered.
    registered = []
    # NOTE(review): assumes `registrants` is non-empty — registrants[0]
    # supplies the field schema; confirm with callers.
    reg_fields = list(registrants[0].keys())
    # We'll need a set identify the group of attended, but unregistered
    # individuals, so we might as well use it for checking attendance, as
    # well
    attendee_set = frozenset(attendees.keys())
    for reg in registrants:
        # Attendee keys are lowercase, so normalize before membership test.
        reg_email = reg[email_field_reg].lower()
        if reg_email in attendee_set:
            reg[attended_field] = True
            reg[attend_dur_reg] = attendees[reg_email][attend_dur_att]
            registered.append(reg_email)
    # We should have all of the registered attendees marked. The registrants
    # that did not attend are marked when the registration list was processed.
    # Now we have to deal with the attendees that weren't registered
    unregistered = attendee_set - set(registered)
    for unreg in unregistered:
        proc_unreg(attendees[unreg], reg_fields)
    # Complete the counts
    counts['attend_no_reg'] = len(unregistered)
    counts['reg_no_attend'] = counts['registrants'] - (counts['attendees'] - counts['attend_no_reg'])
    return counts
a7128144a5cde28ef64037b5dfe28e36b390c9c3
84,538
def to_tuple_list(edge_index):
    """Convert a COO-format ``edge_index`` tensor into a list of edge tuples."""
    # Move to host memory, transpose to (num_edges, 2), then tuple-ify rows.
    rows = edge_index.T.cpu().numpy()
    return list(map(tuple, rows))
08dfb54976b932e34a5cf395cfe65e01ef97b1fd
84,539
def if_none(value, default):
    """Return ``value``, substituting ``default`` only when it is None."""
    if value is None:
        return default
    return value
b0af7a68c4a14d48ffe6041b2baf724ce28013d8
84,540
import re


def process_rider(row):
    """Preprocess a single rider record.

    - Returns an empty dict for riders with missing/placeholder names,
      names containing digits, or known-bad RacerIDs (there may be more!).
    - Adds a combined 'Name' field from FirstName and LastName.
    - Consolidates the two age columns into a single 'Age' field.
    """
    bad = (
        row['RacerID'] in [3288, 61706, 832, 351]
        or not row['FirstName']
        or row['FirstName'] == 'Unknown'
        or re.search(r'[\d]', row['FirstName'])
        or re.search(r'[\d]', row['LastName'])
    )
    if bad:
        return {}

    # Combine names
    row['Name'] = '{} {}'.format(row['FirstName'], row['LastName'])

    # Take the larger of the two (possibly missing) age fields.
    calculated = row['CalculatedAge'] or 0
    reported = row['ReportedAge'] or 0
    row['Age'] = calculated if calculated >= reported else reported
    return row
bc48488c18c95215b5790a4541269ccba401cb9f
84,541
def formatter(name, default=False):
    """Decorate a Feature method to register it as an output formatter.

    All formatters are picked up by the argument parser so that they can
    be listed and selected on the CLI via the -f, --format argument.
    """
    def decorator(func):
        # The registration metadata is stashed on the function object.
        func._output_format = {'name': name, 'default': default}
        return func
    return decorator
c76751943d092e2f89fe4a08df805df79f317b18
84,543
import string
import re


def remove_punctuation(word):
    """
    Remove all punctuation marks from a word except for ' that is often
    a part of a word: don't, it's, and so on.
    """
    # str.translate instead of a regex character class: string.punctuation
    # contains ']', '\\' and '-', which only parse correctly inside
    # '[...]' by accident of their ordering.
    marks = string.punctuation.replace("'", '')
    return word.translate(str.maketrans('', '', marks))
02667dbeada5fdd224f62852b2ec44590bb2b618
84,550
def resource_allowed(enforcer, context, resource_allowed_action, target=None):
    """Check whether 'resource_allowed_action' is allowed by 'enforcer'
    for the given context.

    Args:
        enforcer: policy enforcer exposing ``enforce(action, target, creds)``.
        context: request context exposing ``to_policy_values()``,
            ``is_admin``, ``tenant`` and ``user``.
        resource_allowed_action: name of the policy rule to check.
        target: optional policy target dict; defaults to one built from the
            context's tenant/user (mutable-default ``{}`` replaced by None).

    Returns:
        The result of ``enforcer.enforce``.
    """
    credentials = context.to_policy_values()
    if 'tenant_id' not in credentials:
        credentials['tenant_id'] = credentials.get('project_id', None)
    if 'is_admin' not in credentials:
        credentials['is_admin'] = context.is_admin
    if not target:
        # This allows 'admin_or_owner' type rules to work
        target = {
            'project_id': context.tenant,
            'user_id': context.user,
            'tenant_id': context.tenant
        }
    return enforcer.enforce(resource_allowed_action, target, credentials)
7aec1d7551dbd50f9840e77d518be8578e48d165
84,551
import re


def is_dimensionless_standard_name(xml_tree, standard_name):
    """
    Return a truthy value when the units for `standard_name` are
    dimensionless. Dimensionless standard names include those with no
    canonical units and those whose units are constants in the CF standard
    name table, i.e. '1' or '1e-3'.
    """
    # standard_name must be a string; anything else is *wrong* by default.
    if not isinstance(standard_name, str):
        return False
    entry = xml_tree.find(".//entry[@id='{}']".format(standard_name))
    if entry is None:
        # Unknown standard name: assume units are required for now.
        return False
    canonical_units = entry.find("canonical_units")
    if canonical_units is None:
        return True
    # So far the table uses 1 and 1e-3 as constant units, but accept all
    # valid UDUnits prefixes (CF Table 3.1) to be on the safe side.
    dimless_units = r"1(?:e-?(?:1|2|3|6|9|12|15|18|21|24))?$"
    return re.match(dimless_units, canonical_units.text)
56dd35f86fd1854fbb7db73ce79645ae368ea9ea
84,553
def dance(val):
    """Force *val* to ASCII by dropping any non-ASCII characters."""
    ascii_bytes = val.encode('ascii', 'ignore')
    return ascii_bytes.decode('ascii')
2b4ec5c9f64aad1348ee703f9765a0a35c455375
84,554
def render_abc(abc: str, show_source: bool = False) -> str:
    """Create Javascript code for rendering ABC music notation.

    :param abc: The ABC source code to render into music notation
    :param show_source: ``True`` to include the original ABC music notation
        source code in the cell output
    :return: The Javascript code as a single string
    """
    # Newlines must survive as literal '\n' inside the generated JS strings.
    escaped = abc.replace('\n', r'\n')
    parts = [
        'require(["abcjs"], function(ABCJS) {',
        ' element.prepend("<div class=\'abc\'></div>");',
        ' var output = element.find(".abc").get(0);',
    ]
    if show_source:
        # The source <pre> block goes right before the console.log line.
        parts.append('element.prepend("<pre>{}</pre>");'
                     .format(abc.replace('\n', '<br />')))
    parts.append(' console.log(element, output, ABCJS, "{}");'.format(escaped))
    parts.append(' ABCJS.renderAbc(output, "{}");'.format(escaped))
    parts.append('});')
    return ''.join(parts)
5e02b7f289a235a30666dbbbb869c9d6903a519c
84,556
def add_schema(url):
    """Return *url* with an HTTP scheme prepended when none is present.

    Args:
        url: a URL string, with or without a scheme.

    Returns:
        The URL unchanged if it already starts with http:// or https://,
        otherwise "http://" + url.
    """
    # startswith (not substring containment): a URL that merely *contains*
    # "http://" later in its path must still get a scheme prepended.
    if url.startswith(('http://', 'https://')):
        return url
    return 'http://' + url
00b1aedb0847373ddf098484b30d93230b998ab4
84,573
from datetime import datetime
import time


def transform_date_to_nanoseconds(date) -> int:
    """Parse a datetime string and return it as UNIX nanoseconds.

    Args:
        date: (str) Datetime string in `%Y-%m-%d %H:%M:%S` format,
            interpreted in local time (via time.mktime).

    Returns:
        int: Date in UNIX nanoseconds.
    """
    parsed = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')  # noqa: WPS323
    seconds = time.mktime(parsed.timetuple())
    return int(seconds * 10 ** 9)
1c28de9196a4708759d56bb27c82d6e059ec0e64
84,580
import torch


def form_pair_info_tensor(
    batch_dim_idxs: torch.Tensor, px_1d_y: torch.Tensor, px_1d_x: torch.Tensor
):
    """Column-stack three length-N 1-d tensors into an (N, 3) tensor.

    Args:
        batch_dim_idxs: size (N,) tensor of minibatch example indices
        px_1d_y: size (N,) tensor of row indices
        px_1d_x: size (N,) tensor of column indices

    Returns:
        (N, 3) tensor whose columns are the three inputs.
    """
    num = batch_dim_idxs.shape[0]
    # All three inputs must be 1-d and of equal length.
    for vec in (batch_dim_idxs, px_1d_y, px_1d_x):
        assert vec.shape == (num,)
    stacked = torch.stack([batch_dim_idxs, px_1d_y, px_1d_x])
    return stacked.t()
053dd77199fb6ed44cdf23c644460c6c6ee393d7
84,587
def splitall(string, splitcharlist):
    """Split the supplied string at every character given in the second
    argument, discarding empty pieces.

    :param string: the string to break up
    :type string: string
    :param splitcharlist: the characters to break on
    :type splitcharlist: a list of characters (string)

    :example:
        >>> splitall("fred,daphne.velma", ",.")
        ['fred', 'daphne', 'velma']
    """
    pieces = [string]
    # Split successively on each delimiter, flattening as we go.
    for ch in splitcharlist:
        pieces = [part for piece in pieces for part in piece.split(ch)]
    # Drop the empty fragments produced by adjacent/leading delimiters.
    return [piece for piece in pieces if piece != '']
3d5379707edd0900697637b43f7fc72047a00e36
84,588
def get_label2id(labels):
    """
    Map each label to its (stringified) position in `labels`.

    Args:
        labels: list of labels.
    Return:
        label2id map, with string values ("0", "1", ...).
    """
    mapping = {}
    for index, label in enumerate(labels):
        mapping[label] = str(index)
    return mapping
644b9ce55a3df43eb8c85ed8086b2d10dbcc6951
84,592
def _process_get_set_Tol(operNum,reply): """Process reply for functions zGetTol and zSetTol""" rs = reply.rsplit(",") tolType = [rs[0]] tolParam = [float(e) if i in (2,3) else int(float(e)) for i,e in enumerate(rs[1:])] toleranceData = tuple(tolType + tolParam) return toleranceData
cb52b3f7b64c7afb7452b287f10a2abb090fe602
84,594
def temperature_to_heat_flux(temperature: float, ambient_temperature: float = 293.15):
    """Return hot-surface heat flux for a given temperature, using a black
    body radiation model.

    :param temperature: [K] emitter temperature.
    :param ambient_temperature: [K] ambient/receiver temperature, 20 deg.C
        by default.
    :return: [W/m2] radiative heat flux from emitter to receiver.
    """
    EMISSIVITY = 1.0  # radiation view factor
    STEFAN_BOLTZMANN = 5.67e-8  # [W/m2/K4] Stefan-Boltzmann constant
    fourth_power_delta = temperature ** 4 - ambient_temperature ** 4
    return EMISSIVITY * STEFAN_BOLTZMANN * fourth_power_delta
9ad1d364c2d084ee0ddb00c2c861e0eac93a198b
84,597
def forgetting_to_bwt(f):
    """
    Convert forgetting to backward transfer: BWT = -1 * forgetting.

    Args:
        f: None, a dict of per-key forgetting values, or a number.

    Returns:
        The negated input (None passes through unchanged).

    Raises:
        ValueError: if `f` is none of the accepted types.
    """
    if f is None:
        return None
    if isinstance(f, dict):
        return {k: -1 * v for k, v in f.items()}
    # Accept plain ints as well as floats (e.g. a forgetting of exactly 0).
    if isinstance(f, (int, float)):
        return -1 * f
    raise ValueError("Forgetting data type not recognized when converting "
                     "to backward transfer.")
20b69f1f89c00575e8ad595890704b14463bab8d
84,603
def format_schedule(sched):
    """
    Format a schedule (list of operations) for printing in HTML:
    READ/WRITE operations are rendered plain, everything else in bold.
    """
    rendered = []
    for op in sched:
        if op.type in ('READ', 'WRITE'):
            rendered.append(str(op) + ' ')
        else:
            rendered.append('<b>' + str(op) + ' </b>')
    return ''.join(rendered) + '\n'
da24e73b3f0d679ca7bd12823e8c2f30100a5878
84,605
import re


def remove_indentation(content: str) -> str:
    """
    Remove the common indentation from a multi-line string.

    The indentation of the first non-empty line is measured and stripped
    from the start of every line; the result is also stripped of leading
    and trailing whitespace.

    Args:
        content(str): The string to clean from indentation.

    Returns:
        str: The unindented content.
    """
    matches = re.findall("^\n*( *)", content)
    # The pattern always matches (possibly zero spaces); fall back to ''
    # defensively.
    indentation = matches[0] if matches else ""
    cleaned, _ = re.subn("(\n){}".format(indentation), r"\1", content)
    return cleaned.strip()
76419258643c80597f442b6f48dae8167e4a1cad
84,606
def change_cols(data, params):
    """
    Filter and rename columns according to `params`.

    data: a dataframe
    params: mapping of column name -> setting, where
        False  : the column is omitted
        True   : the column is kept as-is
        str    : the column is kept and renamed to that value
    Returns a new dataframe; `data` is left untouched.
    """
    kept = [name for name, setting in params.items()
            if setting and name in data.columns]
    trimmed = data[kept].copy()
    renames = {name: target for name, target in params.items()
               if isinstance(target, str)}
    return trimmed.rename(columns=renames)
e48cc4278ddcd77517a3f910a63bfde5fe91f3b4
84,608
def load_database(database_file, sep=','):
    """Return a dictionary of kmers -> taxa.

    Each line of `database_file` (any iterable of strings) holds a kmer and
    its taxonomic annotation separated by `sep`; malformed lines (not
    exactly two fields) are silently skipped.
    """
    kmer_to_taxon = {}
    for raw_line in database_file:
        parts = raw_line.strip().split(sep)
        if len(parts) == 2:
            kmer, taxon = parts
            kmer_to_taxon[kmer] = taxon
    return kmer_to_taxon
ed7865e3d6f8bcc93e8d843e72b59357c4e6bbc1
84,609
def get_percentage(position: int, length: int) -> str:
    """Format completion percentage in square brackets, right-aligned
    within 4 characters."""
    pct = position * 100 // length
    return "[{:>4}]".format("{}%".format(pct))
5d4d5a9276228579d168f361788d50e328aa623f
84,614
import calendar
import time


def toutctimestamp(stamp):
    """ Convert a datetime object to a UTC unix timestamp.

    Unlike `time.mktime`, the fractional (microsecond) part of the
    timestamp is preserved in the result.

    :param datetime stamp: Datetime to convert
    :returns: Unix timestamp as a ``float``

    ::

        from datetime import datetime
        from pytool.time import toutctimestamp

        utc_stamp = toutctimestamp(datetime.now())

    """
    fractional = 1.0 * stamp.microsecond / 10 ** 6
    if stamp.tzinfo:
        # Timezone aware: convert via the UTC time tuple.
        return calendar.timegm(stamp.utctimetuple()) + fractional
    # Naive datetime: interpreted in local time.
    return time.mktime(stamp.timetuple()) + fractional
70c00b49c6b78739ab6f0ba33fe454b185ee49f1
84,619
def substitute(sequence, offset, ref, alt):
    """Mutate a sequence by replacing `ref` with `alt` at `offset`.

    Parameters
    ----------
    sequence : sequence
        String of amino acids or DNA bases
    offset : int
        Base 0 offset from start of `sequence`
    ref : sequence or str
        What we expect to find at the position (asserted).
    alt : sequence or str
        Alternate sequence to insert
    """
    ref_len = len(ref)
    observed = sequence[offset:offset + ref_len]
    # Sanity-check that the sequence actually contains `ref` here.
    assert str(observed) == str(ref), \
        "Reference %s at offset %d != expected reference %s" % \
        (observed, offset, ref)
    return sequence[:offset] + alt + sequence[offset + ref_len:]
5834da04d1c3565a8500336dab7edcacc1f2e462
84,623
def not_period(tok):
    """Boolean token test: True unless the token's type is PERIOD."""
    return tok.type != 'PERIOD'
6d533e5f307effa2483f0efe1d969c49acce796a
84,625
def single_template_filter(template_name: str):
    """Build a case-insensitive exact-name filter dict, or None when the
    name is empty."""
    if not template_name:
        return None
    return {'name__iexact': template_name}
3848caa91d626286d201ca44f81373bb59919ae8
84,626
def to_libxc_name(functionals):
    """Given a list of section_XC_functionals, return the single
    '+'-joined "weight*name" string that represents them all, ordered by
    functional name.
    """
    ordered = sorted(functionals, key=lambda entry: entry.XC_functional_name)
    terms = ["{}*{}".format(f.XC_functional_weight, f.XC_functional_name)
             for f in ordered]
    return "+".join(terms)
80a8d329c2d13a326d4321458afc0cd207029965
84,628
def in_d(key, dic, return_if_false=None):
    """Look up *key* in *dic*: return its value if present, otherwise
    *return_if_false* (None by default)."""
    return dic.get(key, return_if_false)
11aa422623af7a31f94fbc6194bc4467ff7317e6
84,629
def total(n):
    """
    Return the sum of the numbers from 0 to n (inclusive), or None
    when n is negative.
    """
    if n < 0:
        return None
    return sum(range(n + 1))
c816b27508d77f2218e2b46e0b6dfa7e494f1dc4
84,630
def realword(sym):
    """
    Return True if *sym* is a real word (not a silence or filler token).
    """
    if sym.lower() in ('<s>', '<sil>', '</s>'):
        return False
    return not sym.startswith("++")
2704ca95fc07b010d80e3c8b93fc9ae33d474743
84,636
import time


def timeit(my_func):
    """Decorator that prints the wall-clock execution time of `my_func`.

    :param: my_func (func) : function to be timed
    :return: timed (func) : wrapper returning my_func's result
    """
    def timed(*args, **kw):
        """Run `my_func` with the given arguments, print the elapsed time,
        and return its result."""
        started = time.time()
        result = my_func(*args, **kw)
        elapsed = time.time() - started
        print('%r took %2.3g sec' % (my_func.__name__, elapsed))
        return result
    return timed
c1e11714b7ee5db83c728fc183ed8d9a76a6e8d0
84,638
import torch


def tensor_dot(x, y):
    """Pairwise dot products between the rows of `x` and the rows of `y`
    (i.e. x @ y^T for 2-D inputs)."""
    return torch.einsum("ij,kj->ik", (x, y))
987198ddb5a851c01ec31ab6bfeaf7c3dac133af
84,640
import pathlib


def path(args) -> pathlib.Path:
    """
    Return the path where the model will be placed upon `.zip` unpacking.

    Parameters
    ----------
    args : dict-like
        User provided arguments parsed by an argparse.ArgumentParser
        instance (uses `args.directory` and `args.source`).

    Returns
    -------
    pathlib.Path
        `directory/source` when a directory was given, else just `source`.
    """
    source = pathlib.Path(args.source)
    if args.directory is None:
        return source
    return pathlib.Path(args.directory) / source
04b7e755f7e8552324bf258800e075155bdd5145
84,648
def _cpp_integer_type_for_range(min_val, max_val): """Returns the appropriate C++ integer type to hold min_val up to max_val.""" # The choice of int32_t, uint32_t, int64_t, then uint64_t is somewhat # arbitrary here, and might not be perfectly ideal. I (bolms@) have chosen # this set of types to a) minimize the number of casts that occur in # arithmetic expressions, and b) favor 32-bit arithmetic, which is mostly # "cheapest" on current (2018) systems. Signed integers are also preferred # over unsigned so that the C++ compiler can take advantage of undefined # overflow. for size in (32, 64): if min_val >= -(2**(size - 1)) and max_val <= 2**(size - 1) - 1: return "::std::int{}_t".format(size) elif min_val >= 0 and max_val <= 2**size - 1: return "::std::uint{}_t".format(size) return None
771856d3b6df07df9460e7fae2d21afeeb8f214c
84,650
import torch


def reverse(x, axis):
    """Reverse a tensor along the specified axes.

    Arguments:
        x: Tensor to reverse.
        axis: Integer or iterable of integers. Axes to reverse.

    Returns:
        A tensor.
    """
    # torch.flip wants a sequence of dims, so wrap a lone int.
    axes = [axis] if isinstance(axis, int) else axis
    return torch.flip(x, dims=axes)
209c90e6ac07eb1349d6d0ffdaec3d96879d0723
84,654
def unique(input_list):
    """Return the unique values of *input_list*, preserving first-seen
    order.

    For a non-iterable argument, returns the message string
    "int object is not iterable" instead of raising (historical
    behaviour, preserved for compatibility)."""
    try:
        seen = []
        for element in input_list:
            if element not in seen:
                seen.append(element)
        return seen
    except TypeError:
        return ("int object is not iterable")
48b1ba3d033c4bed344f90d50cb031d61a0c2952
84,658
def str2list(_str):
    """Wrap a string in a single-element list; pass lists through
    unchanged.

    :arg str _str: a string (or an existing list)
    :raises ValueError: when `_str` is neither a str nor a list
    """
    if isinstance(_str, list):
        return _str
    if isinstance(_str, str):
        return [_str]
    raise ValueError('"_str" input is not a str or list')
c9f184957167b32d412cc7b592c093d37faa3d6a
84,664
def max_multiple(divisor: int, bound: int) -> int:
    """
    Given a Divisor and a Bound, find the largest integer N such that:
        1. N is divisible by divisor
        2. N is less than or equal to bound
        3. N is greater than 0
    Returns 0 when no such N exists (e.g. bound < divisor or bound <= 0).

    Notes:
        1. The parameters passed to the function are expected to be
           positive values.

    :param divisor: positive divisor.
    :param bound: positive upper bound.
    :return: the largest multiple, or 0.
    """
    if bound <= 0:
        return 0
    # O(1) arithmetic instead of counting down from `bound`:
    # the largest multiple <= bound is bound rounded down to the divisor.
    return bound - bound % divisor
6160f513188339bb1601a7894bc21a34d4cd5f38
84,666
def get_json_uri(json_line):
    """Return the URI of a page from crawl JSON data.

    Args:
        json_line: dictionary with JSON data.

    Returns:
        (current_uri, json_line): the current URI and the original JSON
        dictionary, or None when the expected keys are missing.
    """
    try:
        current_uri = json_line["envelope"]["warc-header-metadata"]["warc-target-uri"]
    except (KeyError, TypeError):
        # Narrowed from a bare `except Exception: pass`: only malformed
        # records yield None; unrelated errors now propagate.
        return None
    return (current_uri, json_line)
164d957451a30c0c47afff9383532717a945b9f2
84,667
def int_to_roman(number: int) -> str:
    """
    Convert integer to roman numerals
    https://en.wikipedia.org/wiki/Roman_numerals
    >>> int_to_roman(3456)
    'MMMCDLVI'
    >>> int_to_roman(3999)
    'MMMCMXCIX'
    """
    if number <= 0:
        # Matches the original loop, which produced '' for non-positive input.
        return ""
    values = (("M", 1000), ("CM", 900), ("D", 500), ("CD", 400),
              ("C", 100), ("XC", 90), ("L", 50), ("XL", 40),
              ("X", 10), ("IX", 9), ("V", 5), ("IV", 4), ("I", 1))
    digits = []
    for symbol, magnitude in values:
        count, number = divmod(number, magnitude)
        digits.append(symbol * count)
    return "".join(digits)
8934717e716cc1e32bdeb10de2dbe6a63f1d0389
84,669
def check_for_ticket_quantity_error(quantity):
    """
    Validate a ticket quantity.

    :param quantity: the ticket's quantity as a string
    :return: False when the quantity is between 1 and 100 (inclusive),
        otherwise an error message string
    """
    if 1 <= int(quantity) <= 100:
        return False
    return "The quantity of the ticket must be between 1 and 100"
80001bbcaffe8193eaa158219f1747b18c4110a5
84,670
def convert_to_template(session, vm_ref):
    """Convert a VM to a template by setting its is_a_template flag via
    the XenAPI session."""
    vm_api = session.xenapi.VM
    return vm_api.set_is_a_template(vm_ref, True)
e2bde359a411de9f58c667641a4300935c64ccfd
84,681