Dataset schema: content — string (39 to 9.28k chars); sha1 — string (40 chars); id — int64 (8 to 710k). Each row below is a Python function (content), its SHA-1 hash, and its numeric id.
def num(s):
    """Try to convert the variable to a float. If not possible, return the
    original variable.
    """
    try:
        return float(s)
    except (ValueError, TypeError):
        return s
d867980a3663a13af12f25e750cc03f638e4cabd
677,112
def find_interval(x, partition, endpoints=True):
    """
    find_interval -> i

    If endpoints is True, "i" will be the index for which
    partition[i] <= x < partition[i+1] applies, if such an index exists;
    -1 otherwise.

    If endpoints is False, "i" will be the smallest index for which
    x < partition[i] applies. If no such index exists, "i" will be set to
    len(partition).
    """
    for i in range(0, len(partition)):
        if x < partition[i]:
            return i - 1 if endpoints else i
    return -1 if endpoints else len(partition)
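A doctest-style sketch of the two modes (values chosen for illustration, not from the dataset):

>>> find_interval(5, [0, 10, 20])            # partition[0] <= 5 < partition[1]
0
>>> find_interval(25, [0, 10, 20])           # outside the partition
-1
>>> find_interval(5, [0, 10, 20], endpoints=False)  # smallest i with 5 < partition[i]
1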
42e6cd403263a5cc2529c6e7ac1855957e05ac2f
338,378
def _unpack_row(row: list, interval_length: int) -> tuple:
    """
    Retrieves data-points based on fixed hardcoded positions in source file.

    Args:
        row (list): Row of data from source file.
        interval_length (int): The number of periods included in raw data.

    Returns:
        tuple: Separate data points (sku_id, orders, unit_cost, lead_time,
            retail_price, quantity_on_hand, backlog)
    """
    orders = {}
    sku_id = row[0]
    orders['demand'] = list(row[1:interval_length + 1])
    unit_cost = row[interval_length + 1]
    lead_time = row[interval_length + 2]
    retail_price = row[interval_length + 3]
    quantity_on_hand = row[interval_length + 4]
    backlog = row[interval_length + 5]
    return sku_id, orders, unit_cost, lead_time, retail_price, quantity_on_hand, backlog
f3da46e0dfd0153f80a561b977f23923adf0595f
314,936
def default_Name(x, world):
    """The default name of a thing is its id, x."""
    return str(x)
504b252b78461713023d57d22112d973dde5b43a
58,604
def _parse_and_format_value(string_value):
    """Parses and formats a string into the stats format.

    Args:
        string_value: Value of the stat as string.

    Returns:
        Value of the stat as a float.
    """
    if str(string_value).lower() == 'nan':
        return None
    if type(string_value) == float:
        return string_value
    if type(string_value) == int:
        return float(string_value)
    string_value = str(string_value)
    if '%' in string_value:
        string_value = string_value.replace('%', '')
        return float(string_value) / 100
    return float(string_value)
32c819cf5b46f2064c0dde92df48cbe5850523d0
352,304
def attempt(func, *args, **kargs):
    """Attempts to execute `func`, returning either the result or the caught
    error object.

    Args:
        func (function): The function to attempt.

    Returns:
        mixed: Returns the `func` result or error object.

    Example:

        >>> results = attempt(lambda x: x/0, 1)
        >>> assert isinstance(results, ZeroDivisionError)

    .. versionadded:: 1.1.0
    """
    try:
        ret = func(*args, **kargs)
    except Exception as ex:  # pylint: disable=broad-except
        ret = ex
    return ret
93e22f01bd0c8f086160002f4db7ccbcd282a1df
88,049
import requests
import shutil


def download_file(url):
    """Downloads file from parameter using requests and shutil.

    Args:
        url (string): URL of file to be downloaded

    Returns:
        string: filename of the downloaded file
    """
    local_filename = url.split("/")[-1]
    with requests.get(url, stream=True) as r:
        with open(local_filename, "wb") as f:
            shutil.copyfileobj(r.raw, f)
    return local_filename
46812dad98eee01aee968bbe7a88d8f38a21afb1
378,528
import re


def ToLowerCamel(name):
    """Convert a name with underscores to camelcase."""
    return re.sub('_[a-z]', lambda match: match.group(0)[1].upper(), name)
240e8bb697b27258301aa6d8ae9079f048a8d040
112,841
def get_model_scores(model, train_test_sets):
    """
    Returns scores produced by trained estimator.

    Uses the `predict_proba` attribute or the `decision_function`,
    whichever is available.

    Input
    -----
    model: trained (fitted) classifier
    train_test_sets: tuple of (X_tr_scaled, y_tr, X_te_scaled, y_te)

    Returns
    -------
    probs_positive_class: model scores
    """
    X_tr_scaled, y_tr, X_te_scaled, y_te = train_test_sets
    if hasattr(model, 'predict_proba'):
        probs_positive_class = model.predict_proba(X_te_scaled)[:, 1]
    else:
        probs_positive_class = model.decision_function(X_te_scaled)
        # Min-max scale decision-function outputs into [0, 1]
        probs_positive_class = \
            (probs_positive_class - probs_positive_class.min()) \
            / (probs_positive_class.max() - probs_positive_class.min())
    return probs_positive_class
ceb2c08517cad2f2e41e95f9056385d67644b12e
539,123
import re


def is_var_value(p, pltype):
    """
    Variable values follow the variable description or other variable values
    or variable descriptions. It should have a value, whitespace, period, and
    then value description.
    """
    if pltype in ['Blank', 'Var Desc', 'Var Value', 'Val Desc']:
        pparser = re.compile(r"[\t ]+\.")
        words = pparser.split(p)
        if len(words) > 1 and words[0] and words[0][0] != '.':
            return True
    return False
b0788b3104aa34a90516460e472d641ba1174573
246,765
def _GetRevision(options):
    """Get the SVN revision number.

    Args:
        options: options object.

    Returns:
        The revision number.
    """
    revision = options.build_properties.get('got_revision')
    if not revision:
        revision = options.build_properties.get('revision', 'testing')
    return revision
008a176d47e3da1114abafad37a8d5d63c562a82
278,352
def assign_province_road_conditions(x):
    """Assign road conditions as paved or unpaved to Province roads.

    Parameters
    ----------
    x : Pandas DataFrame of values
        - code  : Numeric code for type of asset
        - level : Numeric code for level of asset

    Returns
    -------
    String value as paved or unpaved
    """
    asset_code = x.code
    asset_level = x.level

    # This is an expressway, national or provincial road
    if asset_code in (17, 303) or asset_level in (0, 1):
        return 'paved'
    else:
        # Anything else not included above
        return 'unpaved'
81fe01661bf184dd52795f4b8b0ecf3c04e4f917
682,579
def _get_ring_marker(used_markers):
    """
    Returns the lowest number larger than 0 that is not in `used_markers`.

    Parameters
    ----------
    used_markers : Container
        The numbers that can't be used.

    Returns
    -------
    int
        The lowest number larger than 0 that's not in `used_markers`.
    """
    new_marker = 1
    while new_marker in used_markers:
        new_marker += 1
    return new_marker
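A quick doctest-style check (illustrative values):

>>> _get_ring_marker({1, 2, 4})
3
>>> _get_ring_marker(set())
1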
c501ec6cb769d4cc66fcddb8f5e8759b4a327790
286,925
def get_ipsec_udp_key_history(
    self,
) -> dict:
    """Get IPSEC UDP key history

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - ikeless
          - GET
          - /ikeless/seedHistory

    :return: Returns dictionary ikeless key history status \n
        * keyword **activeSeedPersistSeed** (`bool`): Whether active key
          material persists on appliance
        * keyword **currentActivationTime** (`int`): Epoch time of the
          activation of current active key material
        * keyword **previousActiveSeedId** (`str`): ID of previous active
          key material
        * keyword **newSeedPersistSeed** (`bool`): Whether new key material
          persists on appliance
        * keyword **newSeedId** (`str`): ID of new key material
        * keyword **activeSeedId** (`str`): ID of current active key material
        * keyword **newSeedLifetime** (`int`): Epoch time of the lifetime of
          new key material
        * keyword **previousActivationTime** (`int`): Epoch time of the
          activation of previous active key material
        * keyword **activeSeedLifetime** (`int`): Epoch time of the lifetime
          of active key material
    :rtype: dict
    """
    return self._get("/ikeless/seedHistory")
3a608e2a077f8e014a1d3b277073fda4ea2d15db
256,217
def chunks(l, n):
    """Return successive n-sized chunks from l.

    Parameters
    ----------
    l : list
        The list to split in chunks
    n : int
        The target number of items in each chunk

    Returns
    -------
    list
        List of chunks
    """
    pieces = []
    for i in range(0, len(l), n):
        pieces.append(l[i:i + n])
    return pieces
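Doctest-style illustration (arbitrary values; the last chunk may be shorter):

>>> chunks([1, 2, 3, 4, 5], 2)
[[1, 2], [3, 4], [5]]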
21f6a811f2286c0c2a4a09804f08be6d60cb098a
516,951
from typing import Optional

import yaml


def get_repo_version(filename: str, repo: str) -> Optional[str]:
    """Return the version (i.e., rev) of a repo.

    Args:
        filename (str): .pre-commit-config.yaml
        repo (str): repo URL

    Returns:
        Optional[str]: the version of the repo
    """
    with open(filename, "r") as stream:
        pre_commit_data = yaml.safe_load(stream)
    pre_config_repo = next(
        (item for item in pre_commit_data["repos"] if item["repo"] == repo), None
    )
    if pre_config_repo:
        return pre_config_repo["rev"]
    return None
821653bdeb60a86fce83fb3a05609996231ec5d4
531
from typing import Dict
from typing import Tuple

import json


def read_tagger_mapping(filename: str) -> Dict[str, Tuple[str, str]]:
    """
    Reads the tagger mapping from `filename`.

    :param str filename: Path to tagger mapping JSON file.
    :return: Dict with entity type as key and tuple consisting of tagger name
        and tagger version as value
    """
    with open(filename) as f:
        content = f.read()
    mapping = json.loads(content)
    return mapping
47050379e3db18ac7b2b99e622f2bbbbde4ea601
425,899
import pathlib
import shutil


def workdir(tmpdir):
    """Create a working directory with PatchSim test data files."""
    basedir = pathlib.Path(__file__).resolve().parents[1] / "manual_tests"
    for fname in basedir.glob("cfg_*"):
        shutil.copy(str(basedir / fname.name), str(tmpdir))
    for fname in basedir.glob("*.txt"):
        shutil.copy(str(basedir / fname.name), str(tmpdir))
    for fname in basedir.glob("*.out.expected"):
        shutil.copy(str(basedir / fname.name), str(tmpdir))
    return pathlib.Path(str(tmpdir))
8be72a6b798ca25f05bb42eb13ccf44d5d93ec99
466,081
def average_error_to_weight(error):
    """
    Given the average error of a pollster, returns that pollster's weight.
    The error must be a positive number.
    """
    return error ** (-2)
117909d1660b57775193eed722a3f0f49a9bf2ab
565,680
def theta(x):
    """Heaviside step function."""
    return int(x >= 0)
a1567bf1553f54953164eb4371573c73163b4436
137,591
def find_short(strg):
    """Return length of shortest word in sentence."""
    words = strg.split()
    min_size = float('inf')
    for word in words:
        if len(word) < min_size:
            min_size = len(word)
    return min_size
87e99a5754ede74d74e76199c176f956d424fc44
32,593
def subor_obsahuje(nazov_suboru: str, hladany_subor: str, encoding='utf8') -> bool:
    """
    Determine whether the file `nazov_suboru` contains `hladany_subor` in its text.

    Return False if the file cannot be opened or is a directory.
    """
    try:
        with open(nazov_suboru, encoding=encoding) as f:
            text = f.read()  # read the file contents
            return hladany_subor in text
    except (FileNotFoundError, PermissionError, IsADirectoryError):
        return False
0ab47abe718eb3645c8c5969be48005fe9a7a06f
227,018
import inspect


def find_arg_names(fn) -> tuple:
    """
    Map each argument name of a function to its index.

    :param fn: a callable function
    :return: a tuple whose first item is a dictionary mapping argument name
        to argument index, and whose second item is a dictionary mapping
        kwarg name to default value
    """
    arg_to_index = {}
    kwargs_to_default = {}
    for i, (name, value) in enumerate(inspect.signature(fn).parameters.items()):
        arg_to_index[name] = i
        if value.default is not value.empty:
            kwargs_to_default[name] = value.default
    return arg_to_index, kwargs_to_default
8f4d8f393d4fc9de8a3a047a304f36326b9fd05d
253,441
def get_num_params(vocab_size, num_layers, num_neurons):
    """Returns the number of trainable parameters of an LSTM.

    Args:
        vocab_size (int): The vocabulary size
        num_layers (int): The number of layers in the LSTM
        num_neurons (int): The number of neurons / units per layer

    Returns:
        int: The number of trainable parameters
    """
    num_first_layer = 4 * (num_neurons * (vocab_size + num_neurons) + num_neurons)
    num_other_layer = 4 * (num_neurons * 2 * num_neurons + num_neurons)
    num_softmax = vocab_size * num_neurons + vocab_size
    return num_first_layer + (num_layers - 1) * num_other_layer + num_softmax
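A worked check with toy values (illustrative, not from the dataset): for vocab_size=10, num_layers=1, num_neurons=4, the first layer holds 4 * (4 * (10 + 4) + 4) = 240 parameters and the softmax head 10 * 4 + 10 = 50, so the total is 290:

>>> get_num_params(10, 1, 4)
290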
c9620e74206878cc3390895dacbf10c84da42829
700,672
def data_for_stan(cases, settings):
    """
    Returns data for the model.

    Parameters
    ----------
    cases : list of float
        Cumulative number of people infected (confirmed).
    settings : object
        Model settings, including `population_size`.

    Returns
    -------
    dict:
        Data that is supplied to Stan model.
    """
    q = -1 + settings.population_size / cases[0]
    return {
        "n": len(cases),
        "cases": cases,
        "k": settings.population_size,
        "q": q
    }
75505b23ec58d1c84b62dd4f0f876a2b1dd4cbc2
228,625
import torch


def single_emd_loss(p, q, r=2):
    """
    Earth Mover's Distance of one sample.

    Args:
        p: true distribution of shape num_classes × 1
        q: estimated distribution of shape num_classes × 1
        r: norm parameter
    """
    assert p.shape == q.shape, "Length of the two distributions must be the same"
    length = p.shape[0]
    emd_loss = 0.0
    for i in range(1, length + 1):
        emd_loss += torch.abs(sum(p[:i] - q[:i])) ** r
    return (emd_loss / length) ** (1. / r)
0919a240ad1dc724ac0ef2da26f367c161a3d575
433,008
def _new_episodes(cache, feed):
    """Returns all episodes in the feed that have not been previously added
    to the cache.
    """
    episodes = []
    for episode in feed.episodes:
        if not cache.has(episode):
            episodes.append(episode)
    return episodes
73fd45d38f7f6f685f00358ee32ffe59a5e6e81e
308,400
def filter_vocab(vocab, df, min_df):
    """
    Filter out rare tokens and construct a new vocabulary.

    @param vocab: input vocabulary
    @param df: document frequency dictionary (term: frequency)
    @param min_df: minimum document frequency
    """
    new_vocab = {'PAD': 0}
    for term in vocab:
        if term != 'PAD' and df[term] >= min_df:
            new_vocab[term] = len(new_vocab)
    return new_vocab
cb3dca65267eaad08efe2c457dcc4678c6f52abb
191,876
def infer_tz_from_timestamp(timestamp):
    """
    Infer the time zone from a timestamp object.

    :param pandas.Timestamp/datetime.datetime timestamp: the target timestamp
    :return: the inferred time zone
    :rtype: datetime.tzinfo
    """
    return timestamp.tzinfo
290c8acd55a993120d5750612f0b4665675500bb
158,908
def standardize_method_to_len_3(name, padding="--", joiner=","):
    """Standardize an LCIA method name to a length 3 tuple.

    ``name`` is the current name.
    ``padding`` is the string to use for missing fields.
    """
    if len(name) >= 3:
        return tuple(name)[:2] + (joiner.join(name[2:]),)
    else:
        return (tuple(name) + (padding,) * 3)[:3]
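Doctest-style illustration of the two branches (made-up method names):

>>> standardize_method_to_len_3(('IPCC', 'climate change'))
('IPCC', 'climate change', '--')
>>> standardize_method_to_len_3(('a', 'b', 'c', 'd'))
('a', 'b', 'c,d')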
621dd9e18a1f87fcd950cb25f89b14e810b6132d
504,002
import gettext


def app_translator(lang):
    """Localization of the application.

    Arguments
    ---------
    lang: "String containing application language"

    Modules
    -------
    gettext: "Internationalization and localization support"

    Returns
    -------
    trad: "Function to translate string"
    """
    language = gettext.translation('PyArchboot', localedir='locales',
                                   languages=['{lang}'.format(lang=lang)])
    trad = language.gettext
    return trad
e36534b6b3f1d9e830f9230e181fb95decf79024
289,361
import re


def get_build_id(log_file):
    """This function finds the build ID within the appropriate log file.

    Inputs:
        - log_file: Absolute path to the Klocwork log file kwloaddb.log [string]

    Outputs:
        - build_id: The build ID that is used by Klocwork to identify the
          analysis [string]
    """
    # Initialize variables
    build_id = None

    # Read in the first line of the log file
    with open(log_file, 'r') as input_fh:
        log_line = input_fh.readline()

    # Split the line
    line_split = filter(None, re.split('[" ]', log_line))

    # Find the build ID parameter
    for item in line_split:
        if item.startswith('build'):
            build_id = item
            break

    return build_id
b2aed1e0f1b85a4354ad3e8aa844e0fbde86ce17
174,124
def write_normal(fname, triplets, na, angd, agr):
    """
    Write out ADF data in normal ADF format.
    """
    with open(fname, 'w') as outfile:
        outfile.write('# 1:theta[i], ')
        for it, t in enumerate(triplets):
            outfile.write(' {0:d}:{1:s}-{2:s}-{3:s},'.format(it + 2, *t))
        outfile.write('\n')
        for i in range(na):
            outfile.write(' {0:10.4f}'.format(angd[i]))
            for it, t in enumerate(triplets):
                outfile.write(' {0:11.3e}'.format(agr[it, i]))
            outfile.write('\n')
    return None
d12e858060dad0f398beb139aaf9a4edda255807
35,741
def map_to_45(x):
    """
    Linearly map x from the input range [1, 5] to the output range [1, 4.5]:

        (y - y1) / (x - x1) = (y2 - y1) / (x2 - x1)
        with x1 = 1, x2 = 5, y1 = 1, y2 = 4.5

        output = output_start
                 + ((output_end - output_start) / (input_end - input_start))
                 * (input - input_start)
    """
    input_start = 1
    input_end = 5
    output_start = 1
    output_end = 4.5
    if x >= 5:
        return 4.5
    return output_start + ((output_end - output_start)
                           / (input_end - input_start)) * (x - input_start)
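A worked check (illustrative): for x = 3, the formula gives 1 + (3.5 / 4) * (3 - 1) = 2.75, and inputs at or above 5 are clamped to 4.5:

>>> map_to_45(3)
2.75
>>> map_to_45(7)
4.5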
0b3c62dfb99d24ee8a85e9ff7c621676db530e57
689,952
def sushi(roll):
    """Transform a frame to a type, value structure.

    Examples:
        - ['X', '-'] -> {type: "strike", values: [10, 0]}
        - ['2', '7'] -> {type: "regular", values: [2, 7]}
        - ['-', '-'] -> {type: "regular", values: [0, 0]}
        - ['1', '/'] -> {type: "spare", values: [1, 9]}
        - ...
        -- Any frame with 3 rolls is last:
        - ['X', 'X', 'X'] -> {type: "last", values: [10, 10, 10]}
        - ['X', '2', '/'] -> {type: "last", values: [10, 2, 8]}
        - ['X', '-', '-'] -> {type: "last", values: [10, 0, 0]}
    """
    sushi_roll = {}
    wasabi = []
    if len(roll) == 3:
        sushi_roll["type"] = "last"
    elif roll[0] == "X":
        sushi_roll["type"] = "strike"
    elif roll[1] == "/":
        sushi_roll["type"] = "spare"
    else:
        sushi_roll["type"] = "regular"
    for i in range(0, len(roll)):
        if roll[i] == "X":
            wasabi.append(10)
        elif roll[i] == "/":
            wasabi.append(10 - int(roll[i - 1]))
        elif roll[i] == "-":
            wasabi.append(0)
        else:
            wasabi.append(int(roll[i]))
    sushi_roll["values"] = wasabi
    return sushi_roll
d8f99c5e7ce23667e6d50e4a981bd75b9dfb7eed
182,618
import torch


def compute_inner_distmat(features: torch.Tensor):
    """
    Used to obtain `gallery_distmat` or `all_distmat`.

    Args:
        features: Tensor(x, m) (x = g for gallery_distmat, x = q + g for all_distmat)

    Returns:
        Tensor(x, x) Euclidean squared distance
    """
    n, m = features.shape
    ff = features.pow(2).sum(dim=1, keepdim=True).expand(n, n)
    distmat = (ff + ff.t()).addmm(mat1=features, mat2=features.t(), beta=1, alpha=-2)
    return distmat
54d0e4e3992e59fcb6468e2fa2fc214db54f0428
241,131
import logging


def get_port_map(dut, asic_index=None):
    """
    @summary: Get the port mapping info from the DUT
    @return: a dictionary containing the port map
    """
    logging.info("Retrieving port mapping from DUT")
    namespace = dut.get_namespace_from_asic_id(asic_index)
    config_facts = dut.config_facts(host=dut.hostname, source="running",
                                    namespace=namespace)['ansible_facts']
    port_mapping = config_facts['port_index_map']
    for k, v in port_mapping.items():
        port_mapping[k] = [v]
    return port_mapping
f5a194c674a2b84d42cba8f53804289e9812b6be
58,418
def format_number(number):
    """
    Return the number formatted: if the number contains .0, return only the
    integer part.
    """
    if number % 1 == 0:
        return int(number)
    else:
        return number
a1b10ed618c5d3cea9f4679f6386f9a3ca331b63
323,666
def newman_conway(num):
    """
    Returns a space-separated string of the Newman-Conway numbers up to the
    given value.

    Time Complexity: O(n), because the number of calculations performed
    depends on the size of num.
    Space Complexity: also O(n), because newman_conway_nums (the array storing
    sequence values), nm_sequence_without_leading_zero (the result with the
    leading 0 removed) and result (the properly formatted result) all grow
    with the size of the given num.
    """
    if num == 0:
        raise ValueError
    if num == 1:
        return '1'
    if num == 2:
        return '1 1'
    # Array to store sequence values, seeded with starting values
    newman_conway_nums = [0, 1, 1]
    for i in range(3, num + 1):
        newman_conway_nums.append(
            newman_conway_nums[newman_conway_nums[i - 1]]
            + newman_conway_nums[i - newman_conway_nums[i - 1]])
    nm_sequence_without_leading_zero = [str(n) for n in newman_conway_nums if n != 0]
    result = " ".join(nm_sequence_without_leading_zero)
    return result
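A doctest-style check against the sequence P(1..5) = 1, 1, 2, 2, 3:

>>> newman_conway(5)
'1 1 2 2 3'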
e595fd25e5ed6718339431b20e22b00ae504e5be
688,659
def is_fasta_file_extension(file_name):
    """
    Check if file has fasta extension.

    Parameters
    ----------
    file_name : str
        file name

    Returns
    -------
    bool
        True if file has fasta extension, False otherwise
    """
    if file_name[-4:] == ".fna":
        return True
    elif file_name[-4:] == ".faa":
        return True
    elif file_name[-6:] == ".fasta":
        return True
    elif file_name[-6:] == ".fastq":
        return True
    elif file_name[-3:] == ".fa":
        return True
    else:
        return False
711267ab304ca188b03f3a7e816a0df70c3f4fa3
689,185
def cluster_idx(idx_ls):
    """Given a list of idx, return a list that contains sub-lists of adjacent idx."""
    if len(idx_ls) < 2:
        return [[i] for i in idx_ls]
    else:
        output = [[idx_ls[0]]]
        prev = idx_ls[0]
        list_pos = 0
        for idx in idx_ls[1:]:
            if idx - 1 == prev:
                output[list_pos].append(idx)
            else:
                output.append([idx])
                list_pos += 1
            prev = idx
        return output
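Doctest-style illustration (arbitrary indices):

>>> cluster_idx([1, 2, 3, 7, 8, 10])
[[1, 2, 3], [7, 8], [10]]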
ec920c3759d7abdd7620922c4773493b7582670b
529,968
def read_list_from_txt(file):
    """
    Read a text file into a list, with each line as an element.

    :param file: the .txt file (with path).
    """
    with open(file) as txt_file:
        contents = txt_file.read().splitlines()
    print(">>> Loaded {} lines from [{}].".format(len(contents), file))
    return contents
97ef4f9e7729b69167a711e5feec1da8b9092cd5
316,148
from datetime import datetime


def get_random_seed() -> int:
    """Generate a random seed based on the current datetime."""
    return int(datetime.now().replace(microsecond=0).timestamp())
90ee2cef8c5987ea814c74ac45528960e65b5349
541,333
def myround(x: int, base=5) -> int:
    """
    Round to the nearest multiple of the base.

    Parameters
    ----------
    x : int
        input value
    base : int
        base value (5 by default)

    Returns
    -------
    int
    """
    return base * round(x / base)
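Doctest-style illustration (note that exact halfway cases follow Python's round-half-to-even rule):

>>> myround(12)
10
>>> myround(13)
15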
3dad30bbe338e2c033e329a6bc3e199f91b291e5
481,683
def time_human(x):
    """
    Gets time as human readable.
    """
    # Round time
    x = round(x, 2)
    for number, unit in [(60, "s"), (60, "min"), (24, "h"), (365, "days")]:
        if abs(x) < number:
            return f"{x:.2f} {unit}"
        x /= number
    return f"{x:.2f} years"
3f7f51ac7454e429fc30da64eed075aaf1f10b5b
707,560
from typing import Iterable
from typing import Mapping


def _iter_but_not_str_or_map(maybe_iter):
    """Helper function to distinguish between general iterables and iterables
    that are strings or mappings. This is used for pynads.concrete.List to
    determine if an iterable should be consumed or placed into a single value
    tuple.
    """
    return (isinstance(maybe_iter, Iterable)
            and not isinstance(maybe_iter, (str, Mapping)))
3dab46cfd2d2d19bd0fa744370b9059d6a0683bc
6,815
def _list_tool_categories(tl):
    """
    Given a list of dicts `tl` as returned by the `parse_tool_list` method and
    where each list element holds a key `tool_panel_section_id`, return a set
    of unique section IDs.
    """
    category_list = []
    for t in tl:
        category_list.append(t.get('tool_panel_section_id'))
    return set(category_list)
bfbb5325108fe9fef895edd74761521b729ec965
572,209
from typing import List


def diagonal_traverse(matrix: List[List[int]]) -> List[int]:
    """
    Given a matrix of M x N elements (M rows, N columns), return all elements
    of the matrix in diagonal order.

    Input:
        [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]]
    Output: 1, 2, 4, 7, 5, 3, 6, 8, 9

    :param matrix: M x N matrix
    :return: list of diagonally traversed values
    """
    result = []
    temp = []  # to reverse odd rows
    if not matrix or not matrix[0]:
        return result
    height, length = len(matrix), len(matrix[0])
    diagonals = range(height + length - 1)
    for diagonal in diagonals:
        row = 0 if diagonal < length else diagonal - length + 1
        col = diagonal if diagonal < length else length - 1
        while row < height and col > -1:
            temp.append(matrix[row][col])
            row += 1
            col -= 1
        if diagonal % 2 == 0:
            result.extend(temp[::-1])
        else:
            result.extend(temp)
        temp.clear()
    return result
fe77f0825cafb51bd3286ebd170141a6d588fde4
177,217
def start_worker(w):
    """Start a worker.

    Returns:
        Worker
    """
    w.run()
    return w
61955ab00805e20b3b11c74e0113b1124ef70fc3
543,889
def _encode_selected_predictions_csv(predictions, ordered_keys_list):
    """Encode predictions in csv format. For each prediction, the order of
    the content is determined by 'ordered_keys_list'.

    :param predictions: output of serve_utils.get_selected_predictions(...) (list of dict)
    :param ordered_keys_list: list of selected content keys (list of str)
    :return: predictions in csv response format (str)
    """
    def _generate_single_csv_line_selected_prediction(predictions, ordered_keys_list):
        """Generate a single csv line response for selectable inference predictions.

        :param predictions: output of serve_utils.get_selected_predictions(...) (list of dict)
        :param ordered_keys_list: list of selected content keys (list of str)
        :return: yields a single csv row for each prediction (generator)
        """
        for single_prediction in predictions:
            values = []
            for key in ordered_keys_list:
                if isinstance(single_prediction[key], list):
                    value = '"{}"'.format(single_prediction[key])
                else:
                    value = str(single_prediction[key])
                values.append(value)
            yield ','.join(values)

    return '\n'.join(
        _generate_single_csv_line_selected_prediction(predictions, ordered_keys_list))
8eedbb0f9a7785d860f7341da12ce1dae835fcca
416,891
from typing import List
from typing import Dict


def construct_rows(header: list, rows: list) -> List[Dict]:
    """Construct a list of csv row dicts.

    Arguments:
        header {list} -- csv header
        rows {list} -- csv contents;
            wrap a single row in a list, e.g. [row]

    Returns:
        List[Dict] -- a list of csv rows
    """
    row_dicts = [{k: v for k, v in zip(header, row)} for row in rows]
    return row_dicts
771b2dfde99a8b517331695d160eb9f809e4933c
30,447
def paste_js(clipboard):
    """Paste the string ``clipboard`` into the selected text of the focused
    element in the DOM using JavaScript/jQuery.
    """
    return (
        f"var focused = document.activeElement;\n"
        f"var start = focused.selectionStart;\n"
        f"var end = focused.selectionEnd;\n"
        f"var val = focused.value;\n"
        f"var new_val = val.slice(0, start) + `{clipboard}` + val.slice(end, val.length);\n"
        f"focused.value = new_val;\n"
        f"var cursorPos = start + `{clipboard}`.length;\n"
        f"focused.setSelectionRange(cursorPos, cursorPos);"
    )
7bdf79308004698f1fd4d6a44af5d958ee78677c
117,213
def member_to_beacon_proximity_fill_gaps(m2b, time_bins_size='1min', max_gap_size=2):
    """
    Fill gaps in a given member-to-beacon object.

    Parameters
    ----------
    m2b :
        Member to beacon object
    time_bins_size : str
        The size of the time bins used for resampling. Defaults to '1min'.
    max_gap_size : int
        The maximum number of consecutive NaN values to forward/backward fill

    Returns
    -------
    pd.DataFrame :
        The member-to-beacon proximity data, after filling gaps.
    """
    df = m2b.copy().reset_index()
    df = df.sort_values(by=['member', 'beacon', 'datetime'])
    df.set_index('datetime', inplace=True)
    df = df.groupby(['member', 'beacon']) \
           [['rssi', 'rssi_std', 'rssi_smooth_window_count']] \
           .resample(time_bins_size) \
           .fillna(method='ffill', limit=max_gap_size)
    df = df.reorder_levels(['datetime', 'member', 'beacon'], axis=0) \
           .dropna().sort_index()
    return df
9578c80a80f4ec00b476466bfa8cc1d29f60f165
279,916
def biofile(itisbio):
    """
    Returns string containing ".bio" or empty string depending on fasta
    sequence employed.

    Parameters
    ----------
    itisbio : bool
        Contains information about the nature of fasta sequence.

    Returns
    -------
    bio_path : str
        Either ".bio" or empty string.
    """
    if itisbio:
        bio_path = '.bio'
    else:
        bio_path = ''
    return bio_path
f356cade82d476f23c39a1b61a715c3e693a76b5
541,827
def normalize_color_tuple(h: int, s: int, x: int) -> tuple:
    """
    Normalize an HSV or HSL tuple.

    Args:
        h: `int` in {0, ..., 360} corresponding to hue.
        s: `int` in {0, ..., 100} corresponding to saturation.
        x: `int` in {0, ..., 100} corresponding to light or value.

    Returns:
        The corresponding normalized tuple.
    """
    return (h / 360, s / 100, x / 100)
107bdb5578682f8a0b60cb81aabd995aa8d45f0f
386,557
def get_positive_dim(dim: int, dim_size: int) -> int:
    """
    Given an integer number that represents a dimension in the array,
    transform it to a positive integer dim if it's negative. Otherwise, do
    nothing.

    Args:
        dim (int): An integer number that represents a dimension in an array.
        dim_size (int): The size of the dimension in the array.

    Returns:
        A positive integer that represents the same dimension as the given dim.
    """
    if dim < 0:
        return dim % dim_size
    return dim
dd619a459d1e4a242ae9e4a1754c1322ced55770
340,390
import torch


def decode_μ_law(waveform: torch.Tensor, μ: int = 255) -> torch.Tensor:
    """
    Applies the element-wise inverse μ-law encoding to the tensor.

    Args:
        waveform: input tensor
        μ: size of the encoding (number of possible classes)

    Returns:
        the decoded tensor
    """
    assert μ & 1
    μ = μ - 1
    hμ = μ // 2
    out = (waveform.type(torch.float32) - hμ) / hμ
    out = torch.sign(out) / μ * (torch.pow(μ, torch.abs(out)) - 1)
    return out
4df6bc228bc019545418f144a46068504edf0e81
119,091
import requests


def get_api_results(url, id):
    """Request JSON results for a member from an external API.

    Args:
        url ([str]): [External API url]
        id ([int]): [member id]

    Returns:
        [json]: [API request response]
    """
    r = requests.get(url.format(id))
    return r.json()
4dc686c616f3ea9124c866b593d44bdc63e54d1d
699,689
from typing import Sequence

import torch


def _project_anchors(fmap_shape: Sequence[int], stride: int,
                     anchors: torch.Tensor) -> torch.Tensor:
    """
    Project the calculated anchors onto each (W x H) position of a feature map.

    Args:
        fmap_shape (Tuple[int, int]): shape of the feature map (W x H)
        stride (int): stride of the feature map (downscale ratio to original image size)
        anchors (torch.Tensor): calculated anchors for a given stride

    Returns:
        torch.Tensor: anchors over all locations of a feature map. Given A
        anchors, the shape would be `(A x H x W, 4)`
    """
    fw, fh = fmap_shape[:2]
    x_mids = (torch.arange(fw) + 0.5) * stride
    y_mids = (torch.arange(fh) + 0.5) * stride

    x_mids = x_mids.repeat(fh, 1).T.flatten()
    y_mids = y_mids.repeat(fw)

    # xy = torch.stack([x_mids, y_mids]).T
    xy = torch.stack([y_mids, x_mids]).T
    xyxy = torch.cat([xy, xy], dim=1)

    # n_pos = xyxy.size(0)
    xyxy = xyxy.repeat_interleave(anchors.size(0), dim=0)
    anchors = anchors.repeat(fw * fh, 1)

    grid_anchors = xyxy + anchors
    return grid_anchors
18dd36feb44ac6026d41d1e6de5b3eb998c32f64
349,560
def get_modal_scale(scale_notes, mode):
    """Return the scale after applying a musical mode to it.

    Arguments:
        scale_notes -- A list of Note objects of which the scale to transform is made
        mode -- int representing mode value as in mode_info dict
    """
    return scale_notes[mode - 1:]
fe3702154b1f41944bf3c415aff7f73cd68b29a6
56,733
import re


def strip_color(inp: str) -> str:
    """
    Remove ANSI color/style sequences from a string. May not catch obscure
    codes used outside this module.

    :param inp: the string to strip
    :return: ``inp`` with ansi codes removed
    """
    return re.sub('\x1b\\[(K|.*?m)', '', inp)
73abed964a5e40525d5ec2f35cf29412d6eaff13
110,921
from bs4 import BeautifulSoup


def generate_soup(html, parser):
    """Return BeautifulSoup object.

    Args:
        html (str): text response from website requested
        parser (str): parser used by BeautifulSoup

    Returns:
        BeautifulSoup: Navigable BeautifulSoup object
    """
    return BeautifulSoup(html, parser)
658f44b2cce962a6427470a4e6e92a34845cccdf
534,751
def harmean(vals):
    """Compute the harmonic mean of list-like vals.

    Special case for presence of 0: return 0.
    """
    try:
        s = sum((1.0 / x for x in vals))
    except ZeroDivisionError:
        return 0.0
    return len(vals) / s
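Doctest-style illustration (arbitrary values): 3 / (1/1 + 1/2 + 1/4) = 12/7, and a zero triggers the special case:

>>> harmean([1, 2, 4])
1.7142857142857142
>>> harmean([1, 0, 2])
0.0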
cbadecc80da27218101f41616646fc9856d49de3
206,014
def flatten(iterables):
    """
    Flatten an iterable of iterables. Returns a generator.

    list(flatten([[2, 3], [5, 6]])) => [2, 3, 5, 6]
    """
    return (elem for iterable in iterables for elem in iterable)
0a4b9202f25179518622adcc70da694efcf23e45
377,383
import json
import base64


def serialize_validation_info(info):
    """Serialize the given validation info into a raw format.

    Args:
        info (dict): The dictionary of validation info.

    Returns:
        unicode: The base64-encoded JSON of the validation info.
    """
    data = json.dumps(info).encode('utf-8')
    return base64.b64encode(data).decode('utf-8')
b0fde917325a08894c329bc64f60bc8e09b420ed
303,526
from typing import Tuple

import math


def move_on_angle(x: float, y: float, angle: float, distance: float) -> Tuple[float, float]:
    """Move a distance towards an angle in radians."""
    return x + distance * math.cos(angle), y + distance * math.sin(angle)
bb27c87c4cadf0f963476c174633fd47a9396fd1
292,025
def unify_1_md(bins, edges):
    """Unify 1- and multidimensional bins and edges.

    Return a tuple of *(bins, edges)*. Bins and multidimensional *edges* are
    returned unchanged, while one-dimensional *edges* are inserted into a list.
    """
    if hasattr(edges[0], '__iter__'):
        # if isinstance(edges[0], (list, tuple)):
        return (bins, edges)
    else:
        return (bins, [edges])
784bb2a650823f30d11a435928ea951fabf5b8b8
223,262
import math


def extract_length(**kwargs):
    """
    Extract length measures (tokens and chars; linear and log) on whole query.

    Returns:
        (function) A feature extraction function that takes a query and
        returns number of tokens and characters on linear and log scales
    """
    del kwargs

    def _extractor(query, resources):
        del resources
        tokens = len(query.normalized_tokens)
        chars = len(query.normalized_text)
        return {
            "tokens": tokens,
            "chars": chars,
            "tokens_log": math.log(tokens + 1),
            "chars_log": math.log(chars + 1),
        }

    return _extractor
d1bfea182d4cb098d5d88e593e7aa08b525a4e72
403,458
def gdv(dd, n=0):
    """Get-dict-val; returns n-th val of dict dd."""
    return dd[list(dd.keys())[n]]
91c618aac978a39aa7278b609e81bc278e8a6626
70,350
import torch


def _solve_lstsq_subcols(rhs, lhs, lhs_col_idx):
    """Solves an over-determined linear system for selected LHS columns.

    A batched version of `torch.lstsq`.

    Args:
        rhs: right-hand side vectors
        lhs: left-hand side matrices
        lhs_col_idx: a slice of columns in lhs

    Returns:
        a least-squares solution for lhs * X = rhs
    """
    lhs = lhs.index_select(-1, torch.tensor(lhs_col_idx, device=lhs.device).long())
    return torch.matmul(torch.pinverse(lhs), rhs[:, :, None])
1b3a489f761ca8e6cfd9a17dd9f55748ae2d4ed6
398,028
def xr_collapse_across_time(da, time_slice=("2080", "2100")):
    """
    Slices an array along time and averages across time.

    Parameters
    ----------
    da : xr.DataArray with 'time' dimension
    time_slice : tuple of str
        first and last date of sub-period to keep.

    Returns
    -------
    data array with 'time' dropped
    """
    return da.sel(time=slice(time_slice[0], time_slice[1])).mean("time")
a37fa7bb191b9db3eaec30ab7b0314fde3986248
137,060
def freqAt(freqDict: dict, nuc: str, n: int) -> float:
    """Read a frequency dictionary returned by the function freqList.

    **Keyword arguments:**
        freqDict -- frequency dictionary
        nuc -- nucleotide A, T, G or C
        n -- position
    """
    return freqDict[nuc][n]
a2ae75f0fa4cfbecefed9b25ee271a56149d9a9e
313,798
def points2contour(points):
    """
    Convert points to contours. (pts2cnt)

    :param points: array of points ([x, y] for openCV, [y, x] for numpy)
    :return:

    Example::

        points = np.array([[0, 0], [1, 0]])  # points
        contours = points2contour(points)
        print(contours)
        # np.array([[[0, 0]], [[1, 0]]])
    """
    return points.reshape(-1, 1, 2)
8fb89b921a7840b6fb2cf6a9e356b7e693cf716a
152,854
def hits(service):
    """Returns metric 'Hits'."""
    return service.metrics.select_by(**{'system_name': 'hits'})[0]
001ff6efcc1e3027d0c1c2ab1d39f0c3e04d4643
648,483
def dustSurfaceDensityDouble(R, Sig1, Sig2, R1, p1, p2):
    """
    Calculates the dust surface density (Sigma d) from a 2-sloped
    discontinuous power law.
    """
    if R <= R1:
        return Sig1 * pow(R / R1, -p1)
    else:
        return Sig2 * pow(R / R1, -p2)
f7f8f98ec7f8757bb81f952fb3eadbc0909d2efd
532,808
def get_manual_snapshots_for_db_instance(client_obj, instance_id):
    """
    Return list of manual snapshots for instance.
    """
    db_snapshots = client_obj.describe_db_snapshots(
        DBInstanceIdentifier=instance_id,
        SnapshotType='manual'
    )
    return db_snapshots['DBSnapshots']
5f50b50217185faa723afae06bd1a2616d30a518
182,661
import warnings


def format_channel_id(ch):
    """
    Function for formatting an `idelib.dataset.Channel` or `SubChannel` for
    display. Renders as only the channel and subchannel IDs (the other
    information is shown in the rest of the table).

    :param ch: The `idelib.dataset.Channel` or `idelib.dataset.SubChannel`
        to format.
    :return: A formatted "channel.subchannel" string.
    """
    try:
        if ch.parent:
            return f"{ch.parent.id}.{ch.id}"
        else:
            return f"{ch.id}.*"
    except (AttributeError, TypeError, ValueError) as err:
        warnings.warn(f"format_channel_id({ch!r}) raised {type(err).__name__}: {err}")
        return str(ch)
289a579b215f21c58003eff2dbe86c226d71c05f
683,533
def build_window_title(paused: bool, current_file) -> str:
    """
    Returns a neatly formatted window title.

    :param bool paused: whether the VM is currently paused
    :param current_file: the name of the file to display
    :return str: a neatly formatted window title
    """
    return f"EightDAD {'(PAUSED)' if paused else '-'} {current_file}"
736bacb63a5d720b91e15b5812f14c73a1e5dc22
688,926
def power_level(serial: int, x: int, y: int) -> int:
    """Compute the power level of the fuel cell at x, y."""
    rack_id = x + 10
    p = rack_id * y + serial
    p *= rack_id
    p = (p // 100) % 10
    return p - 5
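A worked check (this formula appears to match the Advent of Code 2018 day 11 puzzle): serial 8 at (3, 5) gives rack_id 13, then 13 * 5 + 8 = 73, 73 * 13 = 949, hundreds digit 9, and 9 - 5 = 4:

>>> power_level(8, 3, 5)
4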
316895b97f752867171ff4dd0463ea5395228b97
36,170
import re


def gmlId(id):
    """
    Given an id candidate string, return a valid GML id attribute value by
    removing forbidden characters.
    """
    return re.sub(r'\W|^(?=\d)', '_', id)
11ebc7a4105526db8919309f366f82f110f2ec67
529,703
def get_metrics(metrics, loss):
    """Structure metrics and results in a dict."""
    metrics_values = dict()
    metrics_values['loss'] = loss.item()
    prec, prec_by_class = metrics.get_pixel_prec_class()  # harmonic_mean=True
    recall, recall_by_class = metrics.get_pixel_recall_class()  # harmonic_mean=True
    miou, miou_by_class = metrics.get_miou_class()  # harmonic_mean=True
    dice, dice_by_class = metrics.get_dice_class()

    metrics_values['prec'] = prec
    metrics_values['prec_by_class'] = prec_by_class.tolist()
    metrics_values['recall'] = recall
    metrics_values['recall_by_class'] = recall_by_class.tolist()
    metrics_values['miou'] = miou
    metrics_values['miou_by_class'] = miou_by_class.tolist()
    metrics_values['dice'] = dice
    metrics_values['dice_by_class'] = dice_by_class.tolist()
    return metrics_values
97f00067c7c3e66fc31e5c197b32d818bed59e25
542,593
import torch


def _soft_nms(
    box_class,
    pairwise_iou_func,
    boxes,
    scores,
    method,
    gaussian_sigma,
    linear_threshold,
    prune_threshold,
    topk_per_image,
):
    """
    Soft non-max suppression algorithm.

    Implementation of [Soft-NMS -- Improving Object Detection With One Line
    of Code] (https://arxiv.org/abs/1704.04503)

    Args:
        box_class (cls): one of Box, RotatedBoxes
        pairwise_iou_func (func): one of pairwise_iou, pairwise_iou_rotated
        boxes (Tensor[N, ?]): boxes where NMS will be performed
            if Boxes, in (x1, y1, x2, y2) format
            if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format
        scores (Tensor[N]): scores for each one of the boxes
        method (str): one of ['gaussian', 'linear', 'hard']; see paper for
            details. Users are encouraged not to use "hard", as this is the
            same nms available elsewhere in detectron2
        gaussian_sigma (float): parameter for Gaussian penalty function
        linear_threshold (float): iou threshold for applying linear decay.
            Nt from the paper, re-used as threshold for standard "hard" nms
        prune_threshold (float): boxes with scores below this threshold are
            pruned at each iteration. Dramatically reduces computation time.
            Authors use values in [10e-4, 10e-2]

    Returns:
        tuple(Tensor, Tensor):
            [0]: int64 tensor with the indices of the elements that have been
                kept by Soft NMS, sorted in decreasing order of scores
            [1]: float tensor with the re-scored scores of the elements that
                were kept
    """
    boxes = boxes.clone()
    scores = scores.clone()
    idxs = torch.arange(scores.size()[0])

    idxs_out = []
    scores_out = []

    while scores.numel() > 0:
        top_idx = torch.argmax(scores)
        idxs_out.append(idxs[top_idx].item())
        scores_out.append(scores[top_idx].item())

        top_box = boxes[top_idx]
        ious = pairwise_iou_func(box_class(top_box.unsqueeze(0)), box_class(boxes))[0]

        if method == "linear":
            decay = torch.ones_like(ious)
            decay_mask = ious > linear_threshold
            decay[decay_mask] = 1 - ious[decay_mask]
        elif method == "gaussian":
            decay = torch.exp(-torch.pow(ious, 2) / gaussian_sigma)
        elif method == "hard":  # standard NMS
            decay = (ious < linear_threshold).float()
        else:
            raise NotImplementedError("{} soft nms method not implemented.".format(method))

        scores *= decay
        keep = scores > prune_threshold
        keep[top_idx] = False

        boxes = boxes[keep]
        scores = scores[keep]
        idxs = idxs[keep]

    return torch.tensor(idxs_out).to(boxes.device), torch.tensor(scores_out).to(scores.device)
a3cfe782561ba0264dec1b9c452fcdbc19c5d36b
375,137
def on_off(image, w, h, threshold=128):
    """
    Black and white (no greyscale) with a simple threshold. If the color is
    dark enough, the laser is on!
    """
    result = []
    for row in image:
        result_row = []
        for pixel in row:
            # We draw black, so 255 is for dark pixels
            result_row.append(255 if pixel < threshold else 0)
        result.append(result_row)
    return result
c9e577bf851fa972c1bbe7f8a61afc09ffb37de5
689,111
from typing import Tuple
from typing import List


def split_by_commas(maybe_s: str) -> Tuple[str, ...]:
    """Split a string by commas, but allow escaped commas.

    - If maybe_s is falsey, returns an empty tuple
    - Ignore backslashed commas
    """
    if not maybe_s:
        return ()
    parts: List[str] = []
    split_by_backslash = maybe_s.split(r'\,')
    for split_by_backslash_part in split_by_backslash:
        splitby_comma = split_by_backslash_part.split(',')
        if parts:
            parts[-1] += ',' + splitby_comma[0]
        else:
            parts.append(splitby_comma[0])
        parts.extend(splitby_comma[1:])
    return tuple(parts)
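Doctest-style illustration of escaped versus plain commas:

>>> split_by_commas('a,b')
('a', 'b')
>>> split_by_commas(r'a\,b,c')
('a,b', 'c')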
ca21e5103f864e65e5ae47b49c161e8527036810
9,794
def early_stopping(val_bleus, patience=3):
    """Check if the validation Bleu-4 scores no longer improve for 3
    (or a specified number of) consecutive epochs.
    """
    # The number of epochs should be at least patience before checking
    # for convergence
    if patience > len(val_bleus):
        return False
    latest_bleus = val_bleus[-patience:]
    # If all the latest Bleu scores are the same, return True
    if len(set(latest_bleus)) == 1:
        return True
    max_bleu = max(val_bleus)
    if max_bleu in latest_bleus:
        # If one of the recent Bleu scores improves, not yet converged
        if max_bleu not in val_bleus[:len(val_bleus) - patience]:
            return False
        else:
            return True
    # If none of the recent Bleu scores is greater than max_bleu, it has converged
    return True
dafc48f674673736e5a129aab8ce4c40fdbcbbeb
30,096
def fix_line_breaks(s):
    r"""
    Convert \r\n and \r to \n chars. Strip any leading or trailing whitespace
    on each line. Remove blank lines.
    """
    l = s.splitlines()
    x = [i.strip() for i in l]
    x = [i for i in x if i]  # remove blank lines
    return "\n".join(x)
8ac0a9cd1bb14e0817746e5f9c70623ee3667a97
649,271
def serialize_config(config_data):
    """
    Generate a .ini-formatted configuration string to e.g. save to disk.
    """
    return "[syndicate]\n" + "\n".join(
        ["%s=%s" % (config_key, config_value)
         for (config_key, config_value) in config_data.items()])
45cd607770341efa0600c6c253b9a68e8617af0c
643,389
def title(s):
    """Convert string to title case, keeping any words already starting with
    a capital letter as is.
    """
    return " ".join([w.title() if w.islower() else w for w in s.split()])
7bd992818af45901f0c19214b778d309f5944bfe
393,153
def get_box_size(box):
    """Calculate the bounding box size (area)."""
    return (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
54d8353f687300b02c14baee8d186bc3baaf6167
109,552
def split(line, types=None, delimiter=None):
    """
    Split a line of text and optionally perform type conversion. By default,
    splitting is performed on whitespace, but a different delimiter can be
    selected with the delimiter keyword argument. For example (the examples
    below are the most important part!):

    >>> split('GOOG 100 490.50')
    ['GOOG', '100', '490.50']
    >>> split('GOOG 100 490.5', [str, int, float])
    ['GOOG', 100, 490.5]
    >>> split('GOOG,100,490.50', delimiter=',')
    ['GOOG', '100', '490.50']
    """
    fields = line.split(delimiter)
    if types:
        fields = [ty(val) for ty, val in zip(types, fields)]
    return fields
5c85fb8470882d93131dba2adc86b179d5e6000f
599,930
def _single_prefix_path(root):
    """Return (single-prefix path, rest of tree with new root)."""
    path = []
    tree = root
    node_links = root.node_links
    while len(tree) == 1:
        tree = next(iter(tree.values()))
        path.append((tree.item, tree.count))
        node_links.pop()
    tree.parent, tree.item, tree.node_links = None, None, node_links
    return path, tree
4a11ff455331bd7851e3272e637f4527cf7fcb16
299,260
def split_token_sequences(s, token_seps="/"):
    """
    Split string on whitespace. Also split on the separators in `token_seps`
    (slashes by default), but preserve them in the token sequence.
    """
    results = s.split()
    for sep in token_seps:
        new_results = []
        for part in results:
            for i, sub_part in enumerate(part.split(sep)):
                if i > 0:
                    new_results.append(sep)
                new_results.append(sub_part)
        results = new_results
    return results
b9e262dd89ab80e459cae584e36ac9c2b20968a6
236,414
import torch


def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """
    This function is borrowed from https://github.com/kornia/kornia

    Convert 3x4 rotation matrix to 4d quaternion vector.

    This algorithm is based on algorithm described in
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201

    Args:
        rotation_matrix (Tensor): the rotation matrix to convert.

    Return:
        Tensor: the rotation in quaternion

    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 4)`

    Example:
        >>> input = torch.rand(4, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not torch.is_tensor(rotation_matrix):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))

    if len(rotation_matrix.shape) > 3:
        raise ValueError(
            "Input size must be a three dimensional tensor. Got {}".format(
                rotation_matrix.shape))
    if not rotation_matrix.shape[-2:] == (3, 4):
        raise ValueError(
            "Input size must be a N x 3 x 4 tensor. Got {}".format(
                rotation_matrix.shape))

    rmat_t = torch.transpose(rotation_matrix, 1, 2)

    mask_d2 = rmat_t[:, 2, 2] < eps

    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]

    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([
        rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
        t0,
        rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
        rmat_t[:, 2, 0] + rmat_t[:, 0, 2]
    ], -1)
    t0_rep = t0.repeat(4, 1).t()

    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([
        rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
        rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
        t1,
        rmat_t[:, 1, 2] + rmat_t[:, 2, 1]
    ], -1)
    t1_rep = t1.repeat(4, 1).t()

    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([
        rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
        rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
        rmat_t[:, 1, 2] + rmat_t[:, 2, 1],
        t2
    ], -1)
    t2_rep = t2.repeat(4, 1).t()

    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([
        t3,
        rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
        rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
        rmat_t[:, 0, 1] - rmat_t[:, 1, 0]
    ], -1)
    t3_rep = t3.repeat(4, 1).t()

    mask_c0 = mask_d2 * mask_d0_d1
    mask_c1 = mask_d2 * ~mask_d0_d1
    mask_c2 = ~mask_d2 * mask_d0_nd1
    mask_c3 = ~mask_d2 * ~mask_d0_nd1
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)

    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
0caae639dfde50da99ace14516aee4ff08d72fe4
230,452
def erb_bandwidth(fc):
    """Bandwidth of an Equivalent Rectangular Bandwidth (ERB).

    Parameters
    ----------
    fc : ndarray
        Center frequency, or center frequencies, of the filter.

    Returns
    -------
    ndarray or float
        Equivalent rectangular bandwidth of the filter(s).
    """
    # In Hz, according to Glasberg and Moore (1990)
    return 24.7 + fc / 9.265
10459eb15c6baab5890bf51e12039533f36d6e9b
575,610
from typing import Dict
from typing import Any


def empty_params(*args, **kwargs) -> Dict[str, Any]:
    """Return an empty dict."""
    return {}
13cfd50c435589619925d3a87a612f827a6ee3d6
581,828
from typing import Union


def aslist(i, argname: Union[str, None], allowed_types=list):
    """
    Converts iterables (except strings) into a list.

    :param argname:
        If string, it's used in the exception raised when `i` is not an
        iterable. If `None`, wraps non-iterables in a single-item list.
    """
    if not i:
        return i if isinstance(i, allowed_types) else []

    if isinstance(i, str):
        i = [i]
    elif not isinstance(i, allowed_types):
        try:
            i = list(i)
        except Exception as ex:
            if argname is None:
                return [i]
            raise ValueError(f"Cannot list-ize {argname}({i!r}) due to: {ex}") from None

    return i
199bdc0492c6fcf486af891f606dd0ba3603a092
436,146
def kgs2lbs(sets, sub=0):
    """Convert a set of weights in kilograms to pounds.

    :param list sets: Iterable of prescribed weights.
    :param int sub: Amount to subtract from each converted weight. Useful for
        taking bar weight into account.
    """
    return (int(round(x * 2.20462) - sub) for x in sets)
f25b864d7b05d04d9e30138bcb28fa05ffa7b223
571,640
def swap_bits(x, i, j):
    """Swap the ith and jth bits in 'x'."""
    # Extract the i-th and j-th bits to see if they differ.
    if (x >> i) & 1 != (x >> j) & 1:
        # Retrieve the bits to flip by shifting to
        # the ith and jth indices. Create a bit mask.
        bit_mask = (1 << i) | (1 << j)
        # Flip bits:
        # x ^ 1 = 0 when x = 1.
        # x ^ 1 = 1 when x = 0.
        x ^= bit_mask
    return x
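Doctest-style check (binary literals for readability; equal bits leave x unchanged):

>>> bin(swap_bits(0b1000, 3, 0))
'0b1'
>>> bin(swap_bits(0b1010, 3, 1))
'0b1010'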
a6e788884ea970ef70878f803a1d0616e904e793
567,396
def feet(i):
    """
    feet(i)

    Return i (in inches) converted to feet.
    """
    return i / 12
bff46c1399aabee1f589dea98c9c43ced30d0756
7,063
def _computeforwardmissing(a, b, match=None):
    """Computes which files are in b but not a.

    This is its own function so extensions can easily wrap this call
    to see what files _forwardcopies is about to process.
    """
    ma = a.manifest()
    mb = b.manifest()
    return mb.filesnotin(ma, match=match)
f97810ebbdfa2d516d43b6c2587d4a0c9972e81d
116,808