content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from pathlib import Path


def check_file_paths(file_path):
    """Validate that the given input file exists.

    :param file_path: absolute or relative path, or None
    :return: Path object if the file exists; None when file_path is None
    :raises FileNotFoundError: when the path does not point to a file
    """
    if file_path is None:
        return file_path
    candidate = Path(file_path)
    if not candidate.is_file():
        raise FileNotFoundError('There is no file ', file_path)
    return candidate
1d36c2be601c07bc592ba6952b8e27aa856d1e9b
93,933
import itertools def _find_cols(cat_vars_ohe, length, categorical_columns): """ Convert the input one-hot encoding dictionary to a dictionary that can be used for ordinal to one-hot conversion. In addition returns a list of categorical columns including ones defined in input and Args: cat_vars_ohe (dict): Keys are the first column index for each one-hot encoded categorical variable and values are the number of categories per categorical variable. length (int): Total number of columns categorical_columns (list of int): list of the index of the categorical (non-one hot encoded) columns. Returns: tuple: dict: Keys are the new column indexes and values are the values of the non-one hot encoded categorical variables (if a list) or the old column index. list: list of bool values showing if the column is categorical """ if categorical_columns is None: categorical_columns = [] list2d = [list(range(k, k + v)) for k, v in cat_vars_ohe.items()] merged = list(itertools.chain(*list2d)) i, j, k = 0, 0, 0 col_dict = {} categorical = [] while i < length: if i not in merged: col_dict[j] = i if i in categorical_columns: categorical.append(True) else: categorical.append(False) i += 1 j += 1 else: col_dict[j] = list2d[k] i += len(list2d[k]) j += 1 k += 1 categorical.append(True) return col_dict, categorical
d7640d0183359b4f7ef88a286f8beccbbc395a63
93,934
def __clean_bespoke_indicators(missing_value):
    """Normalise number formatting for a handful of WHO GHO indicators.

    A number of indicators in the WHO GHO database use spaces instead of
    commas, e.g. a number is formatted as 10 453 000 000.00 instead of
    10,453,000,000.00.  This function handles those cases upfront, rather
    than editing them later via regex.

    Parameters
    ----------
    missing_value : pd.DataFrame
        The ingested data where 'NumericValue' is missing.

    Returns
    -------
    pd.DataFrame
        The same frame, where the 'Value' entries for these specific
        cases have had ' ' replaced by ','.
    """
    import pandas as pd

    targets = ['HIV_0000000022', 'Rev_excise', 'Rev_govt_total',
               'Rev_imp_other', 'Rev_VAT', 'R_Price_lowest_cost_estimate',
               'R_Price_premium_estimate']
    is_target = missing_value['IndicatorCode'].isin(targets)
    unchanged = missing_value.loc[~is_target]
    # Work on an explicit copy: mutating a .loc slice in place raises
    # SettingWithCopyWarning and may not write through.
    messy_df = missing_value.loc[is_target].copy()
    messy_df['Value'] = messy_df['Value'].str.strip()
    messy_df['Value'] = messy_df['Value'].str.replace(' ', ',')
    # Undo the replacement for known textual values that contain spaces.
    messy_df['Value'] = messy_df['Value'].replace(
        {'Not,applicable': 'Not applicable',
         'No,data': 'No data',
         'Data,not,available': 'Data not available'})
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    return pd.concat([unchanged, messy_df])
9ef9fbca59429874b341fe3801a75c6b5dd5a493
93,937
def word_count(text: str, word: str = '') -> int:
    """Count occurrences of ``word`` in ``text``.

    When ``word`` is empty (the default), count all whitespace-separated
    words instead.
    """
    tokens = text.split()
    if not word:
        return len(tokens)
    return sum(1 for token in tokens if token == word)
f73ecbad4c81d9f5108c9a82756bd4caf5306a9a
93,940
import re


def choose_lang(bot, trigger):
    """Pick the wiki query language from the sender's nick/channel settings.

    Falls back to any per-channel override in the config, then to the
    configured default language.
    """
    user_lang = bot.db.get_nick_value(trigger.nick, 'wikipedia_lang')
    if user_lang:
        return user_lang

    if not trigger.sender.is_nick():
        channel_lang = bot.db.get_channel_value(trigger.sender,
                                                'wikipedia_lang')
        if channel_lang:
            return channel_lang

        if bot.config.wikipedia.lang_per_channel:
            custom = re.search('(' + trigger.sender + r'):(\w+)',
                               bot.config.wikipedia.lang_per_channel)
            if custom is not None:
                return custom.group(2)

    return bot.config.wikipedia.default_lang
8ac379ff7fe822626443bb4f8016135cf6b58e78
93,941
def dot(x, y):
    """Calculates the scalar product between two vectors x and y.

    The scalar product is the sum of the products of each individual
    elements.

    Args:
        x: The left multiplicand.
        y: The right multiplicand.

    Returns:
        The sum of all z_i for which z_i = x_i * y_i holds.
    """
    result = 0
    for index, left in enumerate(x):
        result += left * y[index]
    return result
6e90d9fd265e0ba710997f25e3109606b1cb1791
93,942
def filter_dict(data, include, exclude, joinchar='.'):
    """Filter a dictionary using the provided include and exclude patterns.

    :param dict data: The data to filter (dict or OrderedDict, type is
        respected).
    :param list include: List of patterns of key paths to include.
    :param list exclude: List of patterns of key paths to exclude.
    :param str joinchar: String used to join the keys to form the path.
    :return: The filtered dictionary.
    :rtype: dict or OrderedDict
    """
    assert isinstance(data, dict)

    def recurse(path_parts, node):
        # Leaf values pass through untouched.
        if not isinstance(node, dict):
            return node
        kept = (
            (key, recurse(path_parts + [key], value))
            for key, value in node.items()
            if is_wanted(joinchar.join(path_parts + [key]), include, exclude)
        )
        # node.__class__ preserves dict vs OrderedDict.
        return node.__class__(kept)

    return recurse([], data)
e51c0414f278a6cb62a36dd72992f4e80c07f40d
93,947
import math


def GetStandardizedWeights(weights):
    """Normalize weights so the largest absolute weight equals 1.0.

    Parameters:
        weights -- the list with the atomic weights

    Returns the scaled weights plus the original absolute maximum (the
    input is returned unscaled when that maximum is zero).
    """
    peak = max(math.fabs(value) for value in weights)
    if peak > 0:
        return [value / peak for value in weights], peak
    return weights, peak
c2be2c83f0ef16716b5ab6720433a69b03ba345a
93,950
def get_indexes(r_peak_times, window):
    """Compute zero-based indexes of windows for RR-Interval averages.

    :param r_peak_times: data point locations of R-peaks, in seconds
    :param window: desired window width, in seconds
    :return: list of indexes, one per window boundary crossed
    """
    indexes = []
    multiplier = 1
    for idx, peak_time in enumerate(r_peak_times):
        # Record the first peak at or past each window boundary.
        if peak_time >= multiplier * window:
            indexes.append(idx)
            multiplier += 1
    return indexes
641c1a8b1f1ce10a74e207fa189a9ca4fd5ae09e
93,962
def pack(clone_url: str, sha: str) -> str:
    """Pack the source for a commit into a single pipe-delimited str."""
    return "|".join((clone_url, sha))
ffe577d9bd93fb13e05803665194ff2112406166
93,963
import math


def split_in_slices(number, num_slices):
    """
    :param number: a positive number to split in slices
    :param num_slices: the number of slices to return (at most)
    :returns: a list of slices

    >>> split_in_slices(4, 2)
    [slice(0, 2, None), slice(2, 4, None)]
    >>> split_in_slices(5, 1)
    [slice(0, 5, None)]
    >>> split_in_slices(5, 2)
    [slice(0, 3, None), slice(3, 5, None)]
    >>> split_in_slices(2, 4)
    [slice(0, 1, None), slice(1, 2, None)]
    """
    assert number > 0, number
    assert num_slices > 0, num_slices
    blocksize = int(math.ceil(number / num_slices))
    # Every blocksize-th offset starts a slice; the last slice is clipped.
    return [slice(start, min(start + blocksize, number))
            for start in range(0, number, blocksize)]
8406263bf08d9f8903d2d992429d3350100c77ab
93,965
import re


def get_skip_report_address_by_index(skip_report_list):
    """Parse validator address from skip report.

    Returns a closure that, given a zero-based index into the report
    list (one report per line), extracts the validator address for that
    entry.  The address is None when the entry does not exist or cannot
    be parsed.
    """
    pattern = re.compile("^[0-9]+,(0x[0-9,a-f]+?),[0-9,-]+ [0-9,:]+$")

    def extract_address(index):
        if index < len(skip_report_list):
            entry = skip_report_list[index]
        else:
            entry = ""
        findings = pattern.findall(entry.strip())
        # Exactly one match means a well-formed report line.
        return findings[0] if len(findings) == 1 else None

    return extract_address
d9668aef68a2e6042539fe282fd72eac33d1b437
93,966
def get_window_delimiters(number_of_bins_d, scaling_k, first_bin_size,
                          embedding_step_size):
    """
    Get delimiters of the window, used to describe the embedding.  The
    window includes both the past embedding and the response.

    The delimiters are times, relative to the first bin, that separate
    two consequent bins.
    """
    bin_sizes = [first_bin_size * 10 ** ((number_of_bins_d - i) * scaling_k)
                 for i in range(1, number_of_bins_d + 1)]
    delimiters = []
    running_total = 0
    for size in bin_sizes:
        running_total += size
        delimiters.append(running_total)
    # The response bin sits one embedding step past the last delimiter.
    delimiters.append(delimiters[-1] + embedding_step_size)
    return delimiters
6d26a37a14e818bf0dabab2d12269c59ea7a2cb8
93,967
def ordinal_filter(value):
    """Return ``value`` with its ordinal suffix, e.g. 62 -> '62nd'.

    The 11th-13th 'teen' cases always take 'th' (this now also holds for
    111, 213, ... via ``value % 100``).
    """
    digit = value % 10
    # 11/12/13 (in any hundred) are special-cased: 11st/12nd/13rd are wrong.
    if 10 < value % 100 < 20:
        suffix = 'th'
    elif digit == 1:  # '==', not 'is': identity checks on ints are unreliable
        suffix = 'st'
    elif digit == 2:
        suffix = 'nd'
    elif digit == 3:
        suffix = 'rd'
    else:
        suffix = 'th'
    return '%d%s' % (value, suffix)
cfba7736bb2803c28df074912b3ff66ded5b936d
93,969
def uppercase_first_letter(string_: str) -> str:
    """Return string with first character upper case.

    Unlike str.capitalize(), the rest of the string is left untouched.
    The empty string is returned unchanged instead of raising IndexError.
    """
    if not string_:
        return string_
    return string_[0].upper() + string_[1:]
89ac25a2981039abf15b2f035aa8296f91acbaf4
93,970
def equals(field, value):
    """Return function where input ``field`` value is equal to ``value``"""
    def predicate(record):
        return record.get(field) == value
    return predicate
2b49e8b1c803e22cc9f236f35d6498f3bb2a0189
93,974
def group_episodes_by_writer(episodes):
    """Utilizes a dictionary to group individual episodes by a contributing
    writer.  The writer's name comprises the key and the associated value
    comprises a list of one or more episode dictionaries.  Duplicate keys
    are NOT permitted.

    Format:
        {
            < writer name >: [{< episode_01 >}, {< episode_02 >}, ...],
            ...
        }

    Parameters:
        episodes (list): nested episode dictionaries

    Returns:
        dict: a dictionary that groups episodes by a contributing writer
    """
    result = {}
    # Iterate episodes directly (no index bookkeeping); the stray debug
    # print from the original has been removed.
    for episode in episodes:
        for writer in episode['episode_writers']:
            result.setdefault(writer, []).append(episode)
    return result
89121ce28e62b4c8bf096d91e5c06c9f72386c98
93,980
def time2sec(timestr):
    """Convert time specs like '5s', '3m', '2h' to seconds."""
    unit = timestr[-1]
    if unit == "s":
        return int(timestr[:-1])
    if unit == "m":
        return int(timestr[:-1]) * 60
    if unit == "h":
        return int(timestr[:-1]) * 60 * 60
    # No recognised unit suffix: treat the whole string as seconds.
    return int(timestr)
5d8474c93d8588ece12c19f823999a0dcfd0e383
93,983
def calculate_route_stats(elevation_points):
    """Calculate total ascent and descent from elevation points."""
    total_ascent = 0
    total_descent = 0
    previous = elevation_points[0]
    for current in elevation_points:
        delta = current - previous
        if delta > 0:
            total_ascent += delta
        else:
            total_descent += -delta
        previous = current
    return int(total_ascent), int(total_descent)
998a5de73bee918d2402c7149627a3bea9eeb183
93,990
def Gamma1_gasrad(beta):
    """
    Gamma1 for a mix of ideal gas and radiation

    Hansen & Kawaler, page 177, Eqn. 3.110

    Parameters
    ----------
    beta : float
        Gas pressure fraction Pgas/(Pgas+Prad)
    """
    radiation_term = 4. - (3. * beta)
    Gamma3minus1 = (2. / 3.) * radiation_term / (8. - (7. * beta))
    return beta + radiation_term * Gamma3minus1
e54c5d4e682df9ae975fe9b1901eb482aa92f8b2
93,993
def get_gap_total(gaps, interval):
    """Get total length of time for all gaps in a channel

    Parameters
    ----------
    gaps: array
        Array of gaps (each gap exposes start at [0] and end at [2])
    interval: string
        the interval being warned against; "minute" converts to minutes
    """
    divisor = 60 if interval == "minute" else 1
    return sum(int(gap[2] - gap[0]) / divisor for gap in gaps)
acd1b6be48350fd9a83abc74796e5b354e07c174
93,994
import math


def dist(p1, p2):
    """
    Compute the euclidean distance between p1 and p2.

    p1 -- point (x, y)
    p2 -- point (x, y)
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.hypot(dx, dy)
e37d4db03eca23a82293dd068a0f4edc08bd978a
94,002
def format_func(value, tick_number):
    """
    Function to convert tick labels from seconds elapsed to day of date.
    """
    seconds_per_day = 24 * 60 * 60
    return int(value / seconds_per_day)
eecd0abe14b40d84c37eac9919a1738c50488680
94,004
def period_at_end(token):
    """
    Args:
        token (str): word being evaluated

    Returns:
        bool: True if last character is a period, False if not.
    """
    # '==' rather than 'is': identity comparison against a string literal
    # is implementation-dependent (SyntaxWarning on modern CPython).
    # token[-1] replaces the list(token).pop() round-trip.
    return token[-1] == "."
528b9301f02a448ddb98b0a5cc4b91894446a73c
94,005
def find_common_prefixes(rows):
    """
    Find any common prefixes for the registry names given in the provided
    lines, after splitting on _.

    E.g.
        PREFIX_a
        PREFIX_b_c
        ...
    Would return {'PREFIX'}.  While
        PREFIX_PREFIX2_a
        PREFIX_PREFIX2_b_c
        ...
    Would return {'PREFIX_PREFIX2'}.

    We do this by finding the common ancestor that all have.  If there is
    not at root level, then we just return the prefixes at root level, i.e.
        PREFIX1_a
        PREFIX2_b_c
        ...
    Would return {'PREFIX1', 'PREFIX2'}

    :param rows: objects exposing a ``name`` attribute (registry entries)
    :return: set of prefix strings, each ending in '_'
    :raises RuntimeError: when every name shares every component (no split
        point is ever found)
    """
    # Fewer than two rows cannot share a meaningful prefix.
    if len(rows) <= 1:
        return set()
    splats = [row.name.split("_") for row in rows]
    previous_common_ancestor = None
    # Grow the prefix one '_'-component at a time until the rows diverge.
    for i in range(1, max(len(s) for s in splats)):
        prefixes = set(["_".join(s[:i]) + "_" for s in splats])
        if len(prefixes) > 1:
            # Divergence found: return the last level where all agreed,
            # or the current (root-level) prefixes if they never agreed.
            return prefixes if previous_common_ancestor is None else previous_common_ancestor
        previous_common_ancestor = prefixes
    else:
        # for/else: the loop ran to completion without a divergence.
        raise RuntimeError("Couldn't find common prefixes!")
ba39805fa1216127f7b495bd32cf553728136be1
94,006
import json


def dict_to_masstransit_message(message_type, message_body_dict):
    """
    Method for convert dict with 'message' body and message_type value to
    MT readable format

    :param message_type: message_type value, need for MT body
    :param message_body_dict: dict with data of 'message' for MT
    :return: MT body, ready for transfer
    """
    payload = {
        'messageType': [message_type],
        'message': message_body_dict,
    }
    return json.dumps(payload)
5fe9c32fa6f983de2ac506cfe99bc3f8b9306d54
94,011
def pop_association(Traits):
    """Sets the individuals to their respective populations.

    Also returns the sample counts per population."""
    Pops = {}
    Pop_counts = {}
    with open(Traits, 'r') as traits:
        next(traits)  # skip the header line
        for line in traits:
            fields = line.strip().split()
            individual, population = fields[0], fields[1]
            Pops[individual] = population
            Pop_counts[population] = Pop_counts.get(population, 0) + 1
    return Pops, Pop_counts
be14c698ac218989866ac901d66d13cb9ece9967
94,013
import string


def alpha_to_int(alpha):
    """
    Return a letter's place in the alphabet, or None.

    For double letters, return its place in the double-letter alphabet,
    which starts at 27.
    """
    if not isinstance(alpha, str):
        return None
    # Handle lowercase or uppercase double letters, but not a mix.
    if not (alpha.islower() or alpha.isupper()):
        return None
    letters = string.ascii_lowercase
    mapping = {letter: position
               for position, letter in enumerate(letters, 1)}
    # Double letters occupy 27..52 in the same order.
    for position, letter in enumerate(letters, 27):
        mapping[letter * 2] = position
    return mapping.get(alpha.lower(), None)
5272ea532a3c743b9b0cb732718c78672b74b4c7
94,014
def is_popup_visible(view):
    """Report whether the given view currently shows a popup."""
    visible = view.is_popup_visible()
    return visible
6a5eab6a34971a79f4fbd934abc927172fda50c5
94,015
import re def _check_for_run_information(sample_name): """ Helper function to try and find run ID information of the form RunXX_YY """ m = re.match('^Run\d+_\d+$', sample_name) if m is not None: return True else: return False
1ddf51cc723b7831912952c1aacf4095dbcb9d9a
94,025
def axes_contains(ax, obj_list):
    """
    Check that a matplotlib.Axes instance contains certain elements.

    Parameters
    ----------
    ax : matplotlib.Axes
        Axes instance.
    obj_list : list of tuples
        List of tuples, one for each type of object to look for. The
        tuple should be of the form (matplotlib.object, int), where int
        is the number of instances of that object that are expected.

    Returns
    -------
    bool
        True when every requested type appears exactly the expected
        number of times among the axes' children.
    """
    elems = ax.get_children()
    for objtype, num_expected in obj_list:
        # One C-speed pass per requested type; the dead `contains_all`
        # variable from the original has been removed.
        num = sum(1 for elem in elems if isinstance(elem, objtype))
        if num != num_expected:
            return False
    return True
08f9da6dac0564f8969084bd6d3573549567ea52
94,027
from pathlib import Path


def remove_front_back_matter(filename: Path, OUTPUT: Path) -> None:
    """Remove legal information from Project Gutenberg files.

    Reads the file with 'filename' and outputs the same file with the
    "_proc" word appended at the end of the filename in the 'OUTPUT'
    folder, but without the lines at the beginning and at the end of the
    original file containing legal information from Project Gutenberg
    (everything outside the "*** START OF" / "*** END OF" markers).

    Parameters:
        filename: Path - name of the file to process
        OUTPUT: Path - name of the output folder (created if missing)

    Returns:
        None
    """
    # Include the argument in the message: the original f-string had no
    # placeholder, so the failing path was never reported.
    assert filename.exists(), f"File {filename} does not exist!"

    if not OUTPUT.exists():
        OUTPUT.mkdir()
        print(f"Directory {OUTPUT} created!")

    lines = []
    write = False
    with open(filename, "r", encoding="UTF-8") as f:
        for line in f:
            stripped = line.strip()
            if stripped.startswith("*** START OF"):
                write = True
                continue
            elif stripped.startswith("*** END OF"):
                write = False
                break
            if write:
                lines.append(line)

    with open(OUTPUT / (filename.stem + "_proc.txt"), "w",
              encoding="UTF-8") as g:
        g.writelines(lines)
    return None
09d0059e87f4a8b5229d225f3fec296aa677e492
94,028
def parse_share_url(share_url):
    """Return the group_id and share_token in a group's share url.

    :param str share_url: the share url of a group
    """
    segments = share_url.rstrip('/').split('/')
    # The token is the last path segment, the group id the one before it.
    *_, group_id, share_token = segments
    return group_id, share_token
c3a0ab6313ff0a26cf746f69d77b0733ba1534e4
94,029
def expectation_2d(values, probabilities):
    """Calculate the expected value over values in a 2D layout.

    Args:
        values (torch.Tensor): Values for each position.
        probabilities (torch.Tensor): Probabilities for each position.

    Returns:
        The expected values (summed over the trailing two dimensions).
    """
    weighted = values * probabilities
    *lead_dims, height, width = weighted.size()
    flattened = weighted.view(*lead_dims, height * width)
    return flattened.sum(-1)
4a9a0091132da7f416675418c80e65cfe45cd02a
94,033
def pprint_size(value):
    """Pretty-print size (with rounding), e.g. '2G', '15M', '999'.

    Values below 1 render as '0' (the original fell off the loop and
    returned None for them).
    """
    for postfix, limit in [("G", 1e9), ("M", 1e6), ("K", 1e3), ("", 1)]:
        if value >= limit:
            return "{}{}".format(int(value / limit), postfix)
    return "0"
845904e36a250e4b3675f97a0674ee3207971b8a
94,034
def _get_info_names(profile): """Get names of infos of tasks.""" info_names = sorted(set().union(*(set(val) for val in profile.values()))) return info_names
b692d6046da9371b73e3bd0ae926b61d8507ef7c
94,035
def format_dem_rsc(rsc_dict):
    """Creates the .dem.rsc file string from key/value pairs of an
    OrderedDict

    Output of function can be written to a file as follows
        with open('my.dem.rsc', 'w') as f:
            f.write(outstring)

    Args:
        rsc_dict (OrderedDict): data about dem in ordered key/value format
            See `load_dem_rsc` output for example

    Returns:
        outstring (str) formatting string to be written to .dem.rsc
    """
    lines = []
    for field, value in rsc_dict.items():
        key = field.upper()
        # Step values get 12 decimal places so they never print in
        # scientific notation; keys are left-justified to 14 chars.
        if field.lower() in ('x_step', 'y_step'):
            lines.append("{:<14s}{:0.12f}\n".format(key, value))
        else:
            lines.append("{:<14s}{}\n".format(key, value))
    return "".join(lines)
ec93f9e07a251bb9c361c422142b3ea9d4f96468
94,041
import itertools
import networkx


def check_logic_forms_eq_pair(lfs: list):
    """Check equivalency of logic forms pairwise.

    Parameter:
        lfs (list): dicts of id (int) and a graph (LogicalFormGraph)

    Returns:
        isomorph_pares (list): tuples with ids of equivalent logic forms
    """
    pairs = itertools.combinations(lfs, 2)
    return [
        (lf_a['id'], lf_b['id'])
        for lf_a, lf_b in pairs
        if networkx.is_isomorphic(lf_a['graph'].graph, lf_b['graph'].graph)
    ]
a06539f1bd03f6400c607bcc16c40f9c7ee0966d
94,043
import os
import tempfile
import yaml


def operator_settings_file(settings: dict = None) -> str:
    """Return the path to a temp file with ``settings`` saved as YAML.

    :param settings: mapping to serialise; defaults to an empty dict.
    :return: path of the created file.
    """
    # None sentinel instead of a mutable `{}` default (shared between calls).
    if settings is None:
        settings = {}
    # mkstemp returns an OPEN low-level fd; the original discarded it,
    # leaking one file descriptor per call.  fdopen adopts and closes it.
    fd, operator_file = tempfile.mkstemp()
    with os.fdopen(fd, "w") as f:
        yaml.safe_dump(settings, f)
    return operator_file
bc1abe8d64950e4923389c6e22d991406000dd7f
94,045
def get_stanford_tag(siera_tag):
    """Returns the corresponding Stanford NER tag on given Siera entity tag"""
    # Plain value mapping; unknown tags fall back to the outside tag 'O'.
    mapping = {
        'Individual': 'PERS',
        'Location': 'LOC',
        'Organization': 'ORG',
        'Brand': 'O',
        'Publication': 'O',
        'Hashtag': 'O',
    }
    return mapping.get(siera_tag, 'O')
67938870bcfa2b9fa94348b8a4e37d667b6e0e24
94,049
def blobfs_detect(client, bdev_name):
    """Detect whether a blobfs exists on bdev.

    Args:
        bdev_name: block device name to detect blobfs

    Returns:
        True if a blobfs exists on the bdev; False otherwise.
    """
    return client.call('blobfs_detect', {'bdev_name': bdev_name})
2ee244e09cb06284ec7bd7168f7ff250d70b9e2c
94,051
def parse_modifiers(modifiers):
    """Parse modifiers(regex match object) into type sets then return them."""
    weaknesses = set()
    immunities = set()
    for section in modifiers.group(1).split(';'):
        words = [token.strip(',') for token in section.strip().split(' ')]
        # First word decides which bucket the trailing type names go into.
        bucket = weaknesses if words[0] == 'weak' else immunities
        for type_name in words[2:]:
            bucket.add(type_name)
    return (weaknesses, immunities)
1a144fa6317e58a1de7d302fe17ca70aba61bd1b
94,059
def meep_vertices(points, z):
    """
    Converts a list of 2D points into a meep vertices array.
    """
    vectors = ['mp.Vector3({}, {}, {})'.format(px, py, z)
               for px, py in points]
    return '[' + ','.join(vectors) + ']'
6a1513043a5ca335e3c48e0e00ddc1a2d4cb2439
94,061
def __datetime_representer(dumper, data):
    """
    Ensures that datetime objects are represented correctly (as ISO-8601
    timestamp scalars).
    """
    iso_value = data.isoformat("T")
    return dumper.represent_scalar("tag:yaml.org,2002:timestamp", iso_value)
9cbfd0836306a42cd4aa98548cf4f41a0a690592
94,066
import random


def random_spmatrix(n_row, n_col, per_nnz):
    """This function output a random sparse matrix.

    Parameters
    ----------
    n_row : int
        Number of rows
    n_col : int
        Number of columns
    per_nnz : int
        Percentage of none zero elements

    Returns
    -------
    sp_matrix : list
        Sparse matrix without any storage format
    nnz_count : int
        Total none zero elements number
    row_max_nnz : int
        Row wise max none zero elements number
    """
    if n_row < 0 or n_col < 0:
        raise ValueError('The number of rows or columns must > 0')
    if per_nnz < 0 or per_nnz > 100:
        raise ValueError('The percentage of nonzeros must between 0 - 100')

    sp_matrix = []
    nnz_count = 0
    row_max_nnz = 0
    for _ in range(n_row):
        row_data = []
        row_nnz = 0
        for _ in range(n_col):
            # One draw per cell; values below the threshold become zeros.
            sample = random.randint(0, 100)
            if sample < per_nnz:
                row_data.append(0)
            else:
                nnz_count += 1
                row_nnz += 1
                row_data.append(sample)
        row_max_nnz = max(row_max_nnz, row_nnz)
        sp_matrix.append(row_data)
    return sp_matrix, nnz_count, row_max_nnz
fb6c0d12ac28dc438c19430e6769f58e6428d6b6
94,072
import csv


def load_csv(filename):
    """Load a CSV file and return a list with datas
    (corresponding to truths or predictions).
    """
    datas = []
    with open(filename, 'r') as opened_csv:
        for row in csv.reader(opened_csv, delimiter=','):
            datas.append(row[1])
    # Clean the header cell
    datas.remove("Hogwarts House")
    return datas
c1cd1beb03baaba0ba5ab25d74459e44d5e8a557
94,076
from typing import Union
from typing import Counter
from typing import Dict
from typing import List
from typing import Tuple
import csv
import math
import random


def create_training_examples(data_path: str,
                             counter: Union[Counter, Dict[str, int]],
                             context_size: int) -> List[Tuple[str, str]]:
    """Creates training examples while subsampling frequent words.

    Assumes that text is already tokenized.  Each kept word is paired
    with the kept words in a window of ``context_size // 2`` on either
    side.

    :param data_path: CSV file with a header row; column 0 holds the
        space-tokenized sentence.
    :param counter: word -> corpus frequency mapping.
    :param context_size: total context window width.
    :return: list of (target, context) word pairs.
    """
    total_token_count = sum(counter.values())
    examples = []
    window_size = context_size // 2
    with open(data_path) as f:
        reader = csv.reader(f)
        next(reader)  # skip header
        for row in reader:
            sentence = row[0].split()
            kept_words = []
            for word in sentence:
                word_count = counter.get(word, 1)
                word_fraction = word_count / total_token_count
                # word2vec-style subsampling probability (Mikolov et al.).
                prob_of_keeping_word = ((math.sqrt(word_fraction / 0.001) + 1)
                                        * (0.001 / word_fraction))
                if random.random() < prob_of_keeping_word:
                    kept_words.append(word)
            # BUG FIX: pair targets/contexts from the subsampled words.
            # The original iterated the full `sentence` here, so the
            # dropout computed above had no effect at all.
            for i, target in enumerate(kept_words):
                left = kept_words[max(0, i - window_size):i]
                right = kept_words[i + 1:i + window_size + 1]
                for context in left + right:
                    examples.append((target, context))
    return examples
6ec24fab1efbbdf17d8f01fe168368953c0877a2
94,079
def _airtovac(w): """Convert air wavelengths to vacuum wavelengths. Don't convert less than 2000 Å. Parameters ---------- w : :class:`float` Wavelength [Å] of the line in air. Returns ------- :class:`float` Wavelength [Å] of the line in vacuum. """ if w < 2000.0: return w; vac = w for iter in range(2): sigma2 = (1.0e4/vac)*(1.0e4/vac) fact = 1.0 + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/(57.362 - sigma2) vac = w*fact return vac
c5fd699b6c3134624479fc7c7901b17c721faea2
94,080
def determine_high_cor_pair(correlation_row, sorted_correlation_pairs):
    """
    Select highest correlated variable given a correlation row with columns:
    ["pair_a", "pair_b", "correlation"]

    For use in a pandas.apply()
    """
    pair_a = correlation_row["pair_a"]
    pair_b = correlation_row["pair_b"]
    loc_a = sorted_correlation_pairs.get_loc(pair_a)
    loc_b = sorted_correlation_pairs.get_loc(pair_b)
    # The pair that sorts later in the index is the one to report.
    return pair_a if loc_a > loc_b else pair_b
e1260d246de5ad6737e8bc72c78da59fb10f5976
94,082
import math


def pythagoreanTheorem(a, b):
    """
    Uses the pythagorean a^2 + b^2 = c^2 on inputs 'a' and 'b'

    >>> pythagoreanTheorem(3, 4)
    5.0
    >>> pythagoreanTheorem(6.0, 8.0)
    10.0
    >>> pythagoreanTheorem(12.45, 16.32)
    20.5267
    >>> pythagoreanTheorem("Foo", 9) is None
    True
    """
    valid = isinstance(a, (int, float)) and isinstance(b, (int, float))
    if not valid:
        return None
    hypotenuse = math.sqrt(a**2 + b**2)
    return round(hypotenuse, 4)
5e10c1ff438f4bf0d9192940a372eab000e89e4b
94,086
def bubble_sort(list_to_sort):
    """
    Sort list in input using bubble sorting.

    :param list_to_sort: the list to sort (sorted in place)
    :type list_to_sort: list
    :return: the same list, sorted
    :rtype: list
    """
    n = len(list_to_sort)
    for sweep in range(n):
        # After each sweep the largest remaining element has bubbled to
        # the end, so the inner range shrinks by one.
        for pos in range(n - sweep - 1):
            if list_to_sort[pos] > list_to_sort[pos + 1]:
                list_to_sort[pos], list_to_sort[pos + 1] = \
                    list_to_sort[pos + 1], list_to_sort[pos]
    return list_to_sort
20570cb38d48dbc5de68ffe3eca23e11bc813c68
94,090
def deal_cards(deck):
    """
    Provide a full deck (52 cards), and divvy them up into two player
    decks by alternating cards.

    Parameters:
        deck - List of cards

    Returns:
        A list of lists (one for each player)
    """
    players = [[], []]
    for index, card in enumerate(deck):
        players[index % 2].append(card)
    return players
b77d2bc8565b76d242bd894179832ae5ddb2813f
94,091
def replace(scope, strings, source, dest):
    """
    Returns a copy of the given string (or list of strings) in which all
    occurrences of the given source are replaced by the given dest.

    :type  strings: string
    :param strings: A string, or a list of strings.
    :type  source: string
    :param source: What to replace.
    :type  dest: string
    :param dest: What to replace it with.
    :rtype:  string
    :return: The resulting string, or list of strings.
    """
    # Template-language convention: every argument arrives as a list;
    # only the first element of source/dest is used.
    needle = source[0]
    replacement = dest[0]
    return [item.replace(needle, replacement) for item in strings]
ac48bbb9d3b3c99a14859fe9ad14e24100ab0d66
94,092
import torch


def CE_criterion(x, y):
    """
    Cross-Entropy loss function.

    Args:
        x: output of autoencoder
        y: input data
    """
    # Clamp away from 0/1 so the logs stay finite.
    clamped = torch.clamp(x, 1e-8, 1 - 1e-8)
    log_likelihood = (y * torch.log(clamped)
                      + (1. - y) * torch.log(1. - clamped))
    return -torch.mean(log_likelihood)
0ffbc8f102f51651e566039b9096dfdd5846bbdb
94,095
import socket


def is_port_enabled(hostname, port):
    """
    To check if a port is enabled or not.

    For example
    To check ssh port is enabled or not,
        is_port_enabled(HOSTNAME, 22)

    To see glusterd port is enabled,
        is_port_enabled(HOSTNAME, 24007)
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.connect((hostname, port))
        return True
    except socket.error:
        return False
    finally:
        probe.close()
62f9d60936d2583be3c1b94a9262ad7131def9d1
94,096
def contours(x_vals, y_vals):
    """Plot defaults for plotting.contours"""
    span_y = y_vals[-1] - y_vals[0]
    span_x = x_vals[-1] - x_vals[0]
    aspect_ratio = span_y / span_x
    return {'figsize': (8, 8 * aspect_ratio)}
ef7d138f9112d1bc2f37757953b1b9a085653a8c
94,100
import json


def fetch_metadata(fname_json, field):
    """
    Return specific field value from json sidecar.

    :param fname_json: str: Json file
    :param field: str: Field to retrieve
    :return: value of the field.
    :raises KeyError: when the field is absent from the sidecar.
    """
    with open(fname_json) as f:
        metadata = json.load(f)
    if field not in metadata:
        # The original built the KeyError but never raised it, so a
        # missing field silently returned None.
        raise KeyError("Json file {} does not contain the field: {}".format(fname_json, field))
    return metadata[field]
0898d76f11f435c967591fbe803c6ebaff20ea7e
94,102
def options(amount=None):
    """Provides values for options which can be ORed together.

    If no amount is provided, returns a generator of ever growing
    numerical values starting from 1.  If amount is provided, returns a
    amount-sized list of numerical values.
    """
    def generator():
        value = 1
        while True:
            yield value
            value *= 2

    if amount is None:
        return generator()
    return [v for _, v in zip(range(amount), generator())]
b93fed97aa05fd3bf0df68503157ee927250fc74
94,106
import shutil


def match_stat(dest_path, source_path):
    """
    Matches stats of one fs object to that of another, see shutil.copystat
    """
    # Note the argument flip: copystat copies FROM its first argument.
    return shutil.copystat(source_path, dest_path)
822f8e99f07cb7d1b3d598e21ae3b1cc221ff89a
94,107
from pathlib import Path


def get_long_description(readme_file: str) -> str:
    """Extract README from provided file.

    Returns an empty string when the file does not exist.
    """
    readme_path = Path(readme_file)
    if not readme_path.exists():
        return ""
    return readme_path.read_text(encoding="utf-8")
e650bf5f293f2f319b17af19e687e6a3e8883551
94,111
def get_curve_color(buffer):
    """
    Return prefered color for plots for any buffer

    Parameters
    ----------
    buffer: str
        Name of buffer

    Returns
    -------
    str
        color name
    """
    colors = {
        'NNO': 'green',
        'QFM': 'darkorange',
        'IW': 'black',
        'HM': 'red',
        'CoCoO': 'blue',
        'ReReO': 'magenta',
        'Graphite': 'gray',
        'QIF': 'mediumaquamarine',
        'SiSiO2': 'purple',
        'CrCr2O3': 'teal',
        'MoMoO2': 'olive',
        'CaCaO': 'peru',
        'AlAl2O3': 'chartreuse',
        'KK2O': 'deeppink',
        'MgMgO': 'maroon',
        'MnMnO': 'midnightblue',
        'NaNa2O': 'dodgerblue',
        'TiTiO2': 'orangered',
    }
    # Unknown buffers deliberately raise KeyError, matching dict lookup.
    return colors[buffer]
8b27f7c19da0e24139e3de357058cce71772d90c
94,113
def get_timeout_error_regex(rpc_backend_name):
    """
    Given an RPC backend name, returns a partial string indicating the
    error we should receive when an RPC has timed out.  Useful for use
    with assertRaisesRegex() to ensure we have the right errors during
    timeout.
    """
    pg_backends = ("PROCESS_GROUP", "FAULTY_PROCESS_GROUP", "TENSORPIPE")
    if rpc_backend_name in pg_backends:
        return "RPC ran for more than"
    return "(Timed out)|(Task expired)"
964375e1dacd337cf7396182752108dc5f2b94a3
94,114
def split_s3_url(s3_url):
    """
    Breaks up s3 URL into bucket, path under bucket, and tail (file or
    folder)

    :param s3_url: full S3 URL
    :return: 3 tuple (bucket, folder, tail)
    :raises ValueError: when the URL has no bucket component
    """
    parts = s3_url.rsplit('/')[2:]
    if not parts:
        raise ValueError("'{}' not a valid S3 URL".format(s3_url))
    if len(parts) == 1:
        return parts[0], "", ""
    return parts[0], '/'.join(parts[1:-1]), parts[-1]
d50a3a9fe4dd4e26784fc499d087014e0a14f57f
94,115
def lerp(t, x0, x1):
    """Linearly interpolate between x0 and x1 by fraction t."""
    delta = x1 - x0
    return x0 + t * delta
1408a2acb1b9e4b30b562307988c2e0d47a1020b
94,118
def CLEAR(writer, segments):
    """This policy DELETES all existing segments and only writes the new
    segment.
    """
    # An empty segment list tells the writer to keep nothing existing.
    return []
81a515b1174ca3a72974a4843683d874758bd2bd
94,119
import random
import asyncio


async def simulate_fetch_one_url(url):
    """Simulate fetching *url* by sleeping for a random 1-8 second delay.

    Returns a report string naming the url and the simulated fetch time.
    """
    delay = random.randint(1, 8)
    await asyncio.sleep(delay)
    return 'url: {}\t fetched in {} seconds'.format(url, delay)
9c182c6d367a96675dcf8b149d3968e1e6966e2d
94,120
import math


def orthogonal_projection_point_to_line(a, b, c, p):
    """Project point ``p`` orthogonally onto the line ``a*x + b*y + c = 0``.

    Returns the projected point as an (x, y) tuple.
    """
    denom = math.pow(a, 2.) + math.pow(b, 2.)
    px = (b * (b * p[0] - a * p[1]) - a * c) / denom
    py = (a * (-b * p[0] + a * p[1]) - b * c) / denom
    return (px, py)
a9fd172504a3b18d76177e2049c8f560552f2d99
94,121
import torch


def to_device(tensor, device: torch.device):
    """Recursively move a tensor, or a container of tensors, to *device*.

    Supports None (returned untouched), torch.Tensor, and arbitrarily
    nested dicts, lists and tuples of those.

    Raises NotImplementedError for any other type.
    """
    if tensor is None:
        return tensor
    if isinstance(tensor, torch.Tensor):
        return tensor.to(device)
    if isinstance(tensor, dict):
        return {key: to_device(value, device) for key, value in tensor.items()}
    if isinstance(tensor, list):
        return [to_device(item, device) for item in tensor]
    if isinstance(tensor, tuple):
        return tuple(to_device(item, device) for item in tensor)
    raise NotImplementedError
eaa8529bd6fefee0b0e53f1576685f67e9c0ab90
94,127
def mixing(c1, c2, f):
    """Two-endmember linear mixing model.

    Inputs:
        c1 = concentration of endmember 1
        c2 = concentration of endmember 2
        f = fraction of endmember 1 in the model
    Returns:
        concentration of the mixture
    """
    return c1 * f + c2 * (1 - f)
15915dbc1225974b33ee070afdce20e387df1bf7
94,129
def load_data(filepath, split=True):
    """Load a space-separated token/tag dataset.

    Each data line holds four space-separated columns (word, tag1, tag2,
    tag3); sentences are separated by empty lines; lines containing
    'DOCSTART' are skipped.

    IMPORTANT NOTE: by convention, the first line of every loaded dataset
    is empty, so the first record produced is an empty sentence.

    :param filepath: Path to the data.
    :param split: if True (default) return four parallel lists
        (words, tags_1, tags_2, tags_3), one entry per sentence;
        otherwise return a list of [word, tag1, tag2, tag3] rows.
    :return: see `split`.
    """
    words = []
    tags_1 = []
    tags_2 = []
    tags_3 = []
    rows = []

    # Accumulators for the sentence currently being read.
    # BUG FIX: the original bound all four names to the *same* list object
    # (`word = tags1 = tags2 = tags3 = []`), so any tokens read before the
    # first empty line were appended into one shared list. They are now
    # independent lists.
    word, tags1, tags2, tags3 = [], [], [], []

    with open(filepath, "r", encoding='utf8') as file:
        for line in file:
            if 'DOCSTART' in line:
                continue  # header line, not data
            if line in ['\n', '\r\n']:
                # Sentence boundary: flush the accumulators.
                words.append(word)
                tags_1.append(tags1)
                tags_2.append(tags2)
                tags_3.append(tags3)
                rows.append([word, tags1, tags2, tags3])
                word, tags1, tags2, tags3 = [], [], [], []
            else:
                # word tag1 tag2 tag3, separated by single spaces.
                w = line[:-1].split(" ")
                word.append(w[0])
                tags1.append(w[1])
                tags2.append(w[2])
                tags3.append(w[3])

    if split:
        return words, tags_1, tags_2, tags_3
    return rows
28b34524469e948b40037c5b6a1039e10e45a334
94,133
def reverter_1(frase: str) -> str:
    """
    >>> reverter_1('the sky is blue')
    'blue is sky the'
    >>> reverter_1('uma baita frase')
    'frase baita uma'
    """
    # Split into words, reverse their order, rejoin (linear time and memory).
    return ' '.join(frase.split()[::-1])
bff89726903c0d50cc7e6d61a888bad041518ae9
94,134
def flatten(tensor):
    """Flatten the input tensor to shape (nb, nf), keeping dim 0 as batch.

    :param tensor: input Tensor with at least 2 dimensions
    :type tensor: torch.Tensor
    :return: 2-D view of the tensor (returned unchanged when already 2-D)
    :rtype: torch.Tensor
    """
    assert len(tensor.shape) >= 2
    if len(tensor.shape) == 2:
        return tensor
    return tensor.view(tensor.shape[0], -1)
d9103fd3554b8186a0c0a9840b914315d5646334
94,137
def series(n_terms=1000):
    """Estimate e with the series 1/0! + 1/1! + 1/2! + ... and print it.

    The factorial is accumulated incrementally (term /= i+1) instead of
    being recomputed from scratch for every term, turning the original
    O(n^2) loop into O(n).

    :param n_terms: number of series terms to sum.
    """
    total = 0
    term = 1.0  # 1/0!; each iteration divides by the next integer
    for i in range(n_terms):
        total += term
        term /= i + 1
    print(total)
bf66653afd6c316c76c8f95f53777657e2b31cb4
94,140
def evaluate_risks(data, predict_f, loss_f, model_param):
    """Approximate the risk of a model under several loss functions.

    Args:
      data: (inputs, labels) pair of arrays used to approximate the risk.
      predict_f: function mapping (model_param, inputs) to predicted labels.
      loss_f: function mapping (model_param, inputs, labels) to the
        model-specific loss.
      model_param: model parameters.

    Returns:
      {'loss': model-specific loss, 'zero_one': mean 0/1 error}.
    """
    inputs, labels = data
    predictions = predict_f(model_param, inputs)
    errors = 1 - (predictions == labels)
    return {
        'loss': loss_f(model_param, inputs, labels),
        'zero_one': errors.mean(),
    }
ebcaa3de82db50b82d4dc2267439cdf4e33b373a
94,141
from pathlib import Path


def shorten_path(file_path, length):
    """Return a path string keeping only the last *length* components."""
    tail = Path(file_path).parts[-length:]
    return str(Path(*tail))
e5583b04e77b3caa87219b232dd330d48f8612d4
94,142
def span(ser):
    """Return the spread of the passed `pd.Series`.

    Object/categorical series yield the number of distinct values;
    numeric series yield max - min.
    """
    is_categorical = ser.dtype.name in ('object', 'category')
    return ser.nunique() if is_categorical else ser.max() - ser.min()
5d198afa68d27565616c170810712feecf16fe2e
94,148
def time_range_to_mongo_query(start_at, end_at, inclusive=False):
    """Build a mongo query dict constraining `_timestamp` to a time range.

    >>> from datetime import datetime as dt
    >>> time_range_to_mongo_query(dt(2012, 12, 12, 12), None)
    {'_timestamp': {'$gte': datetime.datetime(2012, 12, 12, 12, 0)}}
    >>> expected = {'_timestamp': {
    ...     '$gte': dt(2012, 12, 12, 12, 0),
    ...     '$lt': dt(2012, 12, 13, 13, 0)}}
    >>> time_range_to_mongo_query(
    ...     dt(2012, 12, 12, 12), dt(2012, 12, 13, 13)) == expected
    True
    >>> time_range_to_mongo_query(None, None)
    {}
    """
    if not (start_at or end_at):
        return {}
    bounds = {}
    if start_at:
        bounds['$gte'] = start_at
    if end_at:
        # inclusive upper bound -> $lte, exclusive -> $lt
        bounds['$lte' if inclusive else '$lt'] = end_at
    return {'_timestamp': bounds}
9d7cb8a9bc6ea51e995f5144bdd40c6b61346344
94,151
def _entry_allocated_bitmap(self, entry_number): """Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise. """ index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
faac4b07b09b001940e0e00562e848ab39bfd744
94,155
import torch


def squared_difference(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Return (x - y) * (x - y) element-wise."""
    diff = x - y
    return diff * diff
c95b8c1c71a0dbacea405679874ac37e4dd82384
94,158
def apply_operator(args):
    """Helper function to apply an operator to a vector.

    :param args: (operator, vector) pair; returns ``operator * vector``.
    """
    return args[0] * args[1]
02e549fbb188cc5f7b628247ee089c7d37db7fa3
94,159
def plain(gate):
    """Return the gate name with any leading inversion markers ('~') removed."""
    while gate.startswith("~"):
        gate = gate[1:]
    return gate
c6e75906d82c6ac07240f52fa199102f695cbfa9
94,160
from typing import List


def base_arguments() -> List[str]:
    """Reusable base arguments for tests in the current module."""
    args = ["../test/skiptasks.yml"]
    return args
c146fdb85308ecd3faf68505aabe28ec189ce66d
94,162
def get_orthographies_by_name(engine):
    """Map each orthography name to the largest id carrying that name.

    Queries the `orthography` table through *engine*; when several rows
    share a name, only the highest id is kept.
    """
    engine.execute('set names utf8;')
    rows = engine.execute('SELECT id, name FROM orthography;').fetchall()
    by_name = {}
    for row_id, name in rows:
        if name not in by_name or row_id > by_name[name]:
            by_name[name] = row_id
    return by_name
566f4267144b9704a0cb5bc3106b79ff37d52ad1
94,168
def menu_option_names_to_titles(menu_option_infos):
    """Build a dictionary mapping menu option names to titles.

    Each info is indexable: position 0 holds the name, position 1 the title.
    """
    return {info[0]: info[1] for info in menu_option_infos}
49f0aba29dd12b8e59ebb46525e86c4e72ae21ae
94,172
import math


def ca_rms_only(struct_a, struct_b, residue_list):
    """Calculate the CA RMSD of two structures over selected residues.

    Residues are paired positionally; only pairs whose residue number in
    struct_a appears in *residue_list* contribute.

    NOTE(review): raises ZeroDivisionError when no residue matches --
    same behaviour as before.
    """
    total_sq = 0.0
    n_pairs = 0
    for res_a, res_b in zip(struct_a.get_residues(), struct_b.get_residues()):
        if res_a.get_id()[1] not in residue_list:
            continue
        total_sq += (res_a['CA'] - res_b['CA']) ** 2
        n_pairs += 1
    return math.sqrt(total_sq / n_pairs)
626e18ebf8786b330f7ab9508acf5d78872264d5
94,174
def remove_prefix(text, prefix):
    """Remove *prefix* from *text* when present; otherwise return text unchanged."""
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
e7d5b43b36e6e58cba5ce74eeca003e5354fa364
94,175
from typing import Mapping def _merge_once(source, update): """Recursively update a mapping with new values. Args: source: Mapping to be updated. update: Mapping whose key-value pairs will update those in source. Key-value pairs will be inserted for keys in update that do not exist in source. Returns: Recursively updated mapping. """ for key in update: if key in ["patch_files", "diagnostics"]: source.setdefault(key, []).extend(update[key]) elif ( key in source and isinstance(source[key], Mapping) and isinstance(update[key], Mapping) ): _merge_once(source[key], update[key]) else: source[key] = update[key] return source
eb655aaccd5d02a2d31e1f488962b9e220ca59d4
94,178
def _get_friendly_name(xml): """ Extract device name from description xml xml -- device description xml return -- device name """ try: return xml['root']['device']['friendlyName'] except Exception as e: return 'Unknown'
7841efa1037f43f501b14ba213ecc29742b705c2
94,180
def post(tokens):
    """Post-process output from the NLTK tokenizer.

    Args:
        tokens: a list containing a tokenized text

    Returns:
        New list with any token-final "." removed.
    """
    # t[-1] (not .endswith) keeps the original IndexError on empty tokens.
    return [t[:-1] if t[-1] == "." else t for t in tokens]
dca1e3c925a09d0791410b28e62093efb3c459c3
94,181
def _count_datacenters(grouped_networks): """Count the number of datacenters in each group of networks Returns: list of tuples: the first element is the group key, while the second element is the number of datacenters in each group. """ return ((key, len(set(n['datacenter_id'] for n in group))) for key, group in grouped_networks.items())
c97d68574d661ac3b7cff1dc3c31ac81a7f2a1e3
94,194
from typing import Any
from typing import Counter
from typing import List


def duplicatesInList(l: List[Any]) -> List[Any]:
    """Return the elements that occur more than once in the given list.

    Source: https://stackoverflow.com/a/9835819/2972183
    """
    occurrences = Counter(l)
    return [element for element, count in occurrences.items() if count > 1]
2b99aed61575cc15bdbb26d6cffc663dd59a8aaa
94,195
from typing import Dict
from typing import Any
import json


def load_config_file(path: str) -> Dict[str, Any]:
    """Load a YAML (.yml/.yaml) or JSON (.json) file into a dict.

    Arguments:
        path: The relative path to the file to load.

    Returns:
        A dict version of the file contents.

    Raises:
        NotImplementedError: for any other file extension.
    """
    file_ext = path.split('.')[-1]
    if file_ext in {'yml', 'yaml'}:
        # Deferred import: JSON-only callers no longer need PyYAML installed.
        import yaml
        with open(path, 'rb') as file:
            config = yaml.load(file, Loader=yaml.FullLoader)
    elif file_ext == 'json':
        # BUG FIX: the original called json.loads(path), which parsed the
        # *path string* itself instead of the file's contents.
        with open(path, 'r') as file:
            config = json.load(file)
    else:
        raise NotImplementedError('unrecognized file extension .{:s} for file {:s}'.format(file_ext, path))
    return config
fdbfd9a6a7869e59b3fe6aab503c1ff67815bd93
94,196
def regex_unanchor(regexp):
    """Turn an (implicitly) anchored ES regexp into a substring match.

    ES regexps are anchored, so '.*' is prepended/appended to get
    substring semantics -- skipped when the pattern explicitly anchors
    that end with '^'/'$' (the anchor is stripped instead) or already
    starts/ends with a wildcard, to avoid doubling up wildcards in case
    the engine backtracks badly.
    """
    if regexp.startswith('^'):
        regexp = regexp.lstrip('^')
    elif not regexp.startswith('.*'):
        regexp = '.*' + regexp

    if regexp.endswith('$'):
        regexp = regexp.rstrip('$')
    elif not regexp.endswith('.*'):
        regexp = regexp + '.*'
    return regexp
0f65afd150a9bd7e7b2a7a80f9deb6566e206a8b
94,198
def recurring_event_datetime(event):
    """Recurring events have no single datetime to catalog; always return None."""
    return None
5a4060fce82e805e4ad7e40b0f1c74992c59165b
94,200
def add_EOS(data, sequence_lengths, EOS=-1):
    """Write an EOS marker directly after the end of each sequence.

    @param data: sequences in **batch-major** format (mutated in place)
    @param sequence_lengths: array-like with the length of each sequence
    @param EOS: the end-of-sequence marker value
    @return: the mutated `data`

    NOTE(review): assumes each row has room at index sequence_lengths[i];
    otherwise an IndexError propagates -- confirm with callers.
    """
    for index, sequence in enumerate(data):
        end = sequence_lengths[index]
        sequence[end] = EOS
    return data
050a56655abbea9feca0610c1550eb30125424c9
94,206
import hashlib


def get_sanitised_kubernetes_name(
    name: str,
    replace_dots: bool = False,
    replace_forward_slash: bool = False,
    length_limit: int = 0,
) -> str:
    """Ensure names given to Kubernetes objects follow our conventions.

    replace_dots is for objects (e.g. Containers) that cannot contain `.`
    in their names; replace_forward_slash is for objects whose "pretty"
    name may contain `/` but whose Kubernetes name cannot.

    Names exceeding length_limit are truncated and suffixed with a short
    hash so the result stays unique.
    """
    name = name.replace("_", "--")
    if name.startswith("--"):
        name = name.replace("--", "underscore-", 1)
    if replace_dots:
        name = name.replace(".", "dot-")
    if replace_forward_slash:
        name = name.replace("/", "slash-")
    if length_limit and len(name) > length_limit:
        # Keep length_limit-6 characters, then "--" (2 chars) plus a
        # 4-character md5 prefix to keep truncated names unique.
        digest = hashlib.md5(name.encode("ascii")).hexdigest()
        name = "{}--{}".format(name[: length_limit - 6], digest[:4])
    return name.lower()
782dab1eb3849c047ce9670b2539ff7e4a4f92cd
94,208
from typing import Dict
from typing import Callable


def create_factory(possibilities_dict: Dict[str, Callable], are_methods: bool = False) -> Callable:
    """Generic factory builder for the entire project.

    Args:
        possibilities_dict: maps object-type names (strings!) to the
            relevant class constructors (or methods).
        are_methods: True when the factory output should be the callables
            themselves rather than constructed objects. Defaults to False.

    Returns:
        A factory function over the given mapping; it raises ValueError
        for unsupported type names.
    """
    def factory_func(requested_object_type: str):  # Inner function!
        if requested_object_type not in possibilities_dict:
            raise ValueError("Object type {0} is NOT supported".format(requested_object_type))
        selected = possibilities_dict[requested_object_type]
        return selected if are_methods else selected()
    return factory_func
03086b7350bb14e65e14c0525e164d5e9ffbce47
94,209
def header(mesg):
    """Return *mesg* on its own line, underlined with '=' characters."""
    underline = '=' * len(mesg)
    return '\n{}\n{}'.format(mesg, underline)
84e187c4260bce38fde3fdfc6cdc5a51f37823d6
94,216
def get_model_name(model):
    """Return the name of the model as recorded on its `_meta` options."""
    meta = model._meta
    return meta.model_name
1d7113259b2e3edbf49036aa335c6329d218a7ee
94,218
def parse_linedata(linedata: str) -> dict:
    """Parse one whitespace-separated line of batch-command text.

    Authors: Zakaria Ismail, Yanglong Liu

    :param linedata: text of the form "<filename> <save_file_as> <commands...>"
    :return: dict with keys "filename", "save_file_as" and "commands"
        (the remaining tokens as a list).

    >>> parse_linedata('miss_sullivan.png batch_result.png 2 X V P') == {
    ...     "filename": "miss_sullivan.png",
    ...     "save_file_as": "batch_result.png",
    ...     "commands": ['2', 'X', 'V', 'P']}
    True
    """
    tokens = linedata.split()
    return {
        "filename": tokens[0],
        "save_file_as": tokens[1],
        "commands": tokens[2:],
    }
b11d304da8ee2871528eef00f5c6ffddbbe933a3
94,227