content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Sequence


def _parse_data(column_indices, parts: Sequence[str]) -> dict:
    """Build a dict mapping each column name in ``column_indices`` to the
    element of ``parts`` (the split fields of a data line) at its index.
    """
    row = {}
    for column_name, index in column_indices.items():
        row[column_name] = parts[index]
    return row
9bcd986479df3f54c2d215678c24dfa837287a94
93,041
from typing import List


def comment(lines: List[str]) -> List[str]:
    """Prepend the ``// `` comment marker to each line.

    :param lines: Lines of text to be commented
    :return: The same lines, each prefixed with ``// ``
    """
    return [f'// {line}' for line in lines]
dfb83ec627183d1e45eeca3ea937e9a27444f148
93,045
def sort_counts(word_counts, method):
    """Sort a word count collection.

    Args:
        word_counts (collections.Counter): word counts
        method (str): sorting method, 'count' (descending frequency) or
            'alphabetical'

    Returns:
        list: (word, count) tuples in the requested order.

    Raises:
        ValueError: if ``method`` is not a supported sorting method.
    """
    # Raise a real exception instead of using ``assert`` for input
    # validation: asserts are stripped when Python runs with -O.
    if method not in ('count', 'alphabetical'):
        raise ValueError("Invalid sorting method")
    if method == 'count':
        return word_counts.most_common()
    return sorted(word_counts.items())
9bc21e80e83258bb5a0dfe613ddbf22479f3b299
93,046
def path_to_pattern(path, metadata=None):
    """Strip source information from ``path`` when caching is in use.

    Parameters
    ----------
    path : str
        Path to data, optionally containing format strings.
    metadata : dict, optional
        Extra class arguments; may hold cache information under 'cache'.

    Returns
    -------
    pattern : str or None
        Pattern-style path stripped of everything left of the cache regex,
        or None when ``path`` is not a str.
    """
    if not isinstance(path, str):
        return None
    pattern = path
    cache_entries = (metadata or {}).get('cache')
    if cache_entries:
        # Find the regex configured for the 'urlpath' argument and keep
        # only what follows it in the path.
        regex = next(entry.get('regex')
                     for entry in cache_entries
                     if entry.get('argkey') == 'urlpath')
        pattern = pattern.split(regex)[-1]
    return pattern
87283f3e9cd9e1cf34114ca2f03a1e6656dcda88
93,049
import functools
import tempfile


def with_temporary_folder(func):
    """Run the decorated function inside a ``tempfile.TemporaryDirectory``
    context manager, appending the temporary directory name as an extra
    positional argument (after ``*args``).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            return func(*args, tmp_dir_name, **kwargs)
    return wrapper
1e3bf82dcd894e615f62e865d87c9bcc6ef4dcb9
93,051
import re


def name_to_snake_case(name):
    """Convert ``name`` from CamelCase to snake_case."""
    # Insert an underscore before each word-start capital (not at the
    # beginning, not after an existing underscore), then lowercase.
    camel_boundary = '((?!^)(?<!_)[A-Z][a-z]+|(?<=[a-z0-9])[A-Z])'
    return re.sub(camel_boundary, r'_\1', name).lower()
af58e5448c4f6b520e8412c4b8ba97dd8144e5b4
93,053
def doc_brief(s):
    """Return the summary line of an operator's docstring.

    When ``s`` starts with ``*brief*``, the first paragraph is returned
    with the leading ``*brief*`` token dropped and whitespace collapsed;
    otherwise ``s`` is returned unchanged.
    """
    if not s.startswith("*brief*"):
        return s
    first_paragraph = s.split("\n\n")[0]
    return " ".join(first_paragraph.split()[1:])
ae0a3c5fee87de5cdcf596455d3423e5602d7500
93,054
def print_progress(sess, i, loss, losses, train_dict, validation_dict, x_norm, sindy_predict_norm):
    """
    Print loss function values to keep track of the training progress.

    Arguments:
        sess - the tensorflow session
        i - the training iteration
        loss - tensorflow object representing the total loss function used in training
        losses - tuple of the individual losses that make up the total loss
        train_dict - feed dictionary of training data
        validation_dict - feed dictionary of validation data
        x_norm - float, the mean square value of the input
        sindy_predict_norm - float, the mean square value of the time derivatives of the
        input. Can be first or second order time derivatives depending on the model order.

    Returns:
        Tuple of losses calculated on the validation set.
    """
    # Evaluate the total loss plus every individual loss term on both the
    # training and validation feed dictionaries.
    training_loss_vals = sess.run((loss,) + tuple(losses.values()), feed_dict=train_dict)
    validation_loss_vals = sess.run((loss,) + tuple(losses.values()), feed_dict=validation_dict)
    print("Epoch %d" % i)
    # Element 0 is the total loss; the rest are the individual terms.
    print(" training loss {0}, {1}".format(training_loss_vals[0], training_loss_vals[1:]))
    print(" validation loss {0}, {1}".format(validation_loss_vals[0], validation_loss_vals[1:]))
    # Ratios of the reconstruction/SINDy losses to the data norms give a
    # scale-free view of training quality (computed on validation data).
    decoder_losses = sess.run((losses['decoder'], losses['sindy_x']), feed_dict=validation_dict)
    loss_ratios = (decoder_losses[0]/x_norm, decoder_losses[1]/sindy_predict_norm)
    print("decoder loss ratio: %f, decoder SINDy loss ratio: %f" % loss_ratios)
    return validation_loss_vals
695120e6e2a8ba03efa2465408db7fa99342ae3a
93,055
def sort_dict(dict_unsorted):
    """Return the dictionary's items as a list of (key, value) pairs sorted
    by value in descending order.
    """
    return sorted(dict_unsorted.items(), key=lambda item: item[1], reverse=True)
1b9be6dcf620e9b9c0074aa5fe03091d23e1b87d
93,063
def get_info(model):
    """get_info.

    Create a basic, easily shareable representation of the qubovert object
    encoded by ``model``.  It round-trips so that
    ``model == qv.utils.create_from_info(qv.utils.get_info(model))`` is True.

    Parameters
    ----------
    model : type in ``qubovert.BOOLEAN_MODELS`` or ``qubovert.SPIN_MODELS``.

    Returns
    -------
    res : dict.
        Always contains ``type`` (the qubovert type name), ``terms`` (the
        terms in the model) and ``name`` (``model.name``).  Depending on the
        input type, it may also contain ``mapping``, ``num_ancillas``, and
        ``constraints`` fields.

    See Also
    --------
    qubovert.utils.create_from_info : opposite function.
    """
    info = {
        "type": type(model).__name__,
        "terms": dict(model),
        "name": model.name,
    }
    # Optional fields only some model types define.
    for optional_attr in ("mapping", "num_ancillas", "constraints"):
        if hasattr(model, optional_attr):
            info[optional_attr] = getattr(model, optional_attr)
    return info
22ccda410ea1d882ddc7792e9c68d0f20796f78b
93,065
import requests
def extra_metadata_helper(resource_id, headers):
    """
    Build extra metadata dict to help with other integrations.

    Parameters
    ----------
    resource_id: str
        The OSF resource ID
    headers: dict
        OSF Authorization header

    Returns
    -------
    Extra metadata dictionary
    """
    # Get project information
    base_url = "https://api.osf.io/v2/nodes/{}/".format(resource_id)
    project_info = requests.get(base_url, headers=headers).json()
    # Build creators list from the project's citation data.
    # NOTE(review): ORCID is always None here — presumably not exposed by
    # this endpoint; confirm before relying on it downstream.
    citation_data = requests.get("{}citation/".format(base_url), headers=headers).json()
    creators = [{
        "first_name": author['given'],
        "last_name": author['family'],
        "ORCID": None}
        for author in citation_data['data']['attributes']['author']]
    # Get license if it exists (``license`` shadows the builtin, kept as-is).
    license = None
    if 'license' in project_info['data']['relationships'].keys():
        license_data = requests.get(project_info['data']['relationships']['license']['links']['related']['href'],
                                    headers=headers).json()
        if license_data['data']['attributes']:
            license = license_data['data']['attributes']['name']
    # See if there's an identifier (e.g. DOI) for this project
    identifier_data = requests.get("{}identifiers/".format(base_url), headers=headers).json()
    identifiers = [{
        "type": identifier['attributes']['category'],
        "identifier": identifier['attributes']['value']}
        for identifier in identifier_data['data']]
    # Assemble the final metadata payload; fields with no OSF source stay None.
    extra_metadata = {
        "title": project_info['data']['attributes']['title'],
        "creators": creators,
        "publication_date": project_info['data']['attributes']['date_created'],
        "description": project_info['data']['attributes']['description'],
        "keywords": project_info['data']['attributes']['tags'],
        "license": license,
        "related_identifiers": identifiers,
        "references": None,
        "notes": None
    }
    return extra_metadata
19038f10fdd11602de7a892fba0a2801ce01fa46
93,066
def generate_new_rule(child_id, parent_tag, parent_id, priority, maxdepth, pkg_filter, intransitive, noconfig):
    """Build a full inheritance rule to add for this child tag.

    :param int child_id: Koji tag id
    :param str parent_tag: Koji tag name
    :param int parent_id: Koji tag id
    :param int priority: Priority of this parent for this child
    :param int maxdepth: Max depth of the inheritance
    :param str pkg_filter: Regular expression string of package names to include
    :param bool intransitive: Don't allow this inheritance link to be inherited
    :param bool noconfig: Prevent tag options ("extra") from being inherited
    :return: dict describing the inheritance rule
    """
    return dict(
        child_id=child_id,
        intransitive=intransitive,
        maxdepth=maxdepth,
        name=parent_tag,
        noconfig=noconfig,
        parent_id=parent_id,
        pkg_filter=pkg_filter,
        priority=priority,
    )
3eae1d9e5eb47c6941bf9a5767dfd836bcc6d366
93,068
import re
def get_common_sids(linesl):
    """ Use the formatted lines. Return dictionary of common_sid:meaning """
    foundstartcsids = False
    common_sids = {}
    for line in linesl:
        # Stop once the next section ("Engine SIDs") begins.
        if foundstartcsids and "Engine SIDs" in line:
            break
        # The " Common SIDs" heading marks the start of the section we want.
        if not foundstartcsids and " Common SIDs" in line:
            foundstartcsids = True
        elif foundstartcsids:
            # Data rows look like: spaces, a SID number, then a non-dot char
            # (the [^\.] excludes dotted separator/continuation lines).
            if re.match(' +[0-9]+[^\.]',line):
                # NOTE(review): assumes exactly three column-separated fields
                # (9+ spaces between columns); a row with more/fewer fields
                # would raise ValueError on this unpack — confirm input format.
                garbage,sid,meaning = re.split(" {9,}",line)
                common_sids[sid.strip()] = meaning.strip()
    return common_sids
96ae8f4fd20f5c04f8c95d12921b033d09855d7f
93,073
def bloblist_to_dict(bloblist):
    """Return the bloblist as a dict on the form filename -> blobinfo."""
    blobdict = {blob['filename']: blob for blob in bloblist}
    # Equal lengths guarantee no filename appeared twice.
    assert len(blobdict) == len(bloblist), "Duplicate filename in bloblist"
    return blobdict
a3cf499e5416b6312a0cbbacafee2a850efb85cb
93,074
def _FormatBytes(byts):
    """Pretty-print a number of bytes (MiB/KiB with 'm'/'k' suffixes)."""
    for threshold, suffix in ((2**20.0, 'm'), (2**10.0, 'k')):
        if byts > threshold:
            return '%.2f%s' % (byts / threshold, suffix)
    return str(byts)
5c5f008de79977caa641387eb9540555699c332d
93,079
def to_pymunk(x, y):
    """Convert a pygame position to a pymunk position by flipping the
    y-axis around the 600-pixel screen height.
    """
    return (x, 600 - y)
cea9404ee7db82d866c69946c06e16f07a364d36
93,084
def format_install_url(app_id: str, location_id: str) -> str:
    """Return a web-based URL to auth and install a SmartApp."""
    # Login URL with a percent-encoded redirect to the strongman endpoint:
    # %3A=':', %2F='/', %3F='?', %3D='=', %26='&'.  app_id and location_id
    # are interpolated into the encoded query string.
    return f"https://account.smartthings.com/login?redirect=https%3A%2F%2Fstrongman-regional.api.smartthings.com%2F%3FappId%3D{app_id}%26locationId%3D{location_id}%26appType%3DENDPOINTAPP%26language%3Den%26clientOS%3Dweb%26theme%3Dsmartthings"
c5daa48925fac99ed48c73f741d25514bd0f8b31
93,086
def less_uppers(one, two):
    """Return the string with fewer uppercase letters (``one`` on a tie)."""
    # Count uppercase letters directly.  The previous version counted
    # lowercase letters as a proxy, which is only equivalent when both
    # strings have the same length (e.g. "A" vs "aaaBBBB" was wrong).
    one_uppers = sum(1 for c in one if c.isupper())
    two_uppers = sum(1 for c in two if c.isupper())
    return one if one_uppers <= two_uppers else two
12bb7f5520f526c92155586a47d69918a7dbfa4e
93,088
def instantiate_steady_state_mutable_kwargs(dissolve, block_kwargs, solver_kwargs, constrained_kwargs):
    """Replace ``None`` defaults from the steady_state function with fresh
    mutable instances (a list for ``dissolve``, dicts for the rest).
    """
    dissolve = [] if dissolve is None else dissolve
    block_kwargs = {} if block_kwargs is None else block_kwargs
    solver_kwargs = {} if solver_kwargs is None else solver_kwargs
    constrained_kwargs = {} if constrained_kwargs is None else constrained_kwargs
    return dissolve, block_kwargs, solver_kwargs, constrained_kwargs
61b4d53ebd3e800e2701a2670784a635f0070606
93,089
import socket
import struct


def ip2int(ip):
    """Convert a dotted-quad IP string to its 32-bit integer value."""
    # inet_aton packs the address big-endian; "!I" unpacks it the same way.
    return struct.unpack("!I", socket.inet_aton(ip))[0]
6052ad155fcac0c5b2286560c5d4b74afc125237
93,094
def merge_sort(x):
    """Sort a list of integers with mergesort.

    Takes a list of integers and returns a new sorted list.  When using
    large lists, beware of Python's limit for recursion depth.
    """
    if len(x) <= 1:
        # Base case of the recursion.
        return x
    mid = len(x) // 2
    left = merge_sort(x[:mid])
    right = merge_sort(x[mid:])
    # Merge the two sorted halves.
    merged = []
    li = ri = 0
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    merged.extend(left[li:])
    merged.extend(right[ri:])
    return merged
f6b498dd88927446d65bb70833258a0a027de8a0
93,097
def cli(ctx, dataset_collection_id, maxwait=12000, interval=3, proportion_complete=1.0, check=True):
    """Wait until all or a specified proportion of elements of a dataset
    collection are in a terminal state.

    Thin CLI wrapper: forwards all arguments unchanged to
    ``ctx.gi.dataset_collections.wait_for_dataset_collection``.

    Output:

        Details of the given dataset collection.
    """
    return ctx.gi.dataset_collections.wait_for_dataset_collection(dataset_collection_id, maxwait=maxwait, interval=interval, proportion_complete=proportion_complete, check=check)
1f19003c2666acbbd2d144bb641433e516a7832a
93,098
import re


def get_split_course(course):
    """Parse a course from programdesignation into the
    ('program', 'designation') form, e.g. 'CS1101' -> ('CS', '1101').
    """
    flattened = []
    # findall yields one (program, designation) group tuple per match;
    # flatten the groups into a single tuple.
    for group_tuple in re.findall('((?:[A-Z]+-)?[A-Z]+)(.+)', course):
        flattened.extend(group_tuple)
    return tuple(flattened)
4ad117ed8cf3cace05d0e566588ec5e965c8cb76
93,100
from typing import List
from typing import Dict
from typing import Any


def validate_required_keys_for_assets_key(assets: List[Dict[str, Any]], asset_required_keys: set) -> bool:
    """
    Check whether every asset to be added or updated contains all required keys.

    :param assets: List of assets
    :param asset_required_keys: The required keys for adding or updating an asset
    :return: True if the required keys are present in every asset, else False
    """
    for asset in assets:
        # dict keys view >= set tests superset containment.
        if not asset.keys() >= asset_required_keys:
            return False
    return True
f99ebd3436d9ca320418f344519e909bae226a09
93,102
def read_menu_event(window, choices, timeout=None):
    """Reads the window event. Return `None` if no event to respond to,
    otherwise return the selected menu item's text.

    ``window`` is a GUI window object (read/update/indexing interface);
    ``choices`` is the full list of menu items used to reset the listbox.
    """
    event, values = window.read(timeout=timeout)
    # A falsy event (window closed / timed out with nothing) toggles the GUI.
    if not event:
        return "toggle gui"
    # Update the values to clear the current selection, since we've read it.
    window["cmd list"].update(values=choices)
    if event == "Cancel":
        return "toggle gui"
    if not values or not values["cmd list"]:
        return None  # Got no command, do nothing.
    # The listbox selection is a list; return the first selected item's text.
    return values["cmd list"][0]
d1af60a8b25ce3fb7565de6fdfc11ee553789ec1
93,103
def valid_paths(view):
    """Return the file paths of every view in ``view``'s window, skipping
    views that have no file on disk (None paths).
    """
    all_paths = (v.file_name() for v in view.window().views())
    return [path for path in all_paths if path is not None]
b5612da62b2cf7411a0452614397253f20b632c5
93,106
import six


def is_callable_tag(tag):
    """
    Determine whether :tag: is a valid callable string tag.

    A string is assumed to be a valid callable if it starts with '{{'
    and ends with '}}' (after stripping surrounding whitespace).

    :param tag: String name of tag.
    """
    if not isinstance(tag, six.string_types):
        return False
    stripped = tag.strip()
    return stripped.startswith('{{') and stripped.endswith('}}')
4c18482e329386c6b6e5538ea56527002bf51d40
93,111
def total_of_regular_investment(reg_invest_value, rate, n_periods):
    """
    A special case of total_of_series_of_invest, when the investments are
    constant and the rate remains constant.  Uses the closed-form geometric
    series formula instead of recursion.  Not super useful except to keep
    track of the formula for other usage where performance would matter
    (unlikely).

    >>> total_of_regular_investment(10, 0, 5)
    50

    The investment is applied at the END of the period and thus does not
    benefit from its growth

    >>> total_of_regular_investment(10, 0.01, 1)
    10.0
    >>> total_of_regular_investment(10, 0.01, 2)
    20.099999999999987
    >>> total_of_regular_investment(10, 0.01, 5)
    51.0100501000001
    """
    # Zero rate degenerates to simple accumulation.
    if rate == 0:
        return reg_invest_value * n_periods
    growth = 1 + rate
    return reg_invest_value + reg_invest_value * (growth - growth ** n_periods) / (
        1 - growth
    )
abb65e3404bf12949f31b120bb6c60639016556e
93,112
import math


def entropy(counts, sz):
    """
    Calculate the entropy (in bits per byte) of the data represented by
    the counts list.

    Arguments:
        counts: List of counts.
        sz: Length of the data in bytes.

    Returns:
        Entropy value.
    """
    total = 0.0
    for count in counts:
        if not count:
            continue
        p = count / sz
        # log base 256 gives entropy per byte; scaled to bits below.
        total -= p * math.log(p, 256)
    return total * 8
13e1032956037c2973093f0de9b05a7ec78a9fd0
93,113
def normalize(x, inp_max=1, inp_min=-1):
    """Linearly rescale the numpy array ``x`` so its values span
    [inp_min, inp_max].  Returns an array of the same shape.
    """
    lowest = x.min()
    span = x.max() - lowest
    return (inp_max - inp_min) * (x - lowest) / span + inp_min
cf245df451bda1e114ab7b2a1f5e41908ea4a786
93,118
def chk(condition, message='Check failed', exc=RuntimeError):
    """Check a condition; raise ``exc(message)`` when ``bool(condition)`` is
    False, otherwise return ``condition`` unchanged.
    """
    if condition:
        return condition
    raise exc(message)
34645305cead70a45f919207890f0ee4c186682d
93,119
import re


def strip_urls(text, replace=' '):
    """A simple preprocessing function that strips URLs from text, based on
    a regex; each match is substituted with ``replace``.
    """
    # simple python regex for URLs: bit.ly/PyURLre
    url_re = re.compile('(https?://)?(\\w*[.]\\w+)+([/?=&]+\\w+)*')
    return url_re.sub(replace, text)
cf52978194f16138ea93909d07fa6dec12c6c4e0
93,120
def isLastPage(page_num, count_of_combos, per_page):
    """Return True if this is the last page in the pagination."""
    items_shown_so_far = page_num * per_page
    return count_of_combos <= items_shown_so_far
e7c4f76e1c9e13dae27ecea742f85f25dee14758
93,128
def bg_thresholds( dark_arr, n_std=3 ):
    """
    Calculate band-wise mean radiance plus ``n_std`` standard deviations
    for pixels in `dark_arr`.

    Lyzenga et al. 2006 says: "...the blue and green bands are thresholded
    at the deep-water mean radiance plus three standard deviations."  This
    method calculates mean + n_std * std for ALL bands; pick out the blue
    and green ones later if that's what you're after.

    Parameters
    ----------
    dark_arr : numpy Masked Array
        Typically the output of `Lyzenga2006.dark_pixels_array`.
    n_std : int (Default value = 3)
        The number of standard deviations to add to the mean.  Lyzenga
        et al. 2006 uses 3, so that's the default.

    Returns
    -------
    numpy array
        1D array with one threshold per band of `dark_arr`.
    """
    nbands = dark_arr.shape[-1]
    # Flatten all pixel dimensions, keeping the band axis last.
    flattened = dark_arr.reshape(-1, nbands)
    band_means = flattened.mean(0).data
    band_stds = flattened.std(0).data
    return band_means + n_std * band_stds
0658e1c0efe8efb1beaa0798a3e0ae52e419903a
93,132
def parseversion(version):
    """
    Method to parse a version string from an AT or a BDP to turn it into
    ints so it can be easily compared.

    Parameters
    ----------
    version : str
        The string to parse, e.g. "1.2.3", "1.2", or "1".

    Returns
    -------
    Tuple containing the major, minor, and sub version numbers (all ints);
    components missing from the string default to 0.

    Raises
    ------
    ValueError
        If the string does not contain 1, 2, or 3 dot-separated ints.
    """
    # split the string into the components
    parts = version.split(".")
    if len(parts) > 3:
        # ValueError instead of bare Exception (still caught by existing
        # ``except Exception`` handlers); also fixes the "conatin" typo.
        raise ValueError("Improperly formatted version string, it must contain 1, 2, or 3 ints.")
    # Missing minor/sub components default to 0.
    numbers = [int(part) for part in parts] + [0] * (3 - len(parts))
    return tuple(numbers)
5f70a188da22ea35c0112e8de0e7a96bb0e84ae1
93,141
def binary_switchEndian(s):
    """Switch the endianness of a binary string by reversing it."""
    reversed_s = s[::-1]
    return reversed_s
5d159d82232347352aaece30a6ae8b4d788b578b
93,158
def parse_response(browse_nodes_response_list):
    """
    Parse a Browse Nodes response into a dict of BrowseNodeID to
    BrowseNode object.

    :param browse_nodes_response_list: List of BrowseNodes in
        GetBrowseNodes response
    :return: Dict of BrowseNodeID to BrowseNode object
    """
    return {node.id: node for node in browse_nodes_response_list}
4827752e5f3540ec58f32f68bb4cf65f6b330d5d
93,160
def count_gender(data_list):
    """
    Count the gender values in a list of records.

    Arguments:
        data_list: list of records, with the gender stored in the
            second-to-last column of each record.

    Returns:
        A list with the number of 'Male' and 'Female' records, in that order.
    """
    counts = {'Male': 0, 'Female': 0}
    for record in data_list:
        gender = record[-2]
        if gender in counts:
            counts[gender] += 1
    return [counts['Male'], counts['Female']]
252af2ac0ffcaedef3b86c508274694c9e0a32bf
93,164
def detector_substr(detector):
    """
    Change a detector string to match file format
    (e.g., "SCA01" -> "SCA_1").
    """
    prefix, number = detector[:3], detector[3:]
    # int() drops any leading zeros from the numeric part.
    return f"{prefix}_{int(number)}"
878a75146b5bf03d020acfa2b6363c166a3b0e4d
93,165
def get_location_id(manager, location):
    """Return the id of the datacenter named ``location``.

    :param manager: The storage manager which calls this function.
    :param location: Datacenter short name
    :return: Returns location id
    :raises ValueError: if no datacenter matches the given name
    """
    loc_svc = manager.client['Location_Datacenter']
    for datacenter in loc_svc.getDatacenters(mask='mask[longName,id,name]'):
        if datacenter['name'] == location:
            return datacenter['id']
    raise ValueError('Invalid datacenter name specified.')
460e05fbdcdbc7a1f3f946c939c5a43aaa44c502
93,169
def append_dot(instring):
    """
    Return a string ending in exactly one dot, or an empty string when
    passed an empty object (including None).

    This is useful for building dot-separated nodes from arbitrary strings
    that may already terminate with a dot, or be empty or None.
    """
    if not instring:
        return ""  # empty string for None/empty input
    return instring if instring.endswith('.') else instring + '.'
d8f9300641348a934be0bbd144b436477d0ee1ef
93,176
import math


def round_to_interval(value, interval):
    """Round a number to a given interval."""
    # Number of whole intervals (the +0.5 biases ties upward), then round
    # the result to the decimal precision implied by the interval size.
    multiples = round(value / interval + 0.5)
    decimals = -int(math.floor(math.log10(interval)))
    return round(multiples * interval, decimals)
6c425b7bc2a8edebb58ba64f177dea21433f8e0c
93,178
def is_native_reference(name):
    """Check if the given name belongs to a natively supported method of Python"""
    native_names = ('int', 'str', 'len', 'filter', 'enumerate',
                    'float', 'list', 'dict', 'pow', 'sum')
    return name in native_names
e50dfcb781011f6a522801c7ebd18c052352d4a1
93,181
def get_cmp_sign(a, b):
    """Convert comparison result to single character representation."""
    if a < b:
        sign = '<'
    elif a > b:
        sign = '>'
    else:
        sign = '=='
    return sign
0ea842b3694ef7193749471167b974426b73bfd3
93,184
import re


def FloatStringToFloat(float_string, problems=None):
    """Convert a float given as a string to a float or raise an exception.

    Raises TypeError for non-string input and ValueError for strings that
    can't be parsed (including hex-style ones).  Strings that parse but
    don't match the canonical decimal pattern are reported via
    ``problems.InvalidFloatValue`` when ``problems`` is given.
    """
    # re.match raises TypeError unless given a string.
    canonical = re.match(r"^[+-]?\d+(\.\d+)?$", float_string)
    # float() raises ValueError if the string can't be parsed.
    parsed_value = float(float_string)
    if "x" in float_string:
        # This is needed because Python 2.4 did not complain about
        # float("0x20"); it does complain about float("0b10"), so this
        # check is enough.
        raise ValueError()
    if not canonical and problems is not None:
        # Parseable by Python, but not matching the strict decimal regex.
        problems.InvalidFloatValue(float_string)
    return parsed_value
2ab70906ce3a1ddc9fa7779f26867396411c0684
93,185
from functools import reduce


def find_longest_common_prefix_reduce(words: list):
    """
    Find the longest common prefix in a list of words, using 'reduce'.
    Returns '' for an empty list.
    """
    if not words:
        return ''

    def shared_prefix(a, b):
        # Walk both strings until they differ or the shorter one ends.
        limit = min(len(a), len(b))
        i = 0
        while i < limit and a[i] == b[i]:
            i += 1
        return a[:i]

    return reduce(shared_prefix, words)
52ef4553bea70b879f8300e41f540cbe1069391b
93,186
import re


def get_sentences(paragraph):
    """Returns a list of sentences from a paragraph

    This is a rather naive implementation; there are probably better ones
    out there.  Splits on '.', '!' and '?' and assumes the paragraph has
    proper punctuation.
    """
    punctuation = re.compile(r'[\.!?]')
    sentences = [sentence.strip() for sentence in punctuation.split(paragraph)]
    # ``filter`` returns a lazy iterator in Python 3; materialize it so the
    # function actually returns the list its docstring promises.
    return [sentence for sentence in sentences if sentence]
864fc62c3d7c9aa6cde6c86fabbda0e7824815bb
93,187
def amount(amount):
    """
    Format amount to an amount string; '.0' is stripped when the amount
    represents an integer.  Returns None when given None.
    """
    if amount is None:
        return None
    value = round(float(amount), 8)
    as_int = round(value)
    # Integral values render without a decimal part.
    return str(as_int) if as_int == value else str(value)
600e5377c31fcc2dd6383f28569427ddad56d6f6
93,189
def yearly_hours(activities, years=4):
    """Sum activity hours grouped by year group (``yr_grp``), skipping
    records whose CPD has expired within ``years``.
    """
    group_data = dict()
    for record in activities:
        if record.cpd_expired(years):
            continue
        group_data.setdefault(record.yr_grp, 0)
        group_data[record.yr_grp] += record.total_hrs
    return group_data
5f1fd53eb2cfcef08354e14bce93c0dcd1cee4d2
93,191
def gallery(title, image_elem_list):
    """
    Build an image gallery out of a list of image elements.

    The gallery element groups images under a single heading and conserves
    space on the output page.

    Args:
        title: The title to display
        image_elem_list: The image elements to display.  If a single image
            element is given it will automatically be wrapped into a list.

    Returns:
        A dictionary with metadata specifying that it is to be rendered
        as an image gallery
    """
    return {
        'Type': 'Gallery',
        'Title': title,
        'Data': image_elem_list,
    }
60c71266158ebdf937aba9be1c6d46a4c86aa0d7
93,195
import torch
def mc_stft(y_s, n_fft, hop_length, win_length):
    """
    Multi-Channel STFT

    Shape:
        y_s: [B, C, T]

    Returns:
        complex_value: [B, C, F, T]
    """
    assert y_s.dim() == 3
    batch_size, num_channels, num_wav_samples = y_s.size()
    # Fold batch and channel axes together so torch.stft sees a batch of
    # mono signals; result is [B * C, F, T] complex.
    stft_coefficients = torch.stft(
        y_s.reshape(batch_size * num_channels, num_wav_samples),  # [B * C, T]
        n_fft=n_fft,
        hop_length=hop_length,
        # Hann window created on the same device as the input signal.
        window=torch.hann_window(win_length, device=y_s.device),
        win_length=win_length,
        return_complex=True
    )
    # Split batch and channel axes back apart: [B, C, F, T].
    return stft_coefficients.reshape(batch_size, num_channels, stft_coefficients.shape[-2], stft_coefficients.shape[-1])
a17bc4d38395eb34ea8b4148bc282a7872c6ffbc
93,196
def create_optims_default(*args, **kwargs):
    """
    Function returning an empty optimizer dict.

    Parameters
    ----------
    *args :
        arbitrary positional arguments (ignored; only provided for api
        conformity)
    **kwargs :
        arbitrary keyword arguments (ignored; only provided for api
        conformity)

    Returns
    -------
    dict
        empty dictionary
    """
    return dict()
21d31f2d3375dfeeb093fc2a5b5dc4620016081d
93,199
def confirm_raw( string: str = '' ) -> bool:
    """Return True iff the user types exactly 'yes' at the prompt."""
    prompt = string + ' Enter "yes" to continue: '
    return input(prompt) == 'yes'
9d8178db8dccc9a778ed73dd883c5b872a734b06
93,201
import requests
def download_song(song_id, name_song, path_folder, format='brstm'):
    """
    Download a song from the smash website using its id.

    :song_id: id of song in smash website
    :name_song: name of song
    :path_folder: path to downloaded file (must end with a separator)
    :format: file format to request (default 'brstm')
    :return: name of output song file
    """
    URL_down = "https://smashcustommusic.net/{}/{}".format(format, song_id)
    song_file = '{}.{}'.format(name_song, format)
    path_out = path_folder + song_file
    # Browser-like headers the site expects; Referer must point at the song page.
    headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
               "Accept-Encoding": "gzip, deflate, br",
               "Accept-Language": "en-US,en;q=0.9,fr;q=0.8",
               "Connection": 'keep-alive',
               "Cookie": "theme=1",
               "Host": "smashcustommusic.net",
               "Referer": "https://smashcustommusic.net/song/" + str(song_id),
               "sec-ch-ua": '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
               "sec-ch-ua-mobile": "?0",
               "Sec-Fetch-Dest": "document",
               "Sec-Fetch-Mode": "navigate",
               "Sec-Fetch-Site": "same-origin",
               "Sec-Fetch-User": "?1",
               "Upgrade-Insecure-Requests": "1",
               "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"}
    r = requests.get(URL_down, headers=headers)
    # ``with`` guarantees the file handle is closed; the previous
    # ``open(...).write(...)`` leaked the handle.
    with open(path_out, 'wb') as out_file:
        out_file.write(r.content)
    return song_file
c474db8226e48d6a084de0bd696407bc85838b1e
93,203
def content_encode(text: str) -> bytes:
    """
    Encode the target text to bytes (UTF-8, Python's default encoding).

    :param text: the target text
    :return: the encoded bytes
    :rtype bytes
    """
    return bytes(text, 'utf-8')
af8ddbe5f779600488293c878ecc7e5fa6699462
93,207
def xml_type(val):
    """Return a type string for writing to an XML file.

    Parameters
    ----------
    val : any type
        The value

    Returns
    -------
    type : str
        The type of the value to insert in the XML file; any unmapped
        type falls back to 'string'.
    """
    type_names = {str: 'string',
                  int: 'int',
                  bool: 'boolean',
                  float: 'float',
                  dict: 'dict',
                  list: 'list',
                  tuple: 'tuple'}
    return type_names.get(type(val), 'string')
f0f8199d2bbdf3c691eadd8b10fc4d7b07e4ab08
93,208
import torch


def transform_verts(verts, T):
    """
    Transform vertices using a 4x4 transformation matrix.

    Inputs:
    - verts: FloatTensor of shape (N, V, 3) giving a batch of vertex
      positions.
    - T: FloatTensor of shape (N, 4, 4) giving transformation matrices.

    Outputs:
    - verts_out: FloatTensor of shape (N, V, 4) giving vertex homogeneous
      positions (x, y, z, w) where verts_out[i] is the result of
      transforming verts[i] by T[i].
    """
    N, V = verts.shape[0], verts.shape[1]
    # Append w=1 to make the coordinates homogeneous before applying T.
    # (Could instead multiply by a 4x3 submatrix and add the last column;
    # unclear whether that would be measurably faster.)
    ones = torch.ones(N, V, 1, dtype=verts.dtype, device=verts.device)
    verts_hom = torch.cat([verts, ones], dim=2)
    # Batched (N,V,4) @ (N,4,4)^T applies each T[i] to the rows of verts[i].
    return torch.bmm(verts_hom, T.transpose(1, 2))
4c4d0bcb4c4feb56dc2b885ae353405c59e9fae0
93,209
import math


def cal_matching_score(sequence_len: int):
    """
    Calculate matching score: a sigmoid-shaped value in [0, 1) that grows
    with pattern length.

    Args:
        sequence_len (int): Pattern length.
    """
    sigmoid = 1 / (1 + math.pow(math.e, -0.1 * sequence_len))
    return 2 * sigmoid - 1
5e68b9657775e632303b125b69a767da06bf5bf2
93,210
import re


def parseValue(value):
    """Converts a string into a float, int, or string parameter."""
    if not re.match(r'-*\d+', value):
        # Not numeric-looking at all; keep as a string.
        return value
    if value.find("e") > 0 or value.find(".") > 0:
        # e.g. IT=1e5, IT=2.5e7, to=50.00
        return float(value)
    # e.g. M=4
    return int(value)
3375abfbf10d6104228bc896b3869b724758b4a8
93,211
import math


def pearson_C_calc(chi_square, POP):
    """
    Calculate Pearson's C (contingency coefficient).

    :param chi_square: chi squared
    :type chi_square: float
    :param POP: population or total number of samples
    :type POP: int
    :return: C as float, or the string "None" on a math error
    """
    try:
        return math.sqrt(chi_square / (POP + chi_square))
    except Exception:
        # Preserve the original API: failures yield the string "None".
        return "None"
ffbc8c033cbda9a6204297d1443956672bf1d341
93,215
def sort_double_char_labels(labels):
    """Sort double-char labels: repeated-character labels first, then
    all-lowercase labels, then everything else (original order kept
    within each group).
    """
    repeated, lowercase, remaining = [], [], []
    for label in labels:
        if label[0] == label[1]:
            repeated.append(label)
        elif label[0].islower() and label[1].islower():
            lowercase.append(label)
        else:
            remaining.append(label)
    return repeated + lowercase + remaining
979bd0de303706e27a12544f0b201484e8b4f7cb
93,216
import torch
def to_gpu(data, device=None):
    """
    Transfer tensor in `data` to gpu recursively.
    `data` can be dict, list or tuple.

    NOTE(review): the ``device`` parameter is accepted but never used —
    tensors always go to the current CUDA device; confirm whether callers
    expect device selection.
    """
    if isinstance(data, list) or isinstance(data, tuple):
        # NOTE: tuples come back as lists.
        data = [to_gpu(x) for x in data]
    elif isinstance(data, dict):
        data = {key: to_gpu(_data) for key, _data in data.items()}
    elif isinstance(data, torch.Tensor):
        # non_blocking allows async host-to-device copy from pinned memory.
        data = data.contiguous().cuda(non_blocking=True)
    return data
4ce8d1ce40bbedac0734616b6a915e672284749d
93,218
def is_float_list(iterable):
    """Check whether every element of an iterable is exactly a float
    (subclasses and ints do not count).
    """
    return all(type(element) is float for element in iterable)
7640eaf28f4ccd13fcf8514eab30451640f8efc1
93,221
def get_accumulative_list(arr):
    """
    Turn ``arr`` into its cumulative sums in place and return it:
    arr[1] gains arr[0], arr[2] gains the new arr[1], etc.

    NOTE: mutates the input list.
    """
    running_total = 0
    for index, value in enumerate(arr):
        running_total += value
        arr[index] = running_total
    return arr
f959cf18dae4b8291a7ea0e6a0d7f55c9933becd
93,222
def reward_func(win_before, loss_before, tie_win_before, tie_loss_before,
                win_after, loss_after, tie_win_after, tie_loss_after):
    """Reward of a goal based on win/loss outcome counts before and after it.

    Reward = 2 * [P(win | after) - P(win | before)]
           + 1 * [P(OT loss | after) - P(OT loss | before)]

    where P(win) counts both regulation and overtime wins, and P(OT loss)
    counts overtime losses only. When a side of the comparison has no
    observations at all, its probabilities are taken as 0.

    :param win_before: regulation wins observed before the goal
    :param loss_before: regulation losses observed before the goal
    :param tie_win_before: overtime wins observed before the goal
    :param tie_loss_before: overtime losses observed before the goal
    :param win_after: regulation wins observed after the goal
    :param loss_after: regulation losses observed after the goal
    :param tie_win_after: overtime wins observed after the goal
    :param tie_loss_after: overtime losses observed after the goal
    :return: the reward as a float
    """
    def _probabilities(win, loss, tie_win, tie_loss):
        # Returns (P(win), P(overtime loss)); zeros when nothing observed.
        total = win + loss + tie_win + tie_loss
        if total == 0:
            return 0, 0
        return (win + tie_win) / total, tie_loss / total

    p_win_before, p_tie_loss_before = _probabilities(
        win_before, loss_before, tie_win_before, tie_loss_before)
    p_win_after, p_tie_loss_after = _probabilities(
        win_after, loss_after, tie_win_after, tie_loss_after)

    return (2 * (p_win_after - p_win_before)
            + 1 * (p_tie_loss_after - p_tie_loss_before))
e9e3bc109385ee4e5c1710f34bacdebac9e3cedc
93,229
def generate_antisense_sequence(sequence):
    """Return the antisense (reverse complement) of a DNA sequence.

    Complements A<->T and C<->G while reversing the strand direction.
    Raises KeyError for any character outside {A, T, C, G}.
    """
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement[base] for base in reversed(sequence))
666e8b4f84667221f980f043d2ac311a0ee5f71c
93,241
def get_edge_nodes(edge, nodes):
    """Look up the endpoint node dicts of an edge.

    Parameters
    ----------
    edge : dict
        Edge information with "from" and "to" node ids.
    nodes : list of dict
        All available nodes; each must carry an "id" key.

    Returns
    -------
    tuple of dict
        (first node, last node). Raises StopIteration when an endpoint
        id has no matching node.
    """
    def _node_by_id(node_id):
        # First node whose id matches; StopIteration if absent.
        return next(node for node in nodes if node["id"] == node_id)

    return _node_by_id(edge["from"]), _node_by_id(edge["to"])
9ecc49136b07883776d791f47f66f6da09335e64
93,244
def _absolute_and_relative_error(X, Y, norm): """Compute the absolute and relative errors between X and Y where Y is an approximation to X: absolute_error = ||X - Y||, relative_error = ||X - Y|| / ||X|| = absolute_error / ||X||, with ||X|| defined by norm(X). """ norm_of_data = norm(X) absolute_error = norm(X - Y) return absolute_error, absolute_error / norm_of_data
ac3bbd0b4e0b2fbcd6320c785ac3e577a262e001
93,245
def screen_size(root):
    """Return the screen dimensions as (width, height) in pixels.

    :param root: A valid window object exposing winfo_screenwidth /
        winfo_screenheight (e.g. a Tk root).
    """
    # https://stackoverflow.com/questions/3949844
    width = root.winfo_screenwidth()
    height = root.winfo_screenheight()
    return (width, height)
d36e0386f69706b1c9e5b4b017d545aff1715187
93,247
def get_pod_unique_name(pod):
    """Build the unique "namespace/name" identifier for a pod.

    :param pod: pod object whose ``metadata`` mapping holds
        ``namespace`` and ``name``
    :returns: string "namespace/name"
    """
    metadata = pod['metadata']
    return "{0}/{1}".format(metadata['namespace'], metadata['name'])
4c57b870f32afdbaeba1576611dca496e9bc1b44
93,248
def cleanup_equivalent(labels, shifts, errs, ws, crysts, inds, hashes):
    """Merge distributions whose graphs are equivalent (same hash).

    The first occurrence of a hash keeps all of its data; every later
    occurrence only contributes its label, appended to the first one's
    label with a "/" separator.

    Inputs
    ------
    labels, shifts, errs, ws, crysts, inds : list
        Per-distribution data, aligned index-wise.
    hashes : list
        Graph hash of each distribution.

    Returns
    -------
    tuple of lists
        (labels, shifts, errs, ws, crysts, inds, hashes) with duplicate
        graphs collapsed, in first-seen order.
    """
    new_labels = []
    new_shifts = []
    new_errs = []
    new_ws = []
    new_crysts = []
    new_inds = []
    new_hashes = []
    # Maps an already-seen hash to its index in the output lists,
    # avoiding a linear list.index() scan per duplicate.
    position_of = {}

    for label, shift, err, weight, cryst, ind, graph_hash in zip(
            labels, shifts, errs, ws, crysts, inds, hashes):
        if graph_hash in position_of:
            # Equivalent graph already recorded: merge the label only.
            new_labels[position_of[graph_hash]] += "/{}".format(label)
        else:
            position_of[graph_hash] = len(new_hashes)
            new_labels.append(label)
            new_shifts.append(shift)
            new_errs.append(err)
            new_ws.append(weight)
            new_crysts.append(cryst)
            new_inds.append(ind)
            new_hashes.append(graph_hash)

    return (new_labels, new_shifts, new_errs, new_ws,
            new_crysts, new_inds, new_hashes)
a13d4b84a1827649b8e9441421bd2fad9667dfed
93,249
def SquareDist(x0, x1, y0, y1):
    """Squared Euclidean distance between the points (x0, y0) and (x1, y1).

    Returns
    -------
    float
        (x1 - x0)^2 + (y1 - y0)^2
    """
    delta_x = x1 - x0
    delta_y = y1 - y0
    return delta_x ** 2 + delta_y ** 2
85afa63516a641e0846bff9cfdbd2dc163d38021
93,250
import struct


def ulonglong_bytearray(l):
    """Encode an unsigned 64-bit integer as 8 big-endian bytes.

    :param l: number to encode (0 <= l < 2**64; struct.error otherwise)
    :return: packed 8-byte bytestring, most significant byte first
    """
    big_endian_u64 = struct.Struct('>Q')
    return big_endian_u64.pack(l)
f68d1dc40cf7427ef4ea76e72dfe00fe11d54401
93,255
def form_command(parameters):
    """Flatten a parameter dict into an argv-style list for subprocess.run().

    The special key "args" contributes its value (a list) as the leading
    arguments. Every other key/value pair is appended as ``key value``,
    or as ``key v1 v2 ...`` when the value is a list.

    Unlike the previous implementation, the input dict (and its "args"
    list) is left unmodified: the old code popped "args" out of the
    caller's dict and extended/returned the caller's own list.

    :param parameters: mapping of flag -> value (value may be a list)
    :return: a new flat command list
    """
    # Copy so we never mutate the caller's "args" list.
    command = list(parameters.get("args", []))
    for key, value in parameters.items():
        if key == "args":
            continue
        if isinstance(value, list):
            command.extend([key, *value])
        else:
            command.extend([key, value])
    return command
7b091f13127111185ff5103d033b35f9b985505f
93,256
def qualify_cpp_name(cpp_namespace, cpp_type_name):
    # type: (str, str) -> str
    """Prefix a C++ type name with its namespace, when one is given.

    A falsy namespace (None or "") leaves the type name untouched.
    """
    if not cpp_namespace:
        return cpp_type_name
    return cpp_namespace + "::" + cpp_type_name
c5423b3bfd84935b41f3defd65f8a2b923759fc3
93,262
import random


def random_font(fonts):
    """Pick one font at random and return its path under ./fonts/.

    :param fonts: list of font file names to choose from
    """
    chosen = random.choice(fonts)
    return './fonts/' + chosen
9dd96a1376ba00bf5cb9ad001b0f3de64b6836f3
93,263
def machine_lookup_all(session, hostname, public_ip = True):
    """Collect the IP addresses of every running EC2 instance with a name.

    Multiple instances can share a Name tag (e.g. members of an auto
    scale group); one address is gathered per reservation.

    Args:
        session (Session): Active Boto3 session
        hostname (string): Value of the instances' Name tag
        public_ip (bool): True for public IPs, False for private IPs

    Returns:
        (list): IP addresses of the matching running instances
    """
    ec2 = session.client('ec2')
    name_filter = {"Name": "tag:Name", "Values": [hostname]}
    state_filter = {"Name": "instance-state-name", "Values": ["running"]}
    response = ec2.describe_instances(Filters=[name_filter, state_filter])

    addresses = []
    for reservation in response['Reservations']:
        # Matches the original: only the first instance per reservation.
        instance = reservation['Instances'][0]
        if public_ip and 'PublicIpAddress' in instance:
            addresses.append(instance['PublicIpAddress'])
        elif not public_ip and 'PrivateIpAddress' in instance:
            addresses.append(instance['PrivateIpAddress'])
    return addresses
372e927343507ff1d1a5dea9df30bc01a51e3cc4
93,264
def make_graph(chrom, start, end, flank=150):
    """Build a paragraph-style graph describing a long deletion.

    The graph models the reference allele (LF -> MID_L ... MID_R -> RF)
    and the deletion allele (LF -> RF directly), each flanked by
    ``flank`` bases of reference sequence.

    :param chrom: chromosome name
    :param start: start coordinate (first deleted base)
    :param end: end coordinate (last deleted base)
    :param flank: flank length
    :return: paragraph graph dict
    """
    # The deletion must be long enough that the two flanks don't overlap.
    assert end - start + 1 >= 2 * flank

    def region(lo, hi):
        # Samtools-style region string, e.g. "chr1:100-200".
        return f"{chrom}:{lo}-{hi}"

    target_region_l = region(max(1, start - flank - 1), start + flank + 1)
    target_region_r = region(max(1, end - flank - 1), end + flank + 1)
    lf_pos = region(max(1, start - flank - 1), max(1, start - 1))
    mid_l_pos = region(start, start + flank - 1)
    mid_r_pos = region(max(1, end - flank), max(1, end - 1))
    rf_pos = region(end + 1, end + flank + 1)

    nodes = [
        {"name": "source", "sequence": "NNNNN"},
        {"name": "LF", "reference": lf_pos},
        {"name": "MID_L", "reference": mid_l_pos},
        {"name": "MID_R", "reference": mid_r_pos},
        {"name": "RF", "reference": rf_pos},
        {"name": "sink", "sequence": "NNNNN"},
    ]
    edges = [
        {"from": "source", "to": "LF"},
        {"from": "source", "to": "MID_R"},
        {"from": "LF", "to": "RF", "sequences": ["DEL"]},
        {"from": "LF", "to": "MID_L", "sequences": ["REF"]},
        {"from": "MID_R", "to": "RF", "sequences": ["REF"]},
        {"from": "MID_R", "to": "sink"},
        {"from": "RF", "to": "sink"},
    ]
    paths = [
        {"nodes": ["LF", "MID_L"], "path_id": "REF|1", "sequence": "REF",
         "nucleotide_length": 2 * flank},
        {"nodes": ["MID_R", "RF"], "path_id": "REF|2", "sequence": "REF",
         "nucleotide_length": 2 * flank},
        {"nodes": ["LF", "RF"], "path_id": "DEL|1", "sequence": "DEL",
         "nucleotide_length": 2 * flank},
    ]
    return {
        "sequencenames": ["REF", "DEL"],
        "target_regions": [target_region_l, target_region_r],
        "nodes": nodes,
        "edges": edges,
        "paths": paths,
    }
0d0b3c84f057f945b6c79993619b0a6de9053824
93,266
import glob


def expand_file_list(input_files):
    """Expand wildcard patterns into the list of matching files.

    Parameters
    ----------
    input_files : list(str)
        File names, possibly containing glob wildcards.

    Returns
    -------
    list(str)
        Names of existing files matched by the patterns, concatenated
        in pattern order. Patterns with no match contribute nothing.
    """
    return [match
            for pattern in input_files
            for match in glob.glob(pattern)]
f6e86c3b0f738265eabc897534f21e75a44fdabc
93,268
def meh2(captcha):
    """Sum the digits that match the digit halfway around the circular string.

    For each position, the digit is counted when it equals the digit
    len(captcha)//2 places ahead (wrapping around).

    >>> meh2('1212')
    6
    >>> meh2('1221')
    0
    >>> meh2('123425')
    4
    >>> meh2('123123')
    12
    >>> meh2('12131415')
    4
    """
    length = len(captcha)
    # Integer division: the old `/` produced a float offset, which made
    # the string index raise TypeError on Python 3.
    half = length // 2
    result = 0
    for i, digit in enumerate(captcha):
        if digit == captcha[(i + half) % length]:
            result += int(digit)
    return result
a4ee86a391475f72b1c6b69c4e2035fcaee6eeb7
93,271
def _get_browser(buildername): """Gets the browser type to be used in the run benchmark command.""" if 'android' in buildername: return 'android-chromium' # pragma: no cover elif 'x64' in buildername: return 'release_x64' # pragma: no cover return 'release'
b2cea9d04d741b713344265a94529a4d54e35327
93,275
from typing import Union
from typing import Tuple
# NOTE(review): this imports re.T, which is an alias for the re.TEMPLATE
# regex flag. It is passed to ``get_formal_type_parameter`` below; a
# TypeVar or a project-specific sentinel named ``T`` was almost
# certainly intended — confirm against the defining project.
from re import T


def match_type_container(typ, container_type_name: Union[str, Tuple[str, ...]]):
    """Unpack the type parameter from ContainerType[T].

    Returns None when ``typ`` is None, or when ``typ`` is not a
    parameterized class whose full name matches ``container_type_name``
    (a single name or a tuple of acceptable names). Otherwise returns
    the container's formal type parameter.
    """
    # Nothing to inspect.
    if typ is None:
        return None
    # Normalize a single name into a 1-tuple so the membership test works.
    if isinstance(container_type_name, str):
        container_type_name = (container_type_name,)
    # Only parameterized classes with a matching full name qualify.
    # NOTE(review): ``typ`` appears to be a pytype abstract value
    # (isinstance_ParameterizedClass / full_name / get_formal_type_parameter);
    # confirm the expected API.
    if not (typ.isinstance_ParameterizedClass() and
            typ.full_name in container_type_name):
        return None
    param = typ.get_formal_type_parameter(T)
    return param
ecb79785eb511cac5bb391db5d3e4b4793b9e9a2
93,278
def generate_volume_names(tenant, datastore_name, len):
    """Build a list of volume names of the form "<tenant>_vol<N>@<datastore>".

    e.g. generate_volume_names("tenant1", "sharedVmfs-0", 3) ->
    ['tenant1_vol1@sharedVmfs-0', 'tenant1_vol2@sharedVmfs-0',
     'tenant1_vol3@sharedVmfs-0']

    NOTE: the third parameter shadows the builtin ``len``; the name is
    kept unchanged for interface compatibility with existing callers.
    """
    count = len  # re-bind to a non-shadowing name for clarity below
    return [tenant + "_vol" + str(i + 1) + "@" + datastore_name
            for i in range(count)]
f3ee745b587799d479a0648adcece09e99d8407e
93,279
def integer_fractional_parts(number):
    """Split a digit sequence at its radix point.

    Args:
        number(iterable container): a number represented as a sequence
            of the form (..., ".", int, int, int, ...)

    Returns:
        (integer_part, fractional_part): tuple of slices; the radix
        point itself starts the fractional part.

    Example:
        >>> integer_fractional_parts((1,2,3,".",4,5,6))
        ((1, 2, 3), ('.', 4, 5, 6))
    """
    split_at = number.index(".")
    return (number[:split_at], number[split_at:])
ab5ae2b76acf223c025293e35b7e9708afc9b361
93,280
import socket
import fcntl
import struct


def get_ip_address(ifname):
    """Get the current IPv4 address of a network interface card.

    Uses the Linux SIOCGIFADDR ioctl, so the interface must exist, be
    up, and have an IPv4 address assigned.

    Params
    ======
    ifname: str or bytes
        Interface name, e.g. "eth0"

    Return
    ======
    IP: str
        Current dotted-quad IP of the interface
    """
    # The ioctl ifreq struct expects raw bytes; the old code passed a
    # str to struct.pack("256s", ...), which raises on Python 3.
    if isinstance(ifname, str):
        ifname = ifname.encode('utf-8')
    # Context manager closes the helper socket instead of leaking it.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        packed = fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack("256s", ifname[:15]),
        )
    # sockaddr_in: the IPv4 address sits at bytes 20..24 of the ifreq.
    return socket.inet_ntoa(packed[20:24])
99b4b0ff9787506ad6cc6a44fec8baf04d4ae353
93,284
import random


def attack(attack_power, percent_to_hit, percent_to_critical=0.01):
    """Roll the damage of a single attack.

    One uniform roll decides both the hit and the critical strike: the
    attack hits when roll <= percent_to_hit, in which case damage is a
    random value in [1, attack_power], plus a bonus of attack_power when
    the same roll also lands under percent_to_critical. A miss deals 0.

    Parameters:
        attack_power - maximum base damage
        percent_to_hit - chance to hit in [0, 1]
    Optional:
        percent_to_critical - chance to crit [default: 0.01]
    Returns:
        Damage dealt (int).
    """
    roll = random.random()
    if roll > percent_to_hit:
        return 0  # missed entirely
    damage = random.randint(1, attack_power)
    if roll <= percent_to_critical:
        damage += attack_power  # critical strike bonus
    return damage
b13311cf56cc862ffa5ce97ba686d136b29c24f1
93,285
def find_fields(line, field_order=None, field_delims=None):
    """Slice a fixed-width table line into named, stripped fields.

    ``field_order`` names the fields to extract and ``field_delims``
    gives the column offsets where each field starts; the last field
    runs to the end of the line. Falsy arguments fall back to the
    branch-length table defaults.

    Returns a dict mapping field name -> stripped substring.
    """
    field_order = field_order or ["taxa", "parent", "bl"]
    field_delims = field_delims or [0, 21, 36, 49]

    fields = {}
    for idx, name in enumerate(field_order):
        begin = field_delims[idx]
        # Past the last delimiter, slice to end of line (None).
        finish = field_delims[idx + 1] if idx + 1 < len(field_delims) else None
        fields[name] = line[begin:finish].strip()
    return fields
3194775ed17e7d3ee916c7b8f6a39ebb7b2d97db
93,288
import pathlib


def find_containers(path):
    """List the container directories (xy*) inside an experiment folder.

    Parameters
    ----------
    path : str, or pathlib.Path
        Absolute path of the experiment; strings are expanded and
        made absolute.

    Returns
    -------
    containers : list of pathlib.Path
        Subdirectories whose names match "xy*" (files are skipped).
    """
    if not isinstance(path, pathlib.Path):
        path = pathlib.Path(path).expanduser().absolute()
    return [entry for entry in path.glob('xy*') if entry.is_dir()]
a5d812b0fdf51c833ad878675aa49f5909d27945
93,290
def calculate_center(df_task):
    """Midpoint of the task's bounding box.

    Returns the (Lat, Lon) tuple halfway between the extreme latitude
    and longitude values of ``df_task``.
    """
    lat_mid = (df_task['Lat'].max() + df_task['Lat'].min()) / 2
    lon_mid = (df_task['Lon'].max() + df_task['Lon'].min()) / 2
    return (lat_mid, lon_mid)
493ba88572cfdf566ac80b9e9e9a3d599f1f17c8
93,300
def _create_statement_for(table_name, columns): """ :param table_name: name of table to create; does not get escaped, so don't use untrusted user input :type table_name: string :param columns: (column name, SQLite type) pairs :type columns: list of 2-tuples of strings :rtype: string :returns: SQLite CREATE TABLE statement """ columns_def = ", ".join("%s %s" % pair for pair in columns) return "CREATE TABLE %s(%s)" % (table_name, columns_def)
e6f11f7cf328168d79f24fc89a9056063b04386b
93,301
import shlex


def find_argument_quoted(pos, text):
    """
    Get the number of the argument at position pos in a string with
    possibly quoted text.
    """
    # NOTE(review): the loop below indexes each token as w[0]/w[1]/w[2]
    # and compares w[0]/w[1] with the integer ``pos``, so it assumes
    # ``sh.get_token()`` yields (start, end, token) triples. The stdlib
    # ``shlex`` returns plain strings, so this must rely on a patched or
    # subclassed shlex provided elsewhere — confirm before reuse.
    sh = shlex.shlex(text)
    count = -1
    w = sh.get_token()
    # Walk tokens until exhausted or until a token's span contains pos.
    while w and w[2] is not None:
        count += 1
        if w[0] <= pos < w[1]:
            return count
        w = sh.get_token()
    # pos lies beyond the last token: report the next argument slot.
    return count + 1
59939ee109397a0d7279c1e93e535de8ed9ad1cf
93,303
import torch


def rgbd_to_world(p, depth, K, R_ex):
    """Back-project pixel locations with depth into world coordinates.

    :param p: b x 2 pixel coordinates
    :param depth: b depths (distance along the viewing ray)
    :param K: 1 x 3 x 3 camera intrinsics
    :param R_ex: 1 x 3 x 3 extrinsic rotation
    :return: b x 3 points in a right-handed world coordinate frame
    """
    batch = p.size(0)

    # Normalized camera-ray components from the pinhole model.
    x_temp = (p[:, 0] + 1 - K[0, 0, 2]) / K[0, 0, 0]
    y_temp = (p[:, 1] + 1 - K[0, 1, 2]) / K[0, 1, 1]
    z_temp = 1

    # Scale the unit ray by depth (norm computed once, identical result).
    ray_norm = torch.sqrt(x_temp**2 + y_temp**2 + z_temp**2)
    x = x_temp / ray_norm * depth
    y = y_temp / ray_norm * depth
    z = z_temp / ray_norm * depth

    p_cam = torch.stack((x, y, z), 1).view(batch, 3, 1)  # b x 3 x 1
    p_world = torch.bmm(
        torch.transpose(R_ex, 1, 2).expand(batch, -1, -1), p_cam)
    # Swap to right-handed convention: (x, z, -y).
    return torch.stack(
        (p_world[:, 0, 0], p_world[:, 2, 0], -p_world[:, 1, 0]), 1)
c02ddbac25663fdb944477bdc8886abd9e451b7b
93,305
from typing import Iterable
from typing import Awaitable
from typing import List
import asyncio
import traceback


def run_all(awaitables: Iterable[Awaitable[None]]) -> List[asyncio.Task]:
    """Schedule every awaitable as an asyncio task with error handling.

    Each awaitable is wrapped so that any exception it raises is caught,
    printed to stdout (deliberately print, not logging), and swallowed;
    the wrapped coroutine is then scheduled on the running event loop.

    Args:
        awaitables: An iterable of awaitable objects.

    Returns:
        List[asyncio.Task]: One task per awaitable, already scheduled.
    """
    async def _guarded(aw: Awaitable[None]) -> None:
        try:
            await aw
        except Exception as exc:
            print("Task exception: {}".format(exc))
            print(traceback.format_exc())

    return [asyncio.create_task(_guarded(aw)) for aw in awaitables]
3b01834a3c5cfef333fe602da4ffc2bff00be031
93,306
from typing import Optional from typing import List from typing import Dict from typing import Any from typing import cast def _build_bitcoin_transaction_body( satoshis_per_byte: Optional[int] = None, data: Optional[str] = None, change_address: Optional[str] = None, outputs: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: """Build the json (dictionary) body for a bitcoin transaction given its inputs Args: satoshis_per_byte (int, optional): fee to pay in satoshis/byte. If not supplied, it will be estimated for you. data (str, optional): string to embed in the transaction as null-data output type change_address (str, optional): address to send change to. If not supplied, it will default to the address you are sending from outputs (list, optional): (list of {'to': str, 'value': float} dictionaries. Value float is in BTC) Raises: TypeError: with bad parameter types Returns: Dictionary body to use for sending a bitcoin transaction """ if satoshis_per_byte is not None and not isinstance(satoshis_per_byte, int): raise TypeError('Parameter "satoshis_per_byte" must be of type int.') if data is not None and not isinstance(data, str): raise TypeError('Parameter "data" must be of type str.') if change_address is not None and not isinstance(change_address, str): raise TypeError('Parameter "change_address" must be of type str.') if outputs is not None and not isinstance(outputs, list): raise TypeError('Parameter "outputs" must be of type list.') body = cast(Dict[str, Any], {"version": "1"}) if outputs: body["outputs"] = outputs if satoshis_per_byte: body["fee"] = satoshis_per_byte if data: body["data"] = data if change_address: body["change"] = change_address return body
2af8423a0d14b9230862cda38bb66c9acdddb022
93,310
from typing import Dict
from typing import Any


def validate_key(config_dict: Dict[str, Any], key: str, value_type: type, default: Any):
    """Fetch ``config_dict[key]`` when present and of the expected type.

    Falls back to ``default`` when the key is missing or its value is
    not an instance of ``value_type``.
    """
    if key in config_dict and isinstance(config_dict[key], value_type):
        return config_dict[key]
    return default
4115d285f16d3b19ac79945bd3dafc4fb5f3440c
93,313
import collections


def check_list_unordered_equal(x, y):
    """Test whether two lists hold the same elements with the same counts.

    Order is ignored; multiplicity matters (multiset equality).

    Parameters
    ----------
    x : list
    y : list

    Returns
    -------
    result : bool

    Examples
    >>> check_list_unordered_equal([1, 2, 3], [1, 2, 3, 3])
    False
    >>> check_list_unordered_equal([1, 2, 3], [3, 2, 1])
    True
    """
    counts_x = collections.Counter(x)
    counts_y = collections.Counter(y)
    return counts_x == counts_y
002c61a7596f8fd0a1b220eef8c4f030d6e901d8
93,314
def greatest_common_divisor(x, y):
    """Greatest common divisor of x and y via the Euclidean algorithm.

    Note: for negative inputs the sign of the result follows Python's
    modulo semantics (it may be negative), matching the original loop.
    """
    while y:
        x, y = y, x % y
    return x
7b8c4e9f9af4fcac28b602c381a6de1d21f12a81
93,316
def SIR(self, y, t, parameters, *args):
    """Right-hand side of the SIR epidemic model ODE system.

    :param tuple y: current state — (S, I) or (S, I, R)
    :param array t: time points (unused in the autonomous system)
    :param parameters: tuple of 2 or 3 parameters. With 3 values,
        Beta = p0 / (p1 * p2); with 2 values, Beta = p0 / p1. In both
        cases the recovery rate is r = 1 / p1.
    :return: (Sdot, Idot) or (Sdot, Idot, Rdot); implicitly None when
        ``y`` has another length, matching the original behavior.
    :rtype: tuple
    """
    # Derive the transmission and recovery rates from the parameters.
    if len(parameters) == 3:
        Beta = parameters[0] / (parameters[1] * parameters[2])
    else:
        Beta = parameters[0] / parameters[1]
    r = 1 / parameters[1]

    if len(y) == 2:
        S, I = y
        flow = Beta * S * I / self.N  # new infections per unit time
        return -flow, flow - r * I
    if len(y) == 3:
        S, I, R = y
        flow = Beta * S * I / self.N
        return -flow, flow - r * I, r * I
93009d125410afbebbd289c38611fa12987b8492
93,321
def square_to_coord(square):
    """Convert a board square index (0-63) to (row, col) coordinates.

    Square 0 maps to (7, 0) and square 63 to (0, 7): rows count down
    from 7 while columns run 0..7 left to right. Indices outside 0-63
    raise KeyError, as before.
    """
    # Build the same 64-entry table arithmetically instead of spelling
    # it out; the dict lookup preserves the KeyError on bad input.
    board = {sq: (7 - sq // 8, sq % 8) for sq in range(64)}
    return board[square]
498c8adbd1416ef84969aee0e95eb23dac80503c
93,322
from typing import Union
from typing import List
from typing import Any


def mutget(d: dict, keys: Union[List, Any], value=None):
    """Get a value from a nested dict, creating missing levels as it goes.

    Like dict.get() for a key path: intermediate missing keys are set to
    new dicts, and the final key is set to ``value`` when absent. The
    original dictionary is mutated. A falsy ``keys`` returns ``d``
    itself unchanged.

    Arguments:
    - d -- dictionary
    - keys -- a single key or a list of keys
    - value (optional) -- default stored and returned when the final
      key is missing

    Examples:
        my_dict = {'a': {}}
        mutget(my_dict, ['a', 'b', 'c'], 4)   # returns 4,
        # my_dict is now {'a': {'b': {'c': 4}}}

        my_dict = {'a': {'b': {'c': 17}}}
        mutget(my_dict, ['a', 'b', 'c'], 4)   # returns 17, no change
    """
    if not keys:
        return d
    key_path = keys if isinstance(keys, list) else [keys]

    # Descend, materializing empty dicts for missing intermediate keys.
    node = d
    for key in key_path[:-1]:
        node = node.setdefault(key, {})

    last = key_path[-1]
    if last not in node:
        node[last] = value
    return node[last]
db4bbf68805a9b1be40916e710214f0fd58437e6
93,323