content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def GetTKelvinFromTCelsius(TCelsius: float) -> float:
    """Convert a temperature from degree Celsius (°C) to Kelvin (K).

    Args:
        TCelsius: Temperature in degree Celsius (°C)

    Returns:
        Temperature in Kelvin (K)

    Notes:
        Exact conversion.
    """
    # Offset between the Celsius and Kelvin scales (0 °C expressed in K).
    ZERO_CELSIUS_AS_KELVIN = 273.15
    return TCelsius + ZERO_CELSIUS_AS_KELVIN
fc1dcc10b8927b871c5e97509b3bc09ba91b2198
84,990
def percentChange(startValue : float, endValue : float) -> float:
    """returns percent change between two points

    Args:
        startValue (float): starting point
        endValue (float): ending point

    Returns:
        float: percent change
    """
    try:
        change = (endValue - startValue) / abs(startValue) * 100
    except ZeroDivisionError:
        # startValue == 0: fall back to a tiny non-zero sentinel.
        return 0.0000000001
    # A change of exactly zero is also mapped to the tiny sentinel.
    return change if change != 0.0 else 0.0000000001
cf4005602c64295de2e3ef5b799bf7cd0acce65f
84,993
def result_sink_retcode_from_result_set(result_set):
    """Determines whether any interactions with ResultSink failed.

    Args:
        result_set: A json_results.ResultSet instance.

    Returns:
        1 if any Result in |result_set| failed to interact properly with
        ResultSink, otherwise 0.
    """
    # Collapse the per-result ResultSink return codes into a single 0/1 flag.
    any_failed = any(result.result_sink_retcode for result in result_set.results)
    return int(any_failed)
49a6ed59ebd820001e702aef5a3a3d584e50f283
84,996
def hamming_distance(string_1, string_2):
    """Calculate the Hamming Distance between two bytestrings."""
    if len(string_1) != len(string_2):
        raise ValueError('Length of inputs must match.')
    distance = 0
    for byte_1, byte_2 in zip(string_1, string_2):
        # XOR exposes the differing bits; count the set bits.
        distance += bin(byte_1 ^ byte_2).count('1')
    return distance
68f3c10de7f2e4c3ede15abf84c68de88b46df68
84,998
def fetch_default_values_for_name(default_values, config_name):
    """
    Fetch the default values for a config name.

    Walks the namespace hierarchy of ``config_name`` (parts separated
    by ``:``), overlaying each parent namespace's defaults over the
    global defaults registered under the ``""`` key.

    :param default_values Default values (dict)
    :param config_name Configuration name (str)
    """
    default_keys = default_values.keys()
    split_parent_names = config_name.split(":")[:-1]
    current_values = {}
    current_parent = ""

    # Fetch global defaults.
    # Bug fix: copy instead of aliasing default_values[""] — the later
    # update() calls would otherwise mutate the caller's dictionary.
    if "" in default_keys:
        current_values = dict(default_values[""])

    # Check each enclosing namespace, outermost first
    for parent in split_parent_names:
        # Compute fully-qualified parent name
        if current_parent == "":
            current_parent = parent
        else:
            current_parent = ":".join((current_parent, parent))

        if current_parent in default_keys:
            values = default_values[current_parent]
            current_values.update(values)

    return current_values
9403fb4467a7c7b02fdab03503ecaf26858b9d2a
85,002
def check_unit(area):
    """Check plural (square meter or square meters)"""
    # Values above one take the plural form.
    return "square meters" if area > 1 else "square meter"
db78ac39cea15d6123a678a8a869e7faa5e2c459
85,015
import inspect


def classes_in_module(module):
    """Returns a generator over the classes defined in :obj:`module`."""
    for symbol in dir(module):
        # Yield only names that are bound to class objects.
        if inspect.isclass(getattr(module, symbol)):
            yield symbol
529112e25cd1d312be08c93d90c2ed3e750fe711
85,018
def unloadProb(cohesin, args):
    """
    Defines unload probability based on a state of cohesin
    """
    # Cohesins stalled anywhere but at CTCFs use a distinct
    # characteristic lifetime from freely moving ones.
    lifetime_key = "LIFETIME_STALLED" if cohesin.any("stalled") else "LIFETIME"
    return 1 / args[lifetime_key]
ece44685e7f938c85b4266464b7ac1cc28a32363
85,019
import torch


def imag(z):
    """
    Returns the imaginary part of pytorch tensor.

    Parameters
    ----------
    z : tensor
        Input.

    Returns
    -------
    tensor
        The imaginary part, or zeros for a real-valued tensor.
    """
    # Real tensors have no imaginary component: return matching zeros.
    return z.imag if torch.is_complex(z) else torch.zeros_like(z)
6007fdc0d0b282360c8a1375b842d59618c2c417
85,021
def read_flag_file(filename):
    """ Reads the flag file, ignoring comment lines """
    with open(filename, "r") as flag_file:
        all_lines = flag_file.read().splitlines()
    # Keep only flag lines (those beginning with "-").
    flag_lines = [line for line in all_lines if line.startswith("-")]
    return " ".join(flag_lines)
d990347b56d7f85339eb0c0dbad925680010fd6f
85,022
import logging


def prepare_alfred_formatter(exp_id: str) -> logging.Formatter:
    """Returns a :class:`~logging.Formatter` with the standard alfred
    logging format, including experiment id.
    """
    fmt = (
        "%(asctime)s - %(name)s - %(levelname)s - "
        f"experiment id={exp_id} - %(message)s"
    )
    return logging.Formatter(fmt)
dabfafba0065537c9182c1c8e341400c42135246
85,024
def inside(shape, center, window):
    """ Returns boolean if a center and its window is fully contained
    within the shape of the image on all three axes """
    for axis in range(3):
        # The window must not extend past either end of this axis.
        if center[axis] - window[axis] < 0:
            return False
        if center[axis] + window[axis] > shape[axis]:
            return False
    return True
3427db0a4372e6db6190675e0d4c6e9ff16d71c9
85,028
def long_tube_coeff(mass_flux, D, c_p, mu, k):
    r"""Convection coefficient for flow in a long tube.

    This model for convection was developed from experiments with
    fully-developed flow in long tubes. It is taken from Eq. 11.35 in
    Hill & Peterson:

    .. math::

        \frac{h}{G c_p} = 0.023 (\frac{G D}{\mu_b})^{-0.2}
        (\frac{\mu c_p}{k})_b^{-0.67}

    where :math:`G` is the average mass flux through the tube, and the
    subscript :math:`b` denotes properties evaluated at the bulk fluid
    temperature.

    Note: the docstring is now a raw string; previously ``\f`` and
    ``\m`` were interpreted as string escapes, corrupting the rendered
    math (``\f`` is a form feed).

    References:
        [1] P. Hill and C. Peterson, "Mechanics and Thermodynamics of
            Propulsion", 2nd edition, 1992.

    Arguments:
        mass_flux (scalar): Mass flux through the tube
            [units: kilogram meter**-2 second**-1].
        D (scalar): Tube (hydraulic) diameter [units: meter].
        c_p (scalar): Heat capacity at constant pressure of the fluid
            flowing through the tube [units: joule kilogram**-1 kelvin**-1].
        mu (scalar): viscosity of the fluid [units: pascal second].
        k (scalar): Thermal conductivty of the fluid
            [units: watt meter**-1 kelvin**-1]

    Returns:
        scalar: The convection coefficient :math:`h`
            [units: watt meter**-2 kelvin**-1].
    """
    Pr = mu * c_p / k            # Prandtl number
    Re = mass_flux * D / mu      # Reynolds number based on mass flux
    h = 0.023 * mass_flux * c_p * Re**-0.2 * Pr**-0.67
    return h
c85f8cd2be4a8d5d6e487129f5d613c54e78aefa
85,029
import json


def parseListFile(listfname: str):
    """Parses the list file

    The list file could be either line list of validators' names OR a
    file with a json array (starts with '[').

    Arguments:
        listfname {str} -- path of the list file

    Returns:
        list of validator names
    """
    with open(listfname, 'r') as f:
        fcont = f.read()
    fcont = fcont.strip()
    if fcont.startswith('['):
        # it's json array
        return json.loads(fcont)
    # It should be a line list: drop commented-out entries.
    # Bug fix: the original removed items from the list while iterating
    # it, which skipped the line following each removed comment.
    flines = fcont.split('\n')
    return [l for l in flines if not l.strip().startswith('#')]
172b737dd8b2f425a65b1e66cea4997a3835f66d
85,031
def arcmin_to_deg(x):
    """
    Convert *x* from [arcmin] to [deg].
    """
    # 60 arcminutes per degree.
    degrees = x / 60
    return degrees
74ceef28eecd0f338d2a8fe147678a0be480e62f
85,035
def dist(pos_a, pos_b):
    """ Distance between two points """
    dx = pos_a[0] - pos_b[0]
    dy = pos_a[1] - pos_b[1]
    # Euclidean distance, truncated to an integer.
    return int((dx * dx + dy * dy) ** 0.5)
824617a0886b55cc8e03f7b5b0295649884ee572
85,039
import base64


def base64_encode(content, encoding='utf-8'):
    """
    b64encode wrapper to work with str objects instead of bytes

    base64.b64encode requires a bytes object (not a str), and returns a
    bytes object (not a str); for JSON serialization we want str.

    :param content: string to base64-encode
    :param encoding: encoding of the input string
    :return: base64-encoded string using utf-8 encoding
    """
    raw_bytes = content.encode(encoding)
    return base64.b64encode(raw_bytes).decode()
5c05a679fd8b9883fe3f76a761314cd0a2940309
85,042
def get_current(wps):
    """
    Returns only waypoints after and including the current waypoint
    """
    cwps = []
    seen_current = False
    for wp in wps:
        # Start collecting once the current waypoint is reached.
        seen_current = seen_current or wp.is_current
        if seen_current:
            cwps.append(wp)
    return cwps
0154528fef08aa24847811fca0b1cd2a76f511bc
85,047
def potential_domain_matches(domain):
    """Potential domain matches for a cookie

    >>> potential_domain_matches('www.example.com')
    ['www.example.com', 'example.com', '.www.example.com', '.example.com']
    """
    matches = [domain]
    try:
        start = domain.index('.') + 1
        end = domain.rindex('.')
        # Strip one leading label at a time, never reducing the domain
        # to its final (TLD) label.
        while start < end:
            matches.append(domain[start:])
            start = domain.index('.', start) + 1
    except ValueError:
        # No dots in the domain: only the domain itself matches.
        pass
    # Each match is also valid with a leading dot (domain cookies).
    dotted = ['.' + match for match in matches]
    return matches + dotted
2fb9e85832715dd497fb374b885d63201d788c56
85,050
def time_to_text(time):
    """Get a representative text of a time (in s)."""
    # Pick the unit so the printed number stays readable.
    if time < 0.001:
        value, unit = round(time * 1000000), " µs"
    elif time < 1:
        value, unit = round(time * 1000), " ms"
    elif time < 60:
        value, unit = round(time, 1), " s"
    else:
        value, unit = round(time / 60, 1), " min"
    return str(value) + unit
f87934f66c82c834f18d94189c67c22f6b8ef45f
85,051
def url_to_id(url: str) -> str:
    """
    Parse the given URL of the form `https://arxiv.org/abs/1907.13625`
    to the id `1907.13625`.

    Args:
        url: Input arxiv URL.

    Returns:
        str: ArXiv article ID.
    """
    # Strip a trailing ".pdf" extension if present.
    if url.endswith(".pdf"):
        url = url[:-len(".pdf")]
    # The article id is the last path component.
    return url.rsplit("/", 1)[-1]
1f5082a64204578f3a49e63a61bd73a988eac6f6
85,053
import typing


def clamp(
    value: typing.Union[int, float] = 0.5,
    minimum: typing.Union[int, float] = 0,
    maximum: typing.Union[int, float] = 1
) -> typing.Union[int, float]:
    """
    Returns the clamped value between *minimum* and *maximum*

    A clamped value is the value itself when it lies between the minimum
    and the maximum; if the value crosses an extreme, the extremity
    closer to the value becomes the clamped value.

    E.g.: clamp() -> clamp(0.5, 0, 1) -> 0.5; clamp(2, 0, 1) -> 1;
    clamp(-1, 0, 1) -> 0;
    """
    # The lower bound is checked first, mirroring the guard-clause form.
    if value < minimum:
        clamped = minimum
    elif value > maximum:
        clamped = maximum
    else:
        clamped = value
    return clamped
f665037152e72b4190daa7a643f0813f59f0f4dc
85,054
import re


def create_source_url(date_string: str) -> str:
    """Creates the URL to source file of date.

    Args:
        date_string (str): A date string in MM-DD-YYYY format
            (years 2000-2099).

    Returns:
        A URL string of the source file.

    Raises:
        ValueError: If ``date_string`` does not match MM-DD-20YY.
    """
    # Dates must look like MM-DD-20YY, as used by the JHU CSSE repo.
    if not re.match(r"^(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])-20\d\d$", date_string):
        # Bug fix: the message previously read "MM-DD-YYY" (typo).
        raise ValueError("Invalid date format, must be MM-DD-YYYY! Try using normalise_datetime()!")
    gh_branch_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master"
    source_path = "/csse_covid_19_data/csse_covid_19_daily_reports/"
    file_name = f"{date_string}.csv"
    return gh_branch_url + source_path + file_name
4a920c2273d713efc4a33fbf6a22e9811eef0434
85,058
def apply_activation_backward(backward_pass):
    """Decorator that ensures that a layer's activation function's
    derivative is applied before the layer during backwards propagation.
    """
    def wrapper(*args):
        layer, output_error, learning_rate = args[0], args[1], args[2]
        # Run the activation derivative first, if the layer has one.
        if layer.activation:
            output_error = layer.activation.backward_propagation(
                output_error, learning_rate)
        return backward_pass(layer, output_error, learning_rate)
    return wrapper
93af6589d1149b07f3982fd28ff0c5ab3968ee71
85,061
from typing import Iterable


def first(iterable: Iterable):
    """Return the first object in ``iterable``, or None if empty."""
    # next() with a default handles both iterators and empty input.
    return next(iter(iterable), None)
3d74b54f841d929a914d94e15c3b3ad7ab75772a
85,064
def _get_pool_key(conf):
    """
    Given a ConnectionConfig, return the tuple used as the key in the
    dictionary of connections by the ConnectionPooler class.
    """
    pool_key = (
        conf.klass,
        conf.host,
        conf.port,
        conf.get_coordinator_host(),
    )
    return pool_key
240a683f20a403e5ff9567cbe6b2c0e5074948e7
85,067
def calc_accuracy(y_test, prediction):
    """Accuracy of the prediction.

    Args:
        y_test (pandas.DataFrame): Actual classes of test set.
        prediction (list): Predicted classes of test set.

    Returns:
        Accuracy of the prediction.
    """
    total = len(y_test)
    # Count positionally matching labels.
    correct = sum(
        1 for idx in range(total) if prediction[idx] == y_test.iloc[idx]
    )
    return correct / total
4dd8d8921d15abd72e4b4f8e080d5399279021b4
85,068
def intersects(s1 : set, s2 : set):
    """Return true if the intersection of s1 and s2 is non-empty."""
    # Iterate over the smaller set, probing the larger one: O(min(n, m)).
    smaller, larger = (s2, s1) if len(s1) > len(s2) else (s1, s2)
    for element in smaller:
        if element in larger:
            return True
    return False
fa46cb51d1b0b688ff2f69bef84dc2ca09b466af
85,073
from pathlib import Path


def read_commands(path: Path) -> list[tuple[str, int]]:
    """Read sub commands from a file."""
    # Each line has the form "<command> <amount>".
    with open(path, "r") as handle:
        return [
            (command, int(amount))
            for command, amount in (line.split() for line in handle)
        ]
26e81403720eff7cdeb99a3380e40da69872633e
85,075
def getDataPath(directory, timestep):
    """ Returns file path to vtu file for a particular timestep """
    # The directory name encodes the subdomain id: ".../subdomain_<id>/"
    _, domain_part = directory.split("subdomain_")
    domain = domain_part[:-1]  # drop the trailing path separator
    return f"{directory}LSBU_{timestep}_{domain}.vtu"
613430b18855fc1c2a491f86938372d4f41347d8
85,080
def should_force_step(step_name, force_steps):
    """
    Check if step_name is in force_steps

    We support multi-level flags, ex for step_name = "a.b.c", we allow :
    "a", "a.b", "a.b.c"

    If one of force_steps is a wildcard (*), it will also force the step
    """
    return any(
        forced == "*"
        or forced == step_name
        or step_name.startswith(forced + ".")
        for forced in force_steps
    )
f1833f280cece085bf02234c4469171260b0ce11
85,081
def common_values(list1, list2):
    """ Returns the common values of two sorted arrays """
    # Classic two-pointer merge walk over both sorted inputs.
    i = j = common = 0
    while i < len(list1) and j < len(list2):
        left, right = list1[i], list2[j]
        if left < right:
            i += 1
        elif left > right:
            j += 1
        else:
            # Equal: count once and advance both cursors.
            common += 1
            i += 1
            j += 1
    return common
99f0c487c26ecee48497459557cad4904d8b089f
85,087
import random
import string


def generate_random_string(length=6):
    """Generate a random string of upper case letters and digits.

    Args:
        length: length of the generated string

    Returns:
        the generated string
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
036a92d4e6ec418e11a9343edb28475cea68e0a2
85,097
def count(arrays, cn):
    """
    Used in conjuction with `get_vals`. Counts the proportions of
    all_wt, all_mut, or mixed arrays in a given set of `data`, up to a
    copy number of `cn`.
    """
    all_wt = all_mut = mixed = 0
    for array in arrays:
        # Only consider arrays with exactly `cn` copies.
        if len(array) != cn:
            continue
        values = set(array)
        if not values - {0}:
            # homogenous WT
            all_wt += 1
        elif not values - {1}:
            # homogenous H47R
            all_mut += 1
        elif len(values) > 1 and not values - {0, 1}:
            # "mixed" -- requires array to be longer than 1 copy
            mixed += 1
    return all_wt, mixed, all_mut
07151cc754b461d4f8fd0c562f03d8101e290691
85,098
def finalize_headers(headers, access_token):
    """
    Args:
        - headers: (dict-like) HTTP headers
        - access_token: (str) SurveyMonkey access token

    Returns:
        (dict) headers updated with values that should be in all requests
    """
    # Build a fresh dict so the caller's headers object is untouched.
    return {
        **headers,
        'Authorization': 'Bearer {}'.format(access_token),
        'Content-Type': 'application/json',
    }
6357ba9845b191debecd37bacdff90fe6dd3a77a
85,101
def clip_count(cand_d, ref_ds):
    """Count the clip count for each ngram considering all references"""
    total = 0
    for ngram, cand_count in cand_d.items():
        # The candidate count is clipped by the highest count of this
        # ngram in any single reference.
        max_ref_count = max(
            (ref[ngram] for ref in ref_ds if ngram in ref), default=0)
        total += min(cand_count, max_ref_count)
    return total
a8e5e4fa89e7a6e8c92dfee68db16554e98520ef
85,106
def add_dicts(a, b):
    """Add two dictionaries together and return a third."""
    # Values from ``b`` win on key collisions, as with dict.update().
    return {**a, **b}
afa72475df4485feabae4acf86ff4d9aead3b523
85,107
def _process_comma_separated_tags(tags):
    """
    Return a list of tags given a string of comma-separated tags.
    """
    result = []
    for raw_tag in tags.strip().split(','):
        cleaned = raw_tag.strip()
        # Skip empty entries produced by stray or trailing commas.
        if cleaned:
            result.append(cleaned)
    return result
dc9d7a97b4b48b2f6c8df73e58282a35d5069f2a
85,108
def raveler_body_annotations(orphans, non_traversing=None):
    """Return a Raveler body annotation dictionary of orphan segments.

    Orphans are labeled as body annotations with `not sure` status and a
    string indicating `orphan` in the comments field. Non-traversing
    segments have only one contact with the surface of the volume, and
    are labeled `does not traverse` in the comments.

    Parameters
    ----------
    orphans : iterable of int
        The ID numbers corresponding to orphan segments.
    non_traversing : iterable of int (optional, default None)
        The ID numbers of segments having only one exit point in the
        volume.

    Returns
    -------
    body_annotations : dict
        A dictionary containing entries for 'data' and 'metadata' as
        specified in the Raveler body annotations format [1, 2].

    References
    ----------
    [1] https://wiki.janelia.org/wiki/display/flyem/body+annotation+file+format
    and:
    [2] https://wiki.janelia.org/wiki/display/flyem/generic+file+format
    """
    def annotation(body_id, comment):
        # Every entry shares the `not sure` status.
        return {'status': 'not sure', 'comment': comment,
                'body ID': int(body_id)}

    data = [annotation(o, 'orphan') for o in orphans]
    if non_traversing is not None:
        data += [annotation(n, 'does not traverse') for n in non_traversing]
    metadata = {'description': 'body annotations', 'file version': 2}
    return {'data': data, 'metadata': metadata}
7eb8313043155ac03d27a9bfd81bb6139f384983
85,110
from datetime import datetime


def dt(msts: int) -> datetime:
    """
    Converts a JavaScript-style timestamp (milliseconds) to a Python
    datetime.

    Parameters
    ----------
    msts: int
        Timestamp to convert

    Returns
    -------
    :class:`datetime.datetime`
        Python datetime representing the passed date
    """
    # JavaScript timestamps are in milliseconds; Python expects seconds.
    seconds = float(msts) / 1000
    return datetime.fromtimestamp(seconds)
7e25328e31d31464b68798373abff023b633c2f2
85,113
def convert_labels(df, columns=['label'], reverse=False):
    """
    Convert labels in provided columns to numeric values (or back to
    text). Edits the dataframe in place.
    """
    # NOTE: the mutable default for ``columns`` is preserved for
    # interface compatibility; it is never mutated here.
    if reverse:
        labels_vals = {0: 'Refuted', 1: 'Supported', 2: 'NotEnoughInfo'}
    else:
        labels_vals = {'Refuted': 0, 'Supported': 1, 'NotEnoughInfo': 2}
    for col in columns:
        df[col] = df[col].apply(lambda x: labels_vals[x])
    return df
155e7171811dd4d9c1b819affb4ee7a23b56b5c9
85,115
def GetCppPtrType(interface_name):
    """Returns the c++ type associated with interfaces of the given name.

    Args:
        interface_name: the name of the interface you want the type for,
            or None.

    Returns:
        the c++ type which wayland will generate for this interface, or
        void* if the interface_name is none. We use "struct foo*" due to
        a collision between typenames and function names (specifically,
        wp_presentation has a feedback() method and there is also a
        wp_presentation_feedback interface).
    """
    if interface_name:
        return f'struct {interface_name}*'
    return 'void*'
ee3c39f4a0164abc9b6233c26ac6110d09b9dfe5
85,116
import time


def two_way_communication_with_args(start, end):
    """Both sends and receives values to & from the main thread.

    Accepts arguments, puts them on the worker object.
    Receives values from main thread with ``incoming = yield``
    Optionally returns a value at the end
    """
    current = start
    while current < end:
        # simulate computationally intensive work
        current += 1
        time.sleep(0.1)
        # Yield the progress value; the main thread may send back a
        # replacement position.
        incoming = yield current
        if incoming is not None:
            current = incoming
    # do optional teardown here
    return "done"
279cc3152f14b71e9369fb9f91d77b88e37f0709
85,119
def coords_add(coords0, coords1):
    """ Add two coordinates """
    # Element-wise sum over paired components.
    return tuple(a + b for a, b in zip(coords0, coords1))
d68f7300e55c0de79dcff7e5bcc5455ae2f7b6ee
85,124
def __fmt_str_quotes(x):
    """Return a string or list of strings where the input string or list
    of strings have single quotes around strings"""
    if isinstance(x, str):
        return "'" + x + "'"
    # Lists/tuples (and any other type) rely on their str()/repr()
    # form, which already quotes contained strings.
    return str(x)
65216dc277f06e44ce6a48a5034795c3b512fd0d
85,125
def ms_to_kt(val):
    """ Converts m/s to knots; accepts numeric or string """
    KNOTS_PER_MS = 1.94384
    try:
        return float(val) * KNOTS_PER_MS
    except (TypeError, ValueError):
        # Not float()-able (e.g. array-likes): multiply directly.
        return val * KNOTS_PER_MS
a09cb575571a9a45982515cacf6c99c35dea0fae
85,126
def nodeFromSectionStr(app, sectionStr, lang="en"):
    """Find the node of a section string.

    Compare `tf.core.text.Text.nodeFromSection`.

    Parameters
    ----------
    sectionStr: string
        Must be a valid section specficiation in the language specified
        in *lang*. The string may specify a section 0 level only
        (book/tablet), or section 0 and 1 levels (book/tablet
        chapter/column), or all levels (book/tablet chapter/column
        verse/line).

        !!! hint "examples"
            Genesis
            Genesis 1
            Genesis 1:1
            P005381
            P005381 1
            P005381 1:1

    lang: string, optional `en`
        The language assumed for the section parts, as far as they are
        language dependent. Must be a 2-letter language code.

    Returns
    -------
    node | error: integer | string
        Depending on what is passed, the result is a node of section
        level 0, 1, or 2. If there is no such section heading, an error
        string is returned.
    """
    # Text API and app-level separator configuration.
    api = app.api
    T = api.T
    aContext = app.context
    # sep1 splits level 0 from the rest; sep2 splits levels 1 and 2.
    sep1 = aContext.sectionSep1
    sep2 = aContext.sectionSep2
    # Error messages returned (not raised) to the caller.
    msg = f'Not a valid passage: "{sectionStr}"'
    msgi = '{} "{}" is not a number'
    section = sectionStr.split(sep1)
    if len(section) > 2:
        # More than two sep1-separated parts can never be valid.
        return msg
    elif len(section) == 2:
        # The tail may carry levels 1 and 2, separated by sep2.
        section2 = section[1].split(sep2)
        if len(section2) > 2:
            return msg
        section = [section[0]] + section2
    # Per-level feature data types ("int" or string) and level names.
    dataTypes = T.sectionFeatureTypes
    sectionTypes = T.sectionTypes
    sectionTyped = []
    msgs = []
    for (i, sectionPart) in enumerate(section):
        if dataTypes[i] == "int":
            try:
                part = int(sectionPart)
            except ValueError:
                # Collect all conversion errors before bailing out.
                msgs.append(msgi.format(sectionTypes[i], sectionPart))
                part = None
        else:
            part = sectionPart
        sectionTyped.append(part)
    if msgs:
        return "\n".join(msgs)
    # Delegate the actual lookup to the Text API.
    sectionNode = T.nodeFromSection(sectionTyped, lang=lang)
    if sectionNode is None:
        return msg
    return sectionNode
999e0352d7fd75d0b4c3310bb399a05d519db413
85,131
import re


def is_ip_address(ipaddress: str) -> bool:
    """Validates that the string is a valid IPv4 address.

    The address must be four dot-separated decimal octets, each in the
    range 0-255.

    Args:
        ipaddress: candidate address string.

    Returns:
        True if ``ipaddress`` is a valid dotted-quad IPv4 address.
    """
    # Make sure ip address is of proper type
    if not ipaddress or type(ipaddress) is not str:
        return False
    # Four groups of 1-3 digits separated by dots.
    if re.match(r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$',
                ipaddress) is None:
        return False
    # Bug fix: the regex alone accepted out-of-range octets such as
    # "999.1.1.1"; each octet must be 0-255.
    return all(0 <= int(octet) <= 255 for octet in ipaddress.split('.'))
1e36409894659ea90037c5e1c3326aa3656522db
85,135
def get_index_mask(file_count):
    """Get mask for for numbering of pictures"""
    if file_count < 10:
        return '%d'
    # Zero-pad to the width of the largest index.
    width = len(str(file_count))
    return '%0{}d'.format(width)
6af65c8b369b62fa5824f22e6e34b60e3bf401e4
85,138
def on_board(position):
    """Check if position is on board."""
    row, col = position[0], position[1]
    # Both coordinates must fall within the 8x8 board.
    return 0 <= row < 8 and 0 <= col < 8
69ac0250ddc65beb1eb567d758cd52115391036e
85,139
def is_admin(user):
    """Returns True if the user is an admin."""
    # Staff rights are checked first; superuser rights otherwise.
    if user.is_staff:
        return user.is_staff
    return user.is_superuser
a94442dc835652ba378de2ac40e2d8711253cabb
85,140
import torch


def reparametrize(mu, log_var, device):
    """
    Reparametrize based on input mean and log variance.

    Draws z = mu + sigma * epsilon with epsilon ~ N(0, 1) — the
    standard VAE reparameterization trick.

    Parameters
    ----------
    mu : torch.tensor
        The mean.
    log_var : torch.tensor
        The log variance.
    device : str
        The device on which to put the data.

    Returns
    -------
    z : torch.tensor
        The reparametrized value.
    """
    sigma = torch.exp(0.5 * log_var)
    # Bug fix: epsilon must be standard-normal noise (randn_like), not
    # uniform noise on [0, 1) (rand_like), for the reparameterization
    # trick to sample from N(mu, sigma^2).
    epsilon = torch.randn_like(sigma)
    z = mu + epsilon * sigma
    return z.to(device)
c3e01e5fd932843320d27d54993369728cc8eaca
85,142
def iterable(obj):
    """Return boolean of whether obj is iterable"""
    # iter() raises TypeError for non-iterables.
    try:
        iter(obj)
        return True
    except TypeError:
        return False
9627a05c93a592ea051a1c38550ded1a9fd09750
85,151
def chunk(line, chunksize, delimiter=None):
    """Chop a string into 'chunks' no greater than a specified size

    Given a string, return a list where each item is a substring no
    longer than the specified 'chunksize'.

    If delimiter is not None then the chunking will attempt to chop the
    substrings on that delimiter. If a delimiter can't be located (or
    none is specified) then the substrings will all be of length
    'chunksize'.
    """
    chunks = []
    # Chop the string until the remainder fits within chunksize.
    while len(line) > chunksize:
        if delimiter is not None:
            try:
                # Nearest delimiter before the chunksize limit.
                # Bug fix: a literal space was previously searched here
                # regardless of the requested delimiter.
                i = line[:chunksize].rindex(delimiter)
            except ValueError:
                # Unable to locate delimiter so split on the chunksize
                # limit
                i = chunksize
        else:
            i = chunksize
        chunks.append(line[:i])
        line = line[i:]
    # Append the remainder and return
    chunks.append(line)
    return chunks
100cda3ff2381edc5e3cf476cc802cd853796db9
85,152
def find_indent(filename):
    """
    Find the indent type of this file. This function assumes consistant
    indentation.

    Args:
        filename: The filename to look in.

    Returns:
        N spaces if indented with 2 or 4 spaces, or a tab char if
        indented with tabs.
    """
    # NOTE(review): returns None implicitly when no line contains any
    # of the indent patterns — confirm callers handle that case.
    # NOTE(review): the substring tests match anywhere in the line, not
    # only leading whitespace — a line with interior runs of spaces
    # would also trigger them; verify against the intended inputs.
    with open(filename) as f:
        for l in f:
            if "\t" in l:
                return "\t"
            # Wider indent checked first: a 4-space indent also
            # contains the 2-space substring.
            if "    " in l:
                return "    "
            if "  " in l:
                return "  "
c7437e73acab477382568949f9469c9ad6fcf3a1
85,156
def unique_lst(lst, size=None):
    """
    Get the unique values inside a list

    ----
    Parameters:
        lst (list) : list of values
        size (int) : number of unique elements the list must contain.
            By default it is set to None, then all the element of the
            list will be checked.
    ----
    Returns:
        unique_lst (list) : list of the unique values in lst
    """
    assert (len(lst) > 0)
    uniques = []
    for item in lst:
        # List membership keeps support for unhashable elements.
        if item in uniques:
            continue
        uniques.append(item)
        # Stop early once the requested number of uniques is found.
        if size is not None and len(uniques) == size:
            break
    assert (len(uniques) > 0)
    return uniques
03df0a3c349d0549cc19559409596e1282a37661
85,157
import hashlib


def compute_file_hash(filename: str, buffer_size: int = 64 * 1024) -> str:
    """
    Return the SHA-1 hex digest of *filename*, read in buffer_size
    chunks — akin to ``$ sha1sum <filename>``.

    When we make any modifications to the inputs of a baseline, the
    baseline should also reflect these changes; otherwise a "hidden"
    change to the underlying model may produce different results.
    Capturing a hash of the model lets the baseline track such changes.
    """
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        while True:
            data = f.read(buffer_size)
            if not data:
                break
            sha1.update(data)
    return sha1.hexdigest()
b0abf2c6d7b20d158cbe3c864731c7b297d79950
85,158
from typing import Dict


def merge(source: Dict, destination: Dict) -> Dict:
    """
    Merge given dictionaries recursively.

    Reference:
    https://stackoverflow.com/questions/20656135/python-deep-merge-dictionary-data

    >>> a = { 'a' : { 'b' : { 'd' : 1, 'e' : 2 } } }
    >>> b = { 'a' : { 'b' : { 'f' : 3, 'g' : 4 } } }
    >>> merge(a, b)
    {'a': {'b': {'f': 3, 'g': 4, 'd': 1, 'e': 2}}}
    """
    for key, value in source.items():
        if isinstance(value, dict):
            # Recurse into (or create) the matching sub-dict.
            merge(value, destination.setdefault(key, {}))
        elif key not in destination:
            # Existing destination values take precedence.
            destination[key] = value
    return destination
6864162bd959e8c44b31c7e5ef0d8047118157bf
85,161
import json
import pprint


def pkdpretty(obj):
    """Return pretty print the object.

    If `obj` is JSON, parse and print it. If it is a regular python
    data structure, pretty print that. Any exceptions are caught, and
    the return value will be `obj`.

    Args:
        obj (object): JSON string or python object

    Returns:
        str: pretty printed string
    """
    try:
        # A string may itself be JSON: parse it first if possible.
        # (`six.string_types` replaced by plain `str`; the py2/3 shim
        # was the only use of the third-party six dependency.)
        if isinstance(obj, str):
            try:
                obj = json.loads(obj)
            except Exception:
                pass
        # try to dump as JSON else dump as Python
        try:
            return json.dumps(
                obj,
                sort_keys=True,
                indent=4,
                separators=(',', ': '),
            ) + '\n'
        except Exception:
            pass
        if pprint.isreadable(obj):
            return pprint.pformat(obj, indent=4) + '\n'
    except Exception:
        pass
    # Fall through: the object could not be rendered; hand it back.
    return obj
e1c8dd1286627699537322971712ea41d22b82f6
85,163
import requests
import json


def get_conv_rate(apikey, from_curr, to_curr):
    """Gets conversion rate between currencies by using API call to service

    Args:
        apikey (str): API key for free.currconv.com
        from_curr (str): 3 letter abbreviation of currency to convert from
        to_curr (str): 3 letter abbreviation of currency to convert to

    Returns:
        float: conversion rate, else 0 if invalid conversion
    """
    # string required for API in determining conversion
    # (the service expects an upper-case "FROM_TO" pair)
    conv_string = f"{from_curr}_{to_curr}".upper()
    # Attempt to get conversion rate of args specified
    # NOTE(review): no timeout is passed, so this call can block
    # indefinitely on a stalled connection — confirm acceptable.
    conv_rate = requests.get('https://free.currconv.com/api/v7/convert',
        params={
            'apiKey':apikey,
            'compact':'ultra',
            'q':conv_string
        }
    )
    # Return 0 if the conversion rate was invalid, else parse and return float
    # (the service answers "{}" for unknown currency pairs)
    if conv_rate.text == "{}":
        return 0
    else:
        return float(json.loads(conv_rate.text)[conv_string])
f6f7ee644b839d0a9d7832b0ec2a29ac64b0374b
85,167
def partition(test, sequence):
    """Split `sequence` into 2 lists: (positive,negative), according to
    the bool value of test(x), order preserved.
    `test` is either a 1-arg function; or None, which is equivalent to
    test=bool.
    `test` is called only ONCE for each element of `sequence`.
    """
    predicate = test if test is not None else bool
    positives, negatives = [], []
    for item in sequence:
        # Route each element by a single predicate call.
        (positives if predicate(item) else negatives).append(item)
    return positives, negatives
363503f4aa86b13afacf6f43fba0738183c87ba6
85,170
def flatten_chain(chain, burnin):
    """
    Input: an emcee sampler chain and the steps taken during the burnin
    Output: a flattened chain, ignoring all steps pre-burnin
    """
    n_params = len(chain[0][0])  # number of params being fit
    # Drop the burn-in steps, then collapse walkers and steps into rows.
    post_burnin = chain[:, burnin:, :]
    return post_burnin.reshape(-1, n_params)
93bdcdcb2eca4b94bb4501d96b1d86a9af3411a1
85,172
def _predict_binary(estimator, X, Z):
    """Make predictions using a single binary estimator."""
    prediction = estimator.predict(X, Z)
    return prediction
16811a6768cf3211447e018f1a3d3054c2858e91
85,173
from typing import List
from typing import Tuple


def all_pairs(elements: List, inverse: bool = False) -> List[Tuple]:
    """
    :param elements: the list from which the elements are taken.
    :param inverse: if True also the inverse pairs are added.
    :return: All pairs found in the input list.
    """
    pairs = []
    for position, element in enumerate(elements):
        # With inverse=True every other element is a candidate partner;
        # otherwise only the elements from this position onward.
        start = 0 if inverse else position
        for partner in elements[start:]:
            # Never pair an element with itself (identity check).
            if partner is element:
                continue
            pairs.append((element, partner))
    return pairs
3b62b4bbe926b97b0d29f164c7928e6815c6354c
85,174
def check_def_topol_consistency(dic_Cname2Hnames, lipid_top):
    """Check the consistency between the lipid topology and the def file.

    Ensure that the carbons in the def file are present in the topology.
    Ensure that all hydrogens of a given carbon as described in the
    topology are present in the def file.

    Parameters
    ----------
    dic_Cname2Hnames : dictionary
        This dictionary gives the correspondance Cname -> Hname. It is a
        dictionary of tuples extracted from the def file.
    lipid_top : dictionary
        lipid topology for hydrogen.

    Returns
    -------
    Bool
        True is it's coherent. False otherwise.
    """
    # Check if carbons in dic_Cname2Hnames keys are all present in the
    # lipid topology.
    if not set(dic_Cname2Hnames.keys()).issubset(lipid_top.keys()):
        miss_atoms = ",".join(set(dic_Cname2Hnames.keys()) - set(lipid_top.keys()))
        # Bug fix: the two fragments previously concatenated without a
        # space, printing "...are notpresent...".
        print(f"Some carbons ({miss_atoms}) from the definition file are not "
              "present in the json file.")
        return False

    # For each carbon in topology, make sure all hydrogens attached
    # are in the def file
    nb_Hs_expected = {'CH3': 3, 'CH2': 2, 'CH': 1, 'CHdoublebond': 1}
    for carbon, values in lipid_top.items():
        if carbon != "resname":
            H_type = values[0]
            nb_Hs_topol = nb_Hs_expected[H_type]
            if carbon in dic_Cname2Hnames:  # Handle partial def file
                nb_Hs_def = len(dic_Cname2Hnames[carbon])
                if nb_Hs_def != nb_Hs_topol:
                    print(f"Carbon {carbon} from the definition file should contains "
                          f"{nb_Hs_topol} hydrogen(s), found {nb_Hs_def}.")
                    return False
    return True
a4f30e9d69ae8027c849430ed27815b0463d9107
85,176
from pathlib import Path


def get_help_file(help_dir, cmd):
    """ Gets most recently generated helptext for AFNI `cmd` in `help_dir`

    Parameters
    ----------
    help_dir : str
        Path to directory with AFNI help files
    cmd : str
        AFNI command for which to grab help file

    Returns
    -------
    fname : str
        Path to most recently generated help file
    """
    directory = Path(help_dir).resolve()
    # Timestamped filenames sort chronologically, so the last match is
    # the most recent one.
    candidates = sorted(directory.glob(cmd + '.????_??_??-??_??_??.help'))
    if not candidates:
        raise FileNotFoundError('Cannot find any valid help files for {} in {}'
                                .format(cmd, directory.as_posix()))
    return candidates[-1]
3c897f6ecb7941c7809b183ccbee449ff742f4a9
85,184
import math


def CalculateRelativeChange(before, after):
    """Returns the relative change of before and after, relative to before.

    There are several different ways to define relative difference
    between two numbers; sometimes it is defined relative to the smaller
    number, or to the mean of the two numbers. This version returns the
    difference relative to the first of the two numbers.

    Args:
        before: A number representing an earlier value.
        after: Another number, representing a later value.

    Returns:
        A non-negative floating point number; 0.1 represents a 10%
        change.
    """
    if before == after:
        return 0.0
    if before == 0:
        # Relative change from zero is undefined.
        return float('nan')
    return math.fabs((after - before) / before)
0592c034521ba06ec6889c9179895efcdcca1e77
85,187
def sum_of_squares(num1, num2):
    """Return the sum of the squares of the two inputs.

    Args:
        num1: first number.
        num2: second number.

    Returns:
        num1**2 + num2**2
    """
    # Bug fix: both terms previously computed num1 * num2 (the product),
    # making the result 2*num1*num2 instead of the sum of squares.
    sq1 = num1 * num1
    sq2 = num2 * num2
    return sq1 + sq2
e2b4a24c4ebfa690ebb76681f0ef40c38bf66021
85,188
def dispense_cash(amount):
    """Compute the minimum number of ATM bills needed for *amount*.

    Available bills are $500, $100, $50, $20 and $10; a greedy
    largest-first strategy is optimal for this denomination set.

    Parameters
    ----------
    amount : int
        The amount of money requested from the ATM

    Returns
    -------
    int
        The number of bills needed, -1 if it can't be done
    """
    if amount % 10 != 0:
        return -1  # only multiples of $10 can be dispensed
    remaining = amount
    total_bills = 0
    for denomination in (500, 100, 50, 20, 10):
        total_bills += remaining // denomination
        remaining %= denomination
    return total_bills
a85f56f900b52f4ddd0bdd93ebd97c1d80889c36
85,197
def get_mask(size: int) -> int:
    """Return an all-ones bit mask covering *size* bytes.

    :param size: number of bytes to obtain mask for
    :return: integer with the low ``8 * size`` bits set
    """
    width_bits = 8 * size
    return (1 << width_bits) - 1
39d40dbd9100663ddec5a2ec140ddb1a25f7db73
85,203
def parse_version(s):
    """Split a version string "X.Y.Z" into a tuple for comparison.

    Numeric components are converted to ints; non-numeric components stay
    strings. An empty string maps to an arbitrarily small version number
    so it compares below any real version.

    >>> parse_version("2.17") < parse_version("1.8")
    False

    Arguments:
        s (str): version string

    Returns:
        Tuple: tuple of the version components
    """
    if s == "":
        # Essentially versionless; sorts below every real version.
        s = "-99999"

    def _coerce(part):
        # Prefer int comparison where possible, fall back to the raw text.
        try:
            return int(part)
        except ValueError:
            return part

    return tuple(_coerce(part) for part in s.split('.'))
1d1d6fbf549554b953a53cb97fc46f6e4b4ebc5f
85,204
def index(items):
    """Return the index of the first truthy item.

    Args:
        items: Any iterable object

    Raises:
        ValueError: when no item is truthy.
    """
    position = 0
    for item in items:
        if item:
            return position
        position += 1
    raise ValueError
0ab57ec0a9375a9cf1ab7f6846a7d2d6767e057f
85,206
def event_to_point(event):
    """Convert an event to a 3D point (latitude, longitude, year)."""
    lat = event.latitude
    lon = event.longitude
    year = event.year
    return (lat, lon, year)
39f8a484396e93eb14ab98e5b2b1363c3c262bd5
85,208
def civis_api_formatting(soct_type_map):
    """Convert soct_type_map into the column-spec list the Civis API reads."""
    table_columns = []
    for column_name, column_type in soct_type_map.items():
        table_columns.append({"name": column_name, "sql_type": column_type})
    return table_columns
275c50bccfdc0d647b8191c2187ab6a11bde98ef
85,211
def serialize_node(node_record):
    """Build a JSON-serializable dictionary from a neo4j Node object.

    Necessary because Node attributes are not directly serializable.

    :param node_record: Neo4j record containing a Node object
    :return: dict with 'id', 'labels' (list) and 'properties' (dict)
    """
    return {
        'id': node_record.id,
        'labels': [label for label in node_record.labels],
        'properties': {key: node_record.get(key) for key in node_record.keys()},
    }
a791f031710ad4094243a42f76d5f2a7234b4964
85,213
def read_in(path):
    """Return the full text contents of the file at *path*."""
    with open(path, "r") as handle:
        contents = handle.read()
    return contents
e9b407ef45aefd2efaf47fdb729ef71130c4e163
85,216
def sum_rec_halves(x):
    """ Sums a sequence using division-in-halves recursion.

    @type x: sequence
    @param x: the array to sum
    @rtype: int
    @return: the sum of all array values

    Bug fix: the original sliced with ``n/2``, which is a float under
    Python 3 and raises ``TypeError: slice indices must be integers``;
    floor division is required.
    """
    n = len(x)
    if n == 0:
        return 0
    if n == 1:
        return x[0]
    mid = n // 2  # integer midpoint so slicing is valid
    # sum of 1st half plus sum of 2nd half
    return sum_rec_halves(x[:mid]) + sum_rec_halves(x[mid:])
850199ab0787c4e763639df32f097443a9c11473
85,219
def with_tied_requests(template_layers_config):
    """Return a copy of the layer configs with tied requests enabled on the
    second-to-last layer of a layered micro-service application.
    """
    layers_config = list(template_layers_config)
    second_to_last = layers_config[-2]
    layers_config[-2] = second_to_last._replace(use_tied_requests=True)
    return layers_config
4d227c5a7c9fd76d9f55aff0c952d359a29fc3a8
85,221
def split_channels(image):
    """Split an (H, W, C) array into a tuple of per-channel views.

    :return: A 3- or 4-tuple, each element a 2-D view of one color channel
    """
    n_channels = image.shape[2]
    if n_channels == 3:
        return (image[:, :, 0], image[:, :, 1], image[:, :, 2])
    if n_channels == 4:
        return (image[:, :, 0], image[:, :, 1],
                image[:, :, 2], image[:, :, 3])
    assert False, "Wrong shape: %s" % str(image.shape)
290b2c6d3a6f28193949260e5cd1edc5799f6cd0
85,223
def get_hour_resolution(dataframe):
    """Keep only records falling exactly on the hour.

    A record is on the hour when both 'time_minutes' and 'time_seconds'
    are 0. Returns the filtered dataframe (one-hour resolution).
    """
    on_the_hour = (dataframe['time_minutes'] == 0) & (dataframe['time_seconds'] == 0)
    hour_res_df = dataframe[on_the_hour]
    print("Number of rows in hour resolution dataframe: ", len(hour_res_df))
    return hour_res_df
bcefa9d06d33f4c4774611fe2637a79a15cb8379
85,229
def extract_data(data, dim):
    """Collect dimension *dim* ('center_x', 'center_y' or 'width') of every
    entry in *data* into a list.

    Any other *dim* yields an empty list, matching the branch fallthrough
    of the original implementation.
    """
    if dim not in ('center_x', 'center_y', 'width'):
        return []
    return [getattr(entry, dim) for entry in data]
d2cc3f678a03ab3f781ae7afcf0044f500198f33
85,230
def merge_digits(one, two):
    """Get which digits (0-9) have been seen in either *one* or *two*."""
    return {digit: one[digit] or two[digit] for digit in range(10)}
0b50dd3aca0d34745300cb9f64396145f835722e
85,233
def unique_chars(word):
    """ Returns a sorted string containing the unique characters.

    Bug fix: the original only sorted the characters and kept duplicates,
    contradicting both its name and its docstring; duplicates are now
    removed before sorting.
    """
    return "".join(sorted(set(word)))
3f73215dd123cb73368b52f2b0b63bbe24402bc2
85,234
def LCS(value: str) -> bytes:
    """ pack a string into a LCS: 0x84 marker byte, encoded payload,
    then a single NUL terminator byte """
    payload = value.encode()
    return b'\x84' + payload + b'\x00'
1b3d30685d72d9afff07ae875247ed5114df977d
85,236
def _is_random_variable(x): """ Check if the matrix x correspond to a random variable. The matrix is considered a random variable if it is a vector or a matrix corresponding to a column vector. Otherwise, the matrix correspond to a random vector. """ return len(x.shape) == 1 or x.shape[1] == 1
dc325f01868ecf676a39e5da8dab4c797cdbddb1
85,245
import io import tarfile def _tarify_contents(file_name: str, contents: str) -> bytes: """ Returns a tarball as a bytestream containing a single file with the given name and contents. Args: file_name: A string corresponding to the name of the file in the tarball. contents: A string corresponding to the Contents of the file in the tarball. Returns: (bytes): Bytes corresponding to the tarball. """ output_bytes = io.BytesIO() tar = tarfile.TarFile(fileobj=output_bytes, mode="w") added_file = tarfile.TarInfo(name=file_name) encoded_string = str.encode(contents) added_file.size = len(encoded_string) tar.addfile(added_file, io.BytesIO(encoded_string)) tar.close() output_bytes.seek(0) return output_bytes.getvalue()
58812e92ae3b831dbdb741b0530f5e94063d1bfc
85,249
def ping4_cmd(ipv4_addr, interval=0.5, count=3, size=56):
    """Constructs a ping command line to an IPv4 address."""
    template = 'ping -i {0} -c {1} -s {2} {3}'
    return template.format(interval, count, size, ipv4_addr)
40ebe7bbf36750e5945f76744e33c6496e36b2f6
85,251
def get_uniques_in_list_of_lists(input_list):
    """Flatten a list of lists and return its unique values.

    Parameters
    ----------
    input_list : list of lists
        Object to filter from.

    Returns
    -------
    List
        Unique values in input object (arbitrary order).
    """
    seen = set()
    for sublist in input_list:
        seen.update(sublist)
    return list(seen)
baf25c8539d4c631a165e10b992550c664461c5a
85,253
def exists_in_perso(perso, asn):
    """Return True when *asn* appears in the personal dataset.

    :param perso: personal AS dataset (has an ``asn`` column)
    :param asn: ASn from the AS_rank dataset
    :return: True if the ASn exists in the personal dataset, else False
    """
    known_asns = perso.asn.values
    return asn in known_asns
701760e9d0bbf3317a0bbc8e4da3811b20cdc4b8
85,264
from datetime import datetime


def make_paths(global_root_dir, global_results_dir_name, prefix, insert_datetime=True):
    """Returns paths formed from command line arguments.

    The results directory name is built from *prefix* and/or a timestamp
    (empty when both are absent).
    """
    global_results_dir = '/'.join((global_root_dir, global_results_dir_name))
    stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') if insert_datetime else ''
    if prefix is None:
        results_dir_name = stamp
    elif stamp:
        results_dir_name = prefix + '_' + stamp
    else:
        results_dir_name = prefix
    results_dir = '/'.join((global_results_dir, results_dir_name))
    return global_results_dir, results_dir, results_dir_name
5be6d788bf71c9fc386f8dfe932d4fd9cd0fc2ad
85,265
def converttitled(titledparagraphs):
    """ Convert titled paragraphs to strings (titles are discarded;
    paragraph lines are joined with single spaces) """
    string_list = []
    for _title, paragraphs in titledparagraphs:
        flat_lines = []
        for para in paragraphs:
            flat_lines += para
        string_list.append(b' '.join(flat_lines))
    return string_list
184e33292fff162b95f5942f14778979a087d3e9
85,266
def exp(b, n):
    """Return b to the n.

    >>> exp(2, 10)
    1024
    """
    return 1 if n == 0 else b * exp(b, n - 1)
8c7beb8608825929f6e98d098cebb7e405867325
85,273
def parse_s_file(f_name):
    """
    Parses s-file and returns dictionary with 'magnitude' and 'stations' list.

    Returns None (implicitly) when the file is empty or the magnitude
    type is not 'L'.
    """
    lines = []
    with open(f_name, 'r') as f:
        lines = f.readlines()
    if not len(lines):
        # Empty file: nothing to parse, caller receives None.
        return
    # Magnitude parse
    # NOTE(review): fixed-width layout assumed — columns 55:59 hold the
    # magnitude and column 59 its type letter; confirm against the
    # s-file format specification.
    magnitude = float(lines[0][55:59])
    magnitude_type = lines[0][59]
    if magnitude_type != 'L':
        print(f'In file "{f_name}": unsupported magnitude type "{magnitude_type}"! Skipping..')
        return
    # Stations parse
    # Station rows start immediately after a line whose prefix matches
    # this exact column header.
    th = ' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU VELO AIN AR TRES W DIS CAZ7'
    stations = []
    is_table = False
    for l in lines[1:]:
        if is_table:
            # Station code occupies columns 1:6; keep unique codes only,
            # preserving first-seen order.
            s = l[1:6].strip()
            if len(s) and s not in stations:
                stations.append(s)
        if not is_table and l[:len(th)] == th:
            # Header found: every following line belongs to the table.
            is_table = True
    return {'magnitude': magnitude, 'stations': stations}
ac6861f48845d6c5a72b261c6d542b0c919b51f7
85,274
def isOwned(players, card):
    """Report whether any player owns the given card.

    :param players: The array containing all player objects.
    :param card: The current position/card to check.
    :return: True (and prints the owner's name) or False.
    """
    group = card.group
    group_id = card.groupID
    for player in players:
        # '1' in the ownership string marks this property as owned.
        owned_flags = player.properties[group - 1]
        if owned_flags[group_id] == '1':
            print("Owned by: " + player.name)
            return True
    return False
809df343dfb938ffb388de923a6eeaf6191d0417
85,276
def read_split_file(split_file):
    """ Read text file with pre-defined split, returning list of examples.

    One example per row. Rows may hold integer indices or string
    identifiers (e.g. PDB codes); if any row fails integer conversion the
    whole split is returned as strings.
    """
    with open(split_file) as handle:
        raw = handle.readlines()
    stripped = [line.strip() for line in raw]
    try:
        return [int(token) for token in stripped]
    except ValueError:
        return stripped
2430bbee9a80688749fea6ddd10b718eff7bcd0e
85,277
def build_version(release_version: str) -> str:
    """Given 'X.Y.Z[-rc.N]', return 'X.Y.Z' (text before the first '-')."""
    base, _, _suffix = release_version.partition('-')
    return base
06258630ff67d3a16d8642bfc297d9f7196c0c07
85,279
def bisect_env_args(patchset):
    """Generates arguments for bisect-env to apply the patchset"""
    return [f"--try-pick={patch}" for patch in patchset]
3a79fa261cd6027bcc5cc62dc1b9d1da13f915f3
85,281
def _get_number_of_subtasks(total_num_items, items_per_task): """ Determines number of subtasks that would be generated by _generate_items_for_subtask. This needs to be calculated before the query is executed so that the list of all subtasks can be stored in the InstructorTask before any subtasks are started. The number of subtask_id values returned by this should match the number of chunks returned by the generate_items_for_subtask generator. """ num_subtasks, remainder = divmod(total_num_items, items_per_task) if remainder: num_subtasks += 1 return num_subtasks
194c7b8eb9683e842000606775296deee04594cb
85,283
def do_strip(s):
    """
    Removes all leading and trailing whitespace (tabs, spaces, newlines)
    from the string form of *s*; interior spaces are untouched.

    https://github.com/Shopify/liquid/blob/b2feeacbce8e4a718bde9bc9fa9d00e44ab32351/lib/liquid/standardfilters.rb#L92
    """
    text = str(s)
    return text.strip()
f192537055e0ca11136c8c7165296d6da6e6e57e
85,287
from datetime import datetime def get_hour_time(input_json, arg1, arg2): """Return a specific HH:MM from a unix time provided.""" datetime_unix = int(input_json[arg1][arg2]) datetime_ts = datetime.fromtimestamp(datetime_unix) return datetime_ts.strftime('%H:%M')
522d451293459dfd1b68ae8cd3127c5f8998ccc3
85,296