content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import time def _local_time_offset(t=None): """Return offset of local zone from GMT, taking daylight savings into acount.""" t = time.time() if time.localtime(t).tm_isdst and time.daylight: return time.altzone else: return time.timezone
b767e85951f5cbce69c10c3860aa299123ad3694
127,481
def canQuit(targetList):
    """Return True if the target list contains at least one valid target.

    :param targetList: iterable of mapping-like targets; a target is
        considered valid when its ``'match'`` entry is truthy.
    :return: bool
    """
    for t in targetList:
        # Narrowed from a bare ``except:``: only tolerate entries that
        # are not subscriptable or lack the 'match' key, instead of
        # swallowing every exception (including KeyboardInterrupt).
        try:
            if t['match']:
                return True
        except (KeyError, TypeError, IndexError):
            pass
    return False
a2b38d3d1c3113b41d03b8f730c3a74c99a711a5
127,485
import hashlib


def encrypt_password(password):
    """Return the hexadecimal MD5 digest of ``password``.

    NOTE(review): despite the name, this is hashing, not encryption, and
    unsalted MD5 is not suitable for protecting real passwords.

    :param password: plain-text password string
    :return: 32-character hexadecimal digest string
    """
    digest = hashlib.md5(password.encode())
    return digest.hexdigest()
b5a33f2028106e7546e7c3c17d83a2d3c68396e7
127,487
def smooth_parcellation_matrix(parcellation_matrix, smoothing_kernel):
    """Smooth a hard brain-atlas parcellation into a soft one via CSS.

    Args:
        parcellation_matrix: The p x v (sparse) matrix representation of
            the atlas.
        smoothing_kernel: The v x v CSS high-resolution smoothing kernel.

    Returns:
        The p x v matrix representation of the smoothed soft
        parcellation.
    """
    smoothed = parcellation_matrix.dot(smoothing_kernel)
    return smoothed
2f0b5eadc5c86ff97d0f4155c4cf3e2bc9e0921a
127,488
import math


def get_speed(velocity_vector):
    """Compute the magnitude of a 3-D velocity vector in m/s.

    Args:
        velocity_vector: object exposing ``x``, ``y`` and ``z``
            components.

    Returns:
        The speed as a float in m/s.
    """
    vx = velocity_vector.x
    vy = velocity_vector.y
    vz = velocity_vector.z
    return math.sqrt(vx ** 2 + vy ** 2 + vz ** 2)
c6f3c7e7a7f8a612f5efbaa2d6b5fba1171741eb
127,495
def create_behavior_definition(behavior_id, relationship):
    """Build a complex-audience behavior definition dict.

    Args:
        behavior_id: identifier of the behavior.
        relationship: operator relating the behavior to the audience.

    Returns:
        dict in the nested shape expected by the audience API.
    """
    behavior = {'id': behavior_id}
    return {
        'operator': relationship,
        'complexAudienceBehavior': {'behavior': behavior},
    }
592bba089a480647444135d71fe46e68c1b65bf8
127,496
def range2list(str):
    """Convert a GSB range string to a list of (min, max) interval tuples.

    Example: "1-3,5-6,9,11" -> [(1, 3), (5, 6), (9, 9), (11, 11)]

    Note: the parameter keeps its historical name ``str`` (shadowing the
    builtin) to stay keyword-compatible with existing callers.
    """
    r = []
    for part in str.split(','):
        minmax = part.split('-', 2)
        if len(minmax) == 1:
            # Bug fix: the original used ``int(part[0])``, which parsed
            # only the first character, turning e.g. "11" into 1.
            val = int(part)
            r.append((val, val))
        else:
            r.append((int(minmax[0]), int(minmax[1])))
    return r
2b0e813cfc8d18138dfae14debc6d6b6c6f84fc3
127,500
def scale_rewards(reward, min_reward, max_reward, factor=10):
    """Linearly rescale a reward relative to [min_reward, max_reward].

    The reward is mapped to [-1, 1] (for in-range rewards) and then
    multiplied by ``factor`` to amplify valuation differences.
    """
    half_range = (max_reward - min_reward) / 2
    midpoint = (max_reward + min_reward) / 2
    normalized = (reward - midpoint) / half_range
    return normalized * factor
4f85208d5c10d1de5493ef3bf0f84ad1493b5067
127,506
def _get_indent(level: int, indent: int = 4, dense: bool = False): """ Given a level and indent, return the preceding indent of an output row :param level: Degree of nesting :param indent: Number of whitespaces at every indent level :param dense: True would print vertical lines at every level :return: String of indent preceding the actual output """ if not dense: one_tab = " " * indent else: one_tab = "|" + " " * (indent - 1) pre_str_indent = one_tab * level + "|- " return pre_str_indent
dfde14ee8a2fe77944c1b283431f9c00c6aa3eda
127,512
def checkEscChars(l_info, ip):
    """Check the line for an escape character.

    Returns the handler for that escape char, or None if the line has no
    escape character.
    """
    last_char = l_info.line[-1]
    prev_char = l_info.preChar
    # A trailing '?' means help -- but *not* after either kind of shell
    # escape, because '?' is a valid final character in a shell command.
    if last_char == ip.ESC_HELP and prev_char not in (ip.ESC_SHELL,
                                                      ip.ESC_SH_CAP):
        return ip.handle_help
    if prev_char in ip.esc_handlers:
        return ip.esc_handlers[prev_char]
    return None
1983700e54a7cb2c79180f199917e4fb66dfc6e2
127,516
from typing import Dict
from typing import List


def _reconstruct_optimal_alignment(
        x: str, y: str, a: int, b: int,
        pen_gap: Dict[str, Dict[str, int]], dp: List[List[int]],
        sx_ends_with_gap: List[List[bool]],
        sy_ends_with_gap: List[List[bool]]) -> List[str]:
    """
    Private helper function to reconstruct the optimal alignment according
    to the optimal solution using backtracking.

    :param x: str, first sequence
    :param y: str, second sequence
    :param a: int, gap penalty added per gap position
        # NOTE(review): inferred from use as the per-gap term; confirm
    :param b: int, extra penalty added only when a gap is opened (i.e. the
        previous position did not already end with a gap) -- confirm
    :param pen_gap: dict{str: dict{str: int}}, substitution penalties
    :param dp: list[list[int]], filled DP cost table
    :param sx_ends_with_gap: list[list[bool]], whether the optimal partial
        alignment at (i, j) ends with a gap in x's aligned string
    :param sy_ends_with_gap: list[list[bool]], same for y's aligned string
    :return: list[str], the two aligned strings [sx, sy]
    """
    sx, sy = '', ''
    i, j = len(x), len(y)
    # Walk backwards from (len(x), len(y)), re-deriving which of the three
    # DP transitions produced each cell's optimal cost.
    while i >= 1 and j >= 1:
        x_curr, y_curr = x[i - 1], y[j - 1]
        # Cost if x_i is aligned with y_j.
        result1 = dp[i - 1][j - 1] + pen_gap[x_curr][y_curr]
        # Cost if x_i is aligned with a gap; b applies only when the gap
        # is newly opened.
        result2 = dp[i - 1][j] + a
        if not sx_ends_with_gap[i - 1][j]:
            result2 += b
        # Cost if a gap is aligned with y_j.
        result3 = dp[i][j - 1] + a
        if not sy_ends_with_gap[i][j - 1]:
            result3 += b
        result = min(result1, result2, result3)
        if result == result1:
            # Case 1: The final positions are x_i and y_j.
            sx = x_curr + sx
            sy = y_curr + sy
            i -= 1
            j -= 1
        elif result == result2:
            # Case 2: The final positions are x_i and a gap.
            sx = x_curr + sx
            sy = ' ' + sy
            i -= 1
        else:
            # Case 3: The final positions are a gap and y_j.
            sx = ' ' + sx
            sy = y_curr + sy
            j -= 1
    # Whichever prefix remains unconsumed is matched against gaps.
    if i:
        sy = ' ' * i + sy
    else:
        sx = ' ' * j + sx
    return [sx, sy]
    # Running time complexity: O(m + n)
01a331524b7b19fde872025be908a640af46e303
127,521
def find_field_with_suffix(val, suffix):
    """
    Return ``val[field]`` for the unique field whose name ends with
    ``suffix``.

    Raises KeyError if no field matches, or if more than one does.
    """
    names = [field.name for field in val.type.fields()
             if field.name.endswith(suffix)]
    if not names:
        raise KeyError("cannot find field *%s" % suffix)
    if len(names) > 1:
        raise KeyError("too many matching fields: %s" % ', '.join(names))
    return val[names[0]]
8ec2fe6d48bd4e8ea3377f6c126292229518b17a
127,523
import json


def platform2gates(platform_fname, gates_fname):
    """Tries to convert an OpenQL platform JSON file to a gatemap JSON file
    for use with the DQCsim operator wrapper for OpenQL. Heuristics are
    applied to convert common gate names to DQCsim equivalents, but you may
    need to do some manual adjusting."""

    # Load platform description file.
    with open(platform_fname, 'r') as f:
        data = json.loads(f.read())

    # Find all instruction names defined in the platform description file.
    # Instruction keys may carry operands after the name ("cnot q0,q1"),
    # so only the first whitespace-separated token is kept.
    insns = []
    for name in data.get('instructions', []):
        insns.append(name.split()[0])
    for name, decomp in data.get('gate_decomposition', {}).items():
        insns.append(name.split()[0])
        # NOTE: ``name`` is deliberately rebound here to each entry of
        # the decomposition list.
        for name in decomp:
            insns.append(name.split()[0])
    if not insns:
        print('No instructions found!')

    # Uniquify without losing order.
    new_insns = []
    seen = set()
    for insn in insns:
        if insn not in seen:
            seen.add(insn)
            new_insns.append(insn)
    insns = new_insns

    # Try to map based on the OpenQL names.
    unknown_gates = set()

    def to_json_line(openql):
        # Map from a normalized OpenQL gate name (underscores/dashes
        # removed, "measure" shortened to "meas", lower-cased) to the
        # DQCsim gatemap JSON value rendered as a string.
        dqcsim = {
            'i': '"I"',
            'x': '"X"',
            'y': '"Y"',
            'z': '"Z"',
            'h': '"H"',
            's': '"S"',
            'sdag': '"S_DAG"',
            't': '"T"',
            'tdag': '"T_DAG"',
            'x90': '"RX_90"',
            'xm90': '"RX_M90"',
            'mx90': '"RX_M90"',
            'x180': '"RX_180"',
            'rx90': '"RX_90"',
            'rxm90': '"RX_M90"',
            'rx180': '"RX_180"',
            'rx': '"RX"',
            'y90': '"RY_90"',
            'ym90': '"RY_M90"',
            'my90': '"RY_M90"',
            'y180': '"RY_180"',
            'ry90': '"RY_90"',
            'rym90': '"RY_M90"',
            'ry180': '"RY_180"',
            'ry': '"RY"',
            'z90': '"RZ_90"',
            'zm90': '"RZ_M90"',
            'mz90': '"RZ_M90"',
            'z180': '"RZ_180"',
            'rz90': '"RZ_90"',
            'rzm90': '"RZ_M90"',
            'rz180': '"RZ_180"',
            'rz': '"RZ"',
            'swap': '"SWAP"',
            'move': '"SWAP"',
            'sqswap': '"SQSWAP"',
            'sqrtswap': '"SQSWAP"',
            'cx': '"C-X"',
            'ccx': '"C-C-X"',
            'cy': '"C-Y"',
            'ccy': '"C-C-Y"',
            'cz': '"C-Z"',
            'ccz': '"C-C-Z"',
            'cphase': '"C-PHASE"',
            'ccphase': '"C-C-PHASE"',
            'cnot': '"C-X"',
            'ccnot': '"C-C-X"',
            'toffoli': '"C-C-X"',
            'cswap': '"C-SWAP"',
            'fredkin': '"C-SWAP"',
            'meas': '"measure"',
            'measx': '{\n "type": "measure",\n "basis": "x"\n }',
            'measy': '{\n "type": "measure",\n "basis": "y"\n }',
            'measz': '"measure"',
            'prep': '"prep"',
            'prepx': '{\n "type": "prep",\n "basis": "x"\n }',
            'prepy': '{\n "type": "prep",\n "basis": "y"\n }',
            'prepz': '"prep"',
        }.get(
            openql
            .replace('_', '')
            .replace('-', '')
            .replace('measure', 'meas')
            .lower(), None)
        if dqcsim is None:
            # Unrecognized gate: emit a placeholder the user must edit.
            unknown_gates.add(openql)
            dqcsim = '{\n UNKNOWN?\n }'
        openql = '"{}":'.format(openql)
        return ' {} {},'.format(openql, dqcsim)

    # Construct the output file.
    output = ['{']
    for insn in insns:
        output.append(to_json_line(insn))
    # Strip the trailing comma from the last gate entry to keep the JSON
    # valid. NOTE(review): when no instructions were found this instead
    # truncates the opening '{' -- presumably unreachable given the
    # warning above; confirm.
    if output:
        output[-1] = output[-1][:-1]
    output.append('}')
    output = '\n'.join(output)

    # Write the output file.
    with open(gates_fname, 'w') as f:
        f.write(output)

    # Report result.
    if unknown_gates:
        print('The following gates were not automatically recognized:')
        print()
        for gate in sorted(unknown_gates):
            print(' - {}'.format(gate))
        print()
        print('You\'ll need to edit the output file!')
    else:
        print('All gates were heuristically recognized! Double-check the file, though.')
4c0c428e7eda3ce524f58beaafe0efdb5be779e0
127,526
import torch


def kl_term(mean, scale):
    """KL divergence between N(mean, scale) and N(0, 1), element-wise.

    KL = 0.5 * (mean^2 + scale^2 - 1 - 2*log(scale)); it is exactly zero
    when mean == 0 and scale == 1.
    """
    # Bug fix: the original used "1 - 2*log(scale) + ..." (i.e. +1
    # instead of -1), adding a constant offset of 1 so that the
    # divergence of N(0, 1) from itself evaluated to 1 rather than 0.
    return .5 * (mean * mean + scale * scale - 1 - 2 * torch.log(scale))
8654619040a6ff138a0f65c2874f136f0759c9c8
127,528
import traceback


def collect_exception_python3(exception):
    """Render an exception's traceback (Python 3) as a single string.

    :param exception: Exception from Flask.
    :return: formatted traceback text.
    """
    parts = traceback.format_exception(
        type(exception),
        exception,
        exception.__traceback__,
    )
    return ''.join(parts)
7179704f56e901b3083b957e627317b77afdbe7b
127,530
def get_window_span(span, n=1, left=True, right=True):
    """Get a Span for a window of text containing ``span``.

    Args:
        span: a spaCy Span.
        n (int): Number of tokens on each side of the span to include.
            Default 1.
        left (bool): Whether to include the n tokens preceding the span.
            Default True.
        right (bool): Whether to include the n tokens following the span.
            Default True.

    Returns:
        a spaCy Span covering the window (clipped to the document).
    """
    start = max(span.start - n, 0) if left else span.start
    end = min(span.end + n, len(span.doc)) if right else span.end
    return span.doc[start:end]
17604d19fbeca0a526553e3bdab8dc7c3d85dc46
127,533
def _is_object_with_properties(schema_node): """Returns `True` if `schema_node` is an object schema with properties. An `allOf` schema is also assumed to be an object schema. """ if not isinstance(schema_node, dict): return False return ('allOf' in schema_node or (schema_node.get('type') == 'object' and schema_node.get('properties', {})))
8dd35695a29ed987cf946d29dd59dc981bcce584
127,534
def index_of_nth_base(gappedseq, n):
    """Return the index of the nth non-gapped base in the sequence.

    n=0 is the first non-gap base; '-' characters are skipped.
    Raises ValueError when the sequence has fewer than n+1 non-gap bases.
    """
    seen = 0
    for idx, base in enumerate(gappedseq):
        if base == '-':
            continue
        if seen == n:
            return idx
        seen += 1
    raise ValueError(
        "Could not find {0}'th non-gapped base in sequence".format(n))
2552adbc19ad528c44a1c9ff013c04789581fb11
127,537
from typing import Iterable


def single_item_list(iterable: Iterable) -> bool:
    """Return True iff ``iterable`` yields exactly one truthy element.

    See: https://stackoverflow.com/a/16801605/7322852

    Parameters
    ----------
    iterable: Iterable

    Returns
    -------
    bool
    """
    iterator = iter(iterable)
    # The first any() consumes the iterator up to (and including) the
    # first truthy element, if there is one.
    found_one = any(iterator)
    # The second any() scans the remainder for a second truthy element.
    found_another = any(iterator)
    return found_one and not found_another
23340a4793bc034794795fd0d4e57c5ec286d4e7
127,543
def get_energy(pos, vel):
    """Calculate the total energy in the (4-body) system.

    Per body, the potential term is the sum of absolute coordinates and
    the kinetic term is the sum of absolute velocity components; the
    body's energy is their product, summed over all four bodies.
    """
    total = 0
    for body in range(4):
        pot = sum(abs(pos[body][axis]) for axis in range(3))
        kin = sum(abs(vel[body][axis]) for axis in range(3))
        total += pot * kin
    return total
085e9b375757529904083439dfa7344df606867a
127,544
def resolve(obj, attr, fallback=None):
    """Resolve ``obj.attr`` to a value, calling it if it is callable.

    :param obj: source object (may be None)
    :param attr: a string name of a property or function
    :param fallback: the value to return if none can be resolved
    :return: the resolved (possibly called) value, or ``fallback`` when
        the object is None or the attribute is missing
    """
    if obj is None:
        return fallback
    value = getattr(obj, attr, fallback)
    return value() if callable(value) else value
943d14fc4584eec1f7e14df52aedce8ce04db096
127,546
def _extract_immediate(node): """Extracts an immediate value from a node. Supported node types: prim::Constant prim::ListConstruct """ # Constant if node.kind() == "prim::Constant": # Try to extract as different types. try: return node.t("value") except RuntimeError: pass try: return node.f("value") except RuntimeError: pass try: return node.i("value") except RuntimeError: pass try: return node.s("value") except RuntimeError: pass return None # List elif node.kind() == "prim::ListConstruct": return [_extract_immediate(i.node()) for i in node.inputs()] else: raise ValueError("Unrecognized immediate input node: {!r}".format(node))
ce35fec81e18ed1aa1e8ce1b2ed4d7c78b348ab9
127,549
def record_clock_sync_marker(syncId: str) -> dict:
    """Build the CDP command to record a clock sync marker in the trace.

    Parameters
    ----------
    syncId: str
        The ID of this clock sync marker
    """
    params = {"syncId": syncId}
    return {"method": "Tracing.recordClockSyncMarker", "params": params}
7d7e18e20fcf8a6141971d7347dc09deaaf859c8
127,552
def get_vocab_path(cfg):
    """Get the path to the list of vocabularies from the configuration.

    :param cfg: configuration object exposing a ``vocab_path`` attribute.
    :return: the configured vocabulary path.
    """
    # Simplified: the original initialized vocab_path to None and then
    # unconditionally overwrote it via ``if not vocab_path`` -- dead code.
    return cfg.vocab_path
d2e9673a01906dbeafe04de71460d577f794e2e2
127,557
def list_all_equal(data):
    """
    Check if all of the elements of a list are the same.

    Parameters
    ----------
    data : list of 1D
        The list of elements to compare

    Returns
    -------
    equivalence : bool
        True if the elements are all the same (vacuously True for an
        empty list)

    Notes
    -----
    Based on https://stackoverflow.com/questions/3844801
    """
    # Robustness fix: the original raised IndexError on an empty list;
    # an empty list is vacuously all-equal.
    if not data:
        return True
    return data.count(data[0]) == len(data)
ed7a9030cf23128a49e27462dbfed14beba6060e
127,558
def get_center(size, scale, crpix, crval):
    """Return the world coordinate of the center of the map.

    Computed from the map size (pixels), pixel scale, 1-based reference
    pixel ``crpix`` and reference value ``crval``.
    """
    center_offset = scale * (size - 1) / 2
    reference_offset = (crpix - 1) * scale
    return center_offset + crval - reference_offset
56fa38456a5d541bcfcddf7a48a95c837d8b62de
127,560
def user_identity_lookup(user):
    """Return the field identifying the JWT subject: the user's id."""
    identity = user.id
    return identity
2c23079edaaaa4bbe45ce78b93ac17fabc765730
127,561
def fid_to_filter_ztf(fid: int):
    """Convert a fid to a ZTF filter name.

    In Fink alert data, the fid identifies one of the 3 filters used by
    the ZTF telescope.

    Parameters
    ----------
    fid : int
        id of a filter in an alert

    Returns
    ----------
    filter : str
        name of the filter (None for unknown ids)
    """
    fid_to_name = {1: "ztfg", 2: "ztfr", 3: "ztfi"}
    return fid_to_name.get(fid)
cdc9a2738624f98e878eab4d97eda5ade5533d2d
127,575
def get_force_coefficients(*forces, coeff=1.0):
    """Convert forces to force coefficients.

    Parameters
    ----------
    forces : tuple of numpy.ndarray objects
        The forces.
    coeff : float (optional)
        The scaling coefficient; default: 1.0.

    Returns
    -------
    force_coeffs : tuple of numpy.ndarray objects
        The force coefficients.
    """
    # Bug fix: the original returned a one-shot generator expression even
    # though the docstring promises a tuple; materialize it so the result
    # can be indexed and iterated more than once.
    return tuple(coeff * f for f in forces)
ef6236323b7cbab343a4bd3721e89b0175921307
127,576
from typing import List


def polygon2box(polygon: List[List[int]]):
    """Convert a polygon to a bounding box [x_min, y_min, x_max, y_max].

    Args:
        polygon: List of x, y pairs [[x0, y0], [x1, y1], .... [x0, y0]]
    """
    assert polygon, "Empty sequence"
    xs = [point[0] for point in polygon]
    ys = [point[1] for point in polygon]
    return [min(xs), min(ys), max(xs), max(ys)]
4287890b9c0b5cb3ba5dbeda861650c0239279aa
127,577
from typing import Any def _custom_boolean_deserialization(value: Any) -> bool: """Deserialize a boolean, allowing for common string or int representations.""" true_values = [1, "1", "true", "True", True] false_values = [0, "0", "false", "False", False] if value in true_values: return True elif value in false_values: return False else: raise ValueError( f"Received unexpected GraphQLBoolean value {value} of type {type(value)}. Expected one " f"of: {true_values + false_values}." )
0ffe9cafa8c55cd1ee7b1b956c4383c67ffd2ff6
127,578
def plot_hist_basic(df, col):
    """Return a Matplotlib axis object with a histogram of the data in col.

    Plots a histogram from the column col of dataframe df.

    Parameters
    ----------
    df: Pandas DataFrame
    col: str
        Column from df with numeric data to be plotted

    Returns
    -------
    ax: Matplotlib axis object
    """
    data = df[col]
    # Compatibility fix: the ``normed`` keyword was removed in
    # Matplotlib 3.1; ``density=True`` is the supported equivalent for a
    # probability-density histogram.
    ax = data.hist(bins=20, density=True, edgecolor='none',
                   figsize=(10, 7), alpha=.5)
    ax.set_ylabel('Probability Density')
    ax.set_title(col)
    return ax
8930adda938501c80087c4c6eecebefc34d2da28
127,579
def save_fields_to_vocab(fields):
    """
    fields: a dictionary whose keys are field names and whose values are
            Field objects
    returns: a list of (field name, vocab) pairs for the fields that
             have a vocabulary
    """
    pairs = []
    for name, field in fields.items():
        # Only fields that actually carry a 'vocab' instance attribute.
        if field is not None and 'vocab' in field.__dict__:
            pairs.append((name, field.vocab))
    return pairs
173f23c112ca340c8b9d9fecb8c87bdc44f7a1c0
127,583
def distribute_calib_tensors(calib_tensors, calib_cfg, tensor_to_node):
    """Distribute calibration tensors by their node's algorithm.

    Args:
        calib_tensors: tensors to distribute.
        calib_cfg (dict): calibration configuration; supplies the
            'calib_kl_nodes' and 'calib_minmax_nodes' node collections.
        tensor_to_node (dict): tensor to node mapping.

    Returns:
        tuple: (set of kl tensors, set of minmax tensors).
    """
    calib_tensors = set(calib_tensors)
    kl_tensors = set()
    minmax_tensors = set()
    for cl in calib_tensors:
        assert cl in tensor_to_node, '`calib_tensors` entry matched no node. Entry: {}'.format(cl)
        node = tensor_to_node[cl]
        if node in calib_cfg['calib_kl_nodes']:
            kl_tensors.add(cl)
        if node in calib_cfg['calib_minmax_nodes']:
            minmax_tensors.add(cl)
    # A tensor must be calibrated by exactly one algorithm.
    overlap = kl_tensors & minmax_tensors
    assert not overlap, 'same `calib_tensors` entries matched both kl ' \
        'and minmax nodes. Entries: {}'.format(overlap)
    rest = calib_tensors - (kl_tensors | minmax_tensors)
    assert not rest, 'Unexpected `calib_tensors` entries. Entries: {}'.format(rest)
    return (kl_tensors, minmax_tensors)
a74c73c7d77e60ffbdc30227882750ad9b3a5886
127,584
import re


def preprocess(email):
    """
    Preprocess (simplify) a raw email.

    Lower-cases the text, strips HTML tags, then normalizes numbers,
    URLs, e-mail addresses and dollar signs to placeholder tokens.

    Args:
        email (str): Raw e-mail

    Returns:
        Processed (simplified) email
    """
    # (pattern, replacement) pairs, applied in order. Order matters:
    # digit runs are collapsed before the URL rule, for instance.
    rules = (
        ('<[^<>]+>', ' '),                      # strip html tags
        ('[0-9]+', 'number'),                   # digit runs -> 'number'
        ('(http|https)://[^\s]*', 'httpaddr'),  # URLs -> 'httpaddr'
        ('[^\s]+@[^\s]+', 'emailaddr'),         # emails -> 'emailaddr'
        ('[$]+', 'dollar'),                     # '$' runs -> 'dollar'
    )
    email = email.lower()
    for pattern, replacement in rules:
        email = re.sub(pattern, replacement, email)
    return email
952f6de70700583c3192a1713731897564851583
127,587
from typing import Iterable


def count(iterable: Iterable) -> int:
    """Count items in an iterable, as len doesn't work on generators."""
    total = 0
    for _ in iterable:
        total += 1
    return total
2450ec9bbb46ac1d9f9a53f1905d730f590fc3e0
127,589
def assumed(e, name):
    """
    Return True if the given assumption is true about the sympy
    expression.

    Looks up the attribute ``name`` (e.g. 'is_Add') on ``e``; a missing
    attribute counts as False.
    """
    flag = getattr(e, name, False)
    return flag
019c373b554deb26fdb981a4f1d6e68ac2a65e5f
127,590
def k_to_c(tempe):
    """Convert a temperature from Kelvin to Celsius."""
    celsius = tempe - 273.15
    return celsius
d31f5e0dffe759affe25d450ab133e3744624c43
127,594
import threading


def thread(target, args=()):
    """Start a daemon thread running ``target(*args)``.

    The thread is marked daemon so it only lives while the main thread
    is running. Returns the started Thread object.
    """
    worker = threading.Thread(target=target, args=args, daemon=True)
    worker.start()
    return worker
07dbad36429559ad2f79acb942b0d4e52202e1bd
127,596
def parse_requirements(filename):
    """Load requirements from a pip requirements file.

    Blank lines and comment lines (starting with '#', possibly
    indented) are skipped.
    """
    with open(filename) as f:
        # Bug fix: strip BEFORE filtering. The original filtered the raw
        # lines, so blank lines produced empty entries and indented
        # comment lines leaked through.
        stripped = (line.strip() for line in f)
        return [req for req in stripped if req and not req.startswith("#")]
7db56fcf4fc57abd2b5b23a3dac82285a5b1fe3b
127,597
def _get_kwargs(parsed_args, arg_group): """ helper function to extract a list of (keyword) arguments to a dictionary """ return {arg[1][2:]: parsed_args.get(arg[1][2:]) for arg in arg_group}
c5c0871b6a2bd6b5e2ff1d2de6d4cb44aa0c228a
127,599
def _ComputeMissingKeys(key_path, options, defaults): """Helper functions to compute which keys a invalid. @param key_path: The current key path (if any) @param options: The user provided options @param defaults: The default dictionary @return: A list of invalid keys """ defaults_keys = frozenset(defaults.keys()) invalid = [] for key, value in options.items(): if key_path: new_path = "%s/%s" % (key_path, key) else: new_path = key if key not in defaults_keys: invalid.append(new_path) elif isinstance(value, dict): invalid.extend(_ComputeMissingKeys(new_path, value, defaults[key])) return invalid
945df7d0dfddcdc4b3f6bc4a6f2833e349e211a0
127,605
def swap_to_dcf(swap_rates):
    """
    Helper function transforming sorted par swap rates to discount
    factors (bootstrap).

    :param swap_rates: par swap rates, ordered by maturity
    :return: discount factors
    """
    dcf = [0] * len(swap_rates)
    for index, rate in enumerate(swap_rates):
        if index == 0:
            dcf[index] = 1. / (1. + rate)
        else:
            # Strip the value of the earlier coupons, then discount the
            # final cash flow at this maturity's rate.
            dcf[index] = (1 - (rate * sum(dcf[0:index + 1]))) / (1. + rate)
    return dcf
dd5e50b660ff74aaf060e3a973efad8c346ca217
127,611
def get_names(lines):
    """
    Retrieve the names of the sequences in the output.

    Names are taken from the second column of every line after the
    'Sequences loaded ...' marker, stopping at the first one-character
    line.
    """
    names = []
    capture = False  # renamed from `next`, which shadowed the builtin
    for line in lines:
        if capture:
            if len(line) == 1:
                break
            names.append(line.split()[1])
        if line.startswith('Sequences loaded ...'):
            capture = True
    return names
b28631ba218f6118d7c7cf9220ba632c36e17585
127,613
def uch_yang(xy, d_pipe, gs, rhos, ut):
    """
    Residual equations for choking voidage and velocity (Yang 1975),
    intended for use with SciPy's fsolve.

    Parameters
    ----------
    xy
        Tuple (ep, uch) where ep = voidage [-] and uch = choking
        velocity [m/s]
    d_pipe : float
        Internal pipe diameter [m]
    gs : float
        Solids flux [kg/(s m^2)]
    rhos : float
        Density of particle [kg/m^3]
    ut : float
        Terminal velocity [m/s]

    Returns
    -------
    f1, f2 : floats
        Residuals, both zero at the choking condition.

    References
    ----------
    Yang, 1975. A Mathematical Definition of Choking Phenomenon and a
    Mathematical Model for Predicting Choking Velocity and Choking
    Voidage. AIChE Journal, 21, 5, 1013-1015.
    """
    ep, uch = xy
    g = 9.81  # acceleration due to gravity, m/s^2
    slip = uch / ep - ut  # gas/solid slip velocity
    f1 = 2 * g * d_pipe * ((ep**-4.7) - 1) - 0.01 * slip**2
    f2 = gs - slip * rhos * (1 - ep)
    return f1, f2
acc65250d37117ae946dbf5a2f57d4a038916fc2
127,634
def calculate_cpu_mhz(cpu_mhz, previous_time, current_time,
                      previous_cpu_time, current_cpu_time):
    """Calculate the average CPU utilization in MHz for a period of time.

    :param cpu_mhz: frequency of a core of the physical CPU in MHz (int).
    :param previous_time: start of the window, seconds (float).
    :param current_time: end of the window, seconds (float).
    :param previous_cpu_time: domain CPU time at window start, ns.
    :param current_cpu_time: domain CPU time at window end, ns.
    :return: average CPU utilization in MHz (int, >= 0).
    """
    cpu_time_delta = float(current_cpu_time - previous_cpu_time)
    # CPU time is in nanoseconds; scale wall time to match.
    wall_time_ns = (current_time - previous_time) * 1000000000
    return int(cpu_mhz * cpu_time_delta / wall_time_ns)
177f208f29e1246c26ca7bcfe32d3b96fd580ccb
127,635
def append_dim(arg):
    """Append one singleton (fake) dimension to the end of ``arg``."""
    new_shape = list(arg.shape)
    new_shape.append(1)
    return arg.reshape(new_shape)
17002f008bd293a1050aedf9be13ca5eb94289e7
127,639
def processor_nested_arrays(narrays):
    """Flatten an array of arbitrarily nested arrays of integers into a
    flat array of integers.

    Args:
        narrays: nested arrays (lists that may contain further lists).

    Returns:
        The flat list of integers.
    """
    # Bug fix: the original only unrolled two levels of nesting despite
    # the documented "arbitrarily nested" contract; recurse instead.
    result_array = []
    for value in narrays:
        if isinstance(value, list):
            result_array.extend(processor_nested_arrays(value))
        else:
            result_array.append(value)
    return result_array
04b7f8cbba76810b6d835b6f931a9b0e25c2e2bb
127,641
def micro_jy_to_dn(flux, zp, err=None):
    """Convert a flux in µJy to DN units of an image.

    Parameters
    ----------
    flux : array
        Flux in µJy per pixel.
    zp : array or scalar
        Zeropoint for image.
    err : array
        Uncertainties in µJy (optional), interpreted as 1-sigma Gaussian
        uncertainties.

    Returns
    -------
    dn, or (dn, dn_err) when ``err`` is given.
    """
    # Conversion factor from µJy to DN for this zeropoint.
    conversion = 10. ** (-0.4 * (zp + 48.6) + 6. + 23.)
    dn = flux / conversion
    if err is None:
        return dn
    return dn, err / conversion
175913dced850349bc3686ebd368f89783be19aa
127,651
def get_links(sp_config):
    """
    Return the (resource, pattern) 'links' entries for an SP config.

    A dict-valued 'links' entry is converted to its items view; any
    other value is passed through unchanged.
    """
    links = sp_config.get('links', [])
    # Exact-type check kept deliberately (dict subclasses pass through).
    if type(links) is dict:
        links = links.items()
    return links
166556a9aea33257509eddbc981d0f48a7e7f906
127,657
def vflip(tensor):
    """Flip a tensor vertically (reverse along dimension 1)."""
    return tensor.flip(1)
ca905415400b35548260abddfbc1bda9f34ae744
127,659
from pathlib import Path
import re


def global_merge(src: str, dest: str, path: Path):
    """Merge function for the Ruby microgenerator.

    Preserves destination changelog and version.rb files (the latter
    only when it already pins a VERSION constant), but allows new source
    content through in all other cases.

    Args:
        src: Source gemspec content from gapic
        dest: Destination gemspec content
        path: Destination gemspec path

    Returns:
        The merged gemspec content.
    """
    filename = path.name
    if filename == "CHANGELOG.md":
        return dest
    if filename == "version.rb":
        version_pattern = re.compile(r'^\s+VERSION = "[\d\.]+"',
                                     flags=re.MULTILINE)
        if version_pattern.search(dest):
            return dest
    return src
3aea1096427478326c03c7cb597e331ddf67c3a5
127,661
def maybe_list(l):
    """Wrap a scalar in a one-element list; pass None and lists through."""
    if l is None or isinstance(l, list):
        return l
    return [l]
d2cb2e43e5ac8a74b52b0107637f3f61688a72b0
127,665
def get_doc_by_input_hash(dataset, hash):
    """Return the doc from ``dataset`` whose "_input_hash" equals
    ``hash``.

    Assumes there will only be one match; the first is returned.
    """
    matches = [doc for doc in dataset if doc["_input_hash"] == hash]
    return matches[0]
a5fa3974cf1a454c1ebb367e1580994483e0bf65
127,666
def apply_masks(time, flux, flux_err, transit_masks):
    """Apply transit masks to the unflattened light curve.

    Args:
        time (array): The time array.
        flux (array): The flux array.
        flux_err (array): The flux_err array.
        transit_masks (list): A list of boolean masks, each the same
            length as ``time``, marking in-transit points to remove.

    Returns:
        masked_time (array): The masked time array.
        masked_flux (array): The masked flux array.
        masked_flux_err (array): The masked flux_err array.
    """
    # Bug fix: the original applied each full-length mask to arrays that
    # had already been shortened by the previous mask, which misindexes
    # (or raises) for any second mask. Combine the masks first, then
    # index once.
    combined = None
    for mask in transit_masks:
        combined = mask if combined is None else (combined | mask)
    if combined is None:
        # No masks: return copies, matching the original's `* 1` copies.
        return time * 1, flux * 1, flux_err * 1
    keep = ~combined
    return time[keep], flux[keep], flux_err[keep]
c4df6c4fcca0df7de3e344204927515d81320b4d
127,672
import aiohttp


async def get(code: str) -> dict:
    """Look up a deck code.

    Makes two calls to the shadowverse-portal API: the first resolves
    the deck code to a hash, the second fetches the deck for that hash.

    :param code: the deck code to import.
    :return: {"hash": ..., "deck": ...} on success, or
        {"errors": [...]} when the import endpoint reports errors.
    """
    async with aiohttp.ClientSession() as session:
        url = (
            "https://shadowverse-portal.com/api/v1/deck/import"
            f"?format=json&deck_code={code}"
        )
        async with session.get(url) as response:
            json = await response.json()
        if json["data"]["errors"]:
            return {"errors": json["data"]["errors"]}
        # The import endpoint returns a hash identifying the deck.
        h = json["data"]["hash"]
        url = (
            "https://shadowverse-portal.com/api/v1/deck"
            f"?format=json&lang=en&hash={h}"
        )
        async with session.get(url) as response:
            json = await response.json()
        deck = json["data"]["deck"]
    return {"hash": h, "deck": deck}
c0c9cf391da6baa125292772fdc7ac5a12053ba7
127,674
def mix(color_1, color_2, weight_2):
    """
    Blend between two colors with a given ratio.

    :param color_1: first color, as an (r,g,b) tuple
    :param color_2: second color, as an (r,g,b) tuple
    :param weight_2: Blend weight (ratio) of second color, clamped to
        [0.0, 1.0]
    :return (r,g,b) tuple, blended color
    """
    weight_2 = min(1.0, max(0.0, weight_2))
    weight_1 = 1.0 - weight_2

    def blend(c1, c2):
        # Per-channel weighted average, truncated to int.
        return int(c1 * weight_1 + c2 * weight_2)

    return (blend(color_1[0], color_2[0]),
            blend(color_1[1], color_2[1]),
            blend(color_1[2], color_2[2]))
6b3fd2da4dee003fabd73a3c4ce10e9515fde8e7
127,678
from typing import Dict
from typing import Any
from typing import Iterable


def check_same_and_different(a: Dict[str, Any], b: Dict[str, Any],
                             same: Iterable[str],
                             different: Iterable[str]) -> bool:
    """Check property agreement and disagreement between two dicts.

    Return True iff for all properties in ``same`` that are present in
    ``a``, they exist in ``b`` with an equal value, and for all
    properties in ``different`` that are present in ``a``, they exist in
    ``b`` with a different value.
    """
    for prop in same:
        if prop in a and (prop not in b or a[prop] != b[prop]):
            return False
    for prop in different:
        if prop in a and (prop not in b or a[prop] == b[prop]):
            return False
    return True
0b4610784950ec1d75f0181e4d23e07adf6bc954
127,680
def starts_with(left: str, right: str) -> str:
    """Render a ``left STARTS WITH right`` query expression."""
    return '{} STARTS WITH {}'.format(left, right)
393ed46680d13130eee846e2a7c656f3f989425b
127,682
def two_ints(value):
    """Type for argparse. Demands 2 integers separated by a comma.

    Raises ValueError when ``value`` is not exactly two comma-separated
    integers.
    """
    first, second = value.split(',')
    return int(first), int(second)
f9a9f413f8394841961885da830410ce0ac859cd
127,684
def build_from_config(config, registry, default_args=None):
    """Build a callable object from a configuration dict.

    Args:
        config (dict): Configuration dict. It should contain the key
            "name".
        registry (:obj:`Registry`): The registry to search the name
            from.
        default_args (dict, optional): Default initialization arguments.

    Raises:
        KeyError: if the (normalized) name is not in the registry.
    """
    assert isinstance(config, dict) and 'name' in config
    assert isinstance(default_args, dict) or default_args is None
    # Dashes are normalized to underscores before the registry lookup.
    name = config['name'].replace('-', '_')
    obj = registry.get(name)
    if obj is None:
        raise KeyError(f'{name} is not in the {registry.name} registry')
    args = dict(default_args) if default_args is not None else {}
    args.update(config.get('params', {}))
    return obj(**args)
89b698e79b014e0b2ac2b05d394b56ba96057d97
127,685
def looks_like_analysis_string(text: str) -> bool:
    """Return True if the cell might be analysis (contains the lemma
    template placeholder)."""
    placeholder = "${lemma}"
    return placeholder in text
9a6834d4a63391e6837e9ee6e39ae3779e4a7e34
127,692
def linfunc(x, a, b):
    """Linear (first-order) fitting function: a*x + b."""
    return b + a * x
a0827f8427e87eb4f38492d4d5ee4e55a5b5701c
127,693
import gzip


def open_mapping_file(mapping_filename):
    """
    Open the NCBI ID-to-taxonomy mapping file for text reading.

    Gzipped files (extension ending in "gz" or "gzip", matched
    case-insensitively) are handled transparently.

    PARAMETERS:
        mapping_filename (str): the filename of the mapping file

    RETURNS:
        mapping_file (File): the mapping file open and ready for reading
    """
    lowered = mapping_filename.lower()
    if lowered.endswith(("gz", "gzip")):
        return gzip.open(mapping_filename, 'rt')
    return open(mapping_filename, 'rt')
3bb420a41123ffc1e7a76ea421a46a2ee893a2c1
127,702
def is_threatening(x1: int, y1: int, x2: int, y2: int) -> bool:
    """
    Check if the positions are threatening each other (same row, column
    or diagonal, as for chess queens).

    Examples
    --------
    >>> is_threatening(0, 1, 1, 0)
    True
    """
    if x1 == x2 or y1 == y2:
        return True
    # Major diagonals share a constant coordinate difference; minor
    # diagonals share a constant coordinate sum. This is equivalent to
    # normalizing both points to their diagonal's origin and comparing.
    return (x1 - y1 == x2 - y2) or (x1 + y1 == x2 + y2)
90779ad3268aeb5eeb1962ac3623904694f5d4bb
127,707
def _interpolate_prc(prc): """Interpolate the precision-recall curve. Replaces precision with interpolated precision: the maximal precision that can be achieved at given or higher recall. Parameters ---------- prc : list of (confidence, precision, recall) Points of precision-recall curve. Returns ------- iprc : list of (confidence, interpolated_precision, recall) """ iprc = [] interpolated_precision = 0 for c, precision, recall in reversed(prc): interpolated_precision = max(precision, interpolated_precision) iprc.append((c, interpolated_precision, recall)) return list(reversed(iprc))
b2e6b4794585b3cd912dbd3433225e05669755af
127,710
def find_tri_div(n):
    """
    Find the smallest triangular number with more than `n` divisors.

    Args:
        n: Divisor-count threshold; the result has strictly more than
           `n` divisors. For n < 1 the first triangular number, 1, is
           returned.

    Returns:
        The smallest triangular number whose divisor count exceeds `n`.
    """
    if n < 1:
        return 1

    # Start with the 2nd triangular number.
    num = 2
    tri = 3
    while True:
        # Count divisors by trial division up to sqrt(tri). A perfect
        # square contributes its root only once; the original shrinking-
        # limit loop counted it twice (e.g. 36 -> 10 instead of 9).
        div_cnt = 0
        i = 1
        while i * i <= tri:
            if tri % i == 0:
                div_cnt += 1 if i * i == tri else 2
            i += 1
        if div_cnt > n:
            return tri
        num += 1
        tri += num
628b1e7a62a5dd438be8ebcdeb5a67800808d024
127,712
def vaporThermalConductivity(T, vTCP):
    """
    vaporThermalConductivity (W/m/K) = A + B*T + C*T^2

    Parameters
        T, temperature in Kelvin
        vTCP, regression coefficients A=vTCP[0], B=vTCP[1], C=vTCP[2]
    Returns
        vapor thermal conductivity in W/m/K at T
    """
    a, b, c = vTCP[0], vTCP[1], vTCP[2]
    return a + b * T + c * T**2
249dac125e064335220eed30ea64105064eeb93a
127,713
def get_voc_size(serie):
    """
    Return the vocabulary size: the number of distinct words across all
    per-user word lists in `serie`.
    """
    vocabulary = {word for user_words in serie for word in user_words}
    return len(vocabulary)
2f1fd895c29d1f252d3c05f6038efd5d233d506a
127,714
def hara_utility(x: float, a: float = 1, b: float = 1) -> float:
    """Simple HARA utility function according to Stott 2006."""
    shifted = b + x
    return -(shifted ** a)
9eebbe647e0e4c658ea6030ab32c701d6bfac846
127,718
def construct_console_connect_url(instance_id: str, region: str = "us-east-1") -> str:
    """Assemble the AWS console instance connect url with the current instance id and region."""
    return (
        "https://console.aws.amazon.com/ec2/v2/home"
        f"?region={region}#ConnectToInstance:instanceId={instance_id}"
    )
dbbb284a1b5bbc7eb75f0646d8516d2cf83f70b1
127,720
import requests


def get_product_sku(product_id, size, color=None):
    """Get product's SKU

    Args:
        product_id (str): J.Crew's product ID (as found in the product URL)
        size (str): The product size, exactly as it appears on the product page
        color (str): The product color

    Returns:
        str: J.Crew SKU for the specific color and size of a product

    Raises:
        Exception: The provided arguments do not correspond to an SKU
    """
    product = requests.get('https://www.jcrew.com/data/v1/US/products/%s' % product_id)
    product.raise_for_status()
    product_json = product.json()

    # Check size exists.
    if size not in product_json['sizesList']:
        # Bug fix: the valid-size list was passed as a second argument to
        # Exception instead of being interpolated into the message.
        raise Exception("Not a valid size; valid sizes are: %s"
                        % ', '.join(product_json['sizesList']))

    # Select default color when the product only comes in one.
    if color is None and len(product_json['colorsList']) == 1:
        color = product_json['defaultColorCode']

    # Check color exists.
    if color not in product_json['colorsMap'].keys():
        raise Exception("Not a valid color; valid colors are: %s"
                        % ', '.join(product_json['colorsMap'].keys()))

    return product_json['sizesMap'][size][color]
7a681930fd5bc32731dd940cb57dd0855ae5320c
127,723
def _validateVariant(self): """ Check current variant and return it """ if self.variant is None: self.fatal('No variant!') buildtype = self.variant return buildtype
15100ca4d814fa583dd2fbf68c55d92c510172c3
127,725
def is_number_in_intervals(intervals: list, target: int):
    """
    Return True when `target` falls inside any closed [low, high] interval.

    Example:
        intervals: [[1, 6], [45, 48], [110, 112]]
        target: 2
        return: True
    """
    return any(iv[0] <= target <= iv[1] for iv in intervals)
c2c2362b92d3e522d97e23c15fef40cc3a26b4d9
127,730
def branch(path):
    """
    Builds a tuple containing all paths above this one.

    None of the returned paths, nor the given path, is required to
    actually have a corresponding Node.

    Example:
    >>> branch("/a/b/c/")
    ('/', '/a/', '/a/b/', '/a/b/c/')
    """
    segments = [seg for seg in path.split('/') if seg]
    ancestors = ['/']
    for depth in range(1, len(segments) + 1):
        ancestors.append('/' + '/'.join(segments[:depth]) + '/')
    return tuple(ancestors)
60fbc8b622115a8a0f3fee1a9cd481fdafb0751e
127,735
def make_unique(qs, unique_var):
    """Make the queryset unique by unique_var, sort by unique_var"""
    # Preview mode in Wagtail may hand us a plain sequence without
    # `.distinct`; in that case there is nothing we can deduplicate.
    if not hasattr(qs, "distinct"):
        return qs
    pks = qs.distinct(unique_var).order_by(unique_var).values_list('pk', flat=True)
    return qs.filter(pk__in=pks)
62b1bb9453d6eba885906ec3c67851a75a5f781d
127,736
def keys_as_sorted_list(dict):
    """
    Return the keys of the given mapping as a sorted list.

    Note: the parameter name shadows the builtin ``dict``; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    # sorted() already returns a list, and iterating a mapping yields its
    # keys, so the intermediate list(....keys()) copy was redundant.
    return sorted(dict)
9ee4f999ff185258969fdad64fc7ba48a19ab10e
127,738
import random import string def _temp_name() -> str: """ Generate a temporary name. Prefixes a string of 10 random characters with 'temp_db' & returns it. """ random_letters = [random.choice(string.ascii_lowercase) for _ in range(10)] rnd = "".join(random_letters) tempname = 'temp_db' + rnd return tempname
74f2f89763be3cb9e902bf56cf5b2db85adc6781
127,739
def _feature_label_split(data_df, label_column): """Split the DataFrame into features and label respectively. Args: data_df: (pandas.DataFrame) DataFrame the splitting to be performed on label_column: (string) name of the label column Returns: A Tuple of (pandas.DataFrame, pandas.Series) """ return data_df.loc[:, data_df.columns != label_column], data_df[label_column]
cdb4f8e3110fda42ca8cd0c9916c594332229747
127,740
def solve(lim = 4 * 1000 * 1000):
    """
    Sum the even Fibonacci numbers below `lim`.

    :param lim: Exclusive upper bound for the Fibonacci terms summed.
    :returns: The sum of the even Fibonacci numbers below `lim`.
    """
    a, b = 0, 1
    total = 0  # renamed from `sum`, which shadowed the builtin
    while b < lim:
        if b % 2 == 0:
            total += b
        a, b = b, a + b  # <-- tuple assignment to avoid tmp var
    return total
70ca663e76ddfe11964be18c7a63fa25bb88403a
127,742
def commajoin(*items):
    """
    Join the string forms of all positional arguments with commas.
    """
    return ",".join(str(item) for item in items)
051de682a0f5588d61a7c0cbf72b0756235d61d0
127,744
from typing import List


def getLongestCommonPrefix(strs: List[str]) -> str:
    """
    >>> getLongestCommonPrefix(["abc_aaa","abc_bb","abc_aaaa"])
    'abc_'
    >>> getLongestCommonPrefix(["b","abc_bb","abc_aaaa"])
    ''
    >>> getLongestCommonPrefix([])
    ''
    """
    if not strs:
        return ""
    # zip(*strs) yields one tuple per character column, stopping at the
    # length of the shortest string.
    prefix_chars = []
    for column in zip(*strs):
        if any(ch != column[0] for ch in column):
            break
        prefix_chars.append(column[0])
    return "".join(prefix_chars)
23da396c3eb176b8aeb577eed73c5ccd7d1cbc1b
127,750
from pathlib import Path
import re
import json


def word_counter(json_flag, filename):
    """Counts unique words in a file, can output common or all words.

    The occurrence of words (case is ignored) is counted. By default only
    the most common words are printed, defined as words occurring with a
    frequency > 0.1% of all words in the file, one per line as
    "word - {count}". With json_flag set, ALL words and their counts are
    printed as a single JSON object instead.

    This function assumes English text (no letters with diacritics).

    Args:
        json_flag (bool/str): Truthy to emit the full JSON frequency map;
            falsy to emit only the most common words.
        filename (string): Name of the ASCII or UTF-8 text file to parse.

    Returns:
        True for success
    """
    contents = Path(filename).read_text().lower()

    # Raw string for the regex: "\S" in a plain literal is an invalid
    # escape sequence (SyntaxWarning on modern Pythons).
    # Matches words containing "'" or "-", or plain alphabetic words.
    words = re.findall(r"(?=\S*['-])[a-z'-]+|[a-z]+", contents)

    # Count occurrences, keyed by word (dict preserves first-seen order).
    adict = {}
    for key in words:
        adict[key] = adict.get(key, 0) + 1

    # Words whose frequency exceeds 0.1% of the words in the file.
    default_dict = {}
    len_words = len(words)
    if len_words > 0:
        for key, count in adict.items():
            if count / len_words > 0.001:
                default_dict[key] = count

    if json_flag:
        if adict:
            print(json.dumps(adict))
    elif default_dict:
        for key in default_dict:
            print("%s - %d" % (key, default_dict[key]))
    return True
6954295d4bb6c3672b117db600d2cc5643be197b
127,751
def formathex(n):
    """
    Render integer `n` as an uppercase hex string prefixed with "0x",
    zero-padded to at least two digits (e.g. 5 -> '0x05').
    """
    return f"0x{n:02X}"
932a561d8a2aac6be217da26294b43f18525e466
127,758
import hashlib


def hashed_id(*parts):
    """
    Create a hashed id from multiple stringifying parts.

    It is crucial that if any one of the parts changes meaningfully,
    its stringification changes too.

    :param parts: An iterable of objects which can be converted to strings
    :return: A string which can be used as an id
    """
    digest = hashlib.sha1()
    for part in parts:
        digest.update(str(part).encode())
    return digest.hexdigest()
278005c890d7a12a7eddb2982ec53654156b7e6c
127,762
def pulse_source_info(source, volume=False, verbose=False):
    """
    Generates a dictionary from the PulseSourceInfo object.

    :param source: the PulseSourceInfo object to use
    :type source: pulsectl.PulseSourceInfo
    :param volume: whether to include the (average) volume across all channels
    :type volume: bool
    :param verbose: whether to generate a verbose result
    :type verbose: bool
    :return: dictionary of info
    :rtype: dict
    """
    info = {}
    port = source.port_active
    if verbose:
        device = {'name': source.name, 'description': source.description}
        if volume:
            device['volume'] = source.volume.value_flat
        info['device'] = device
        if port is not None:
            info['port'] = {'name': port.name, 'description': port.description}
    else:
        info['device'] = source.description
        if port is not None:
            info['port'] = port.description
        if volume:
            info['volume'] = source.volume.value_flat
    return info
1802b3e96e7fc98f5ca11046042920011ae61269
127,763
def run(c, *a, **kw):
    """A Context.run or Connection.run with better defaults"""
    defaults = {"echo": True, "pty": True, "replace_env": False}
    for key, value in defaults.items():
        kw.setdefault(key, value)
    return c.run(*a, **kw)
59baf0c45828f81560d9365515c26d4142fa0d10
127,769
def byte_to_bits(b):
    """Convert a byte (0-255) into a list of its 8 bits, MSB first.

    Note: the original used `six.moves.range`, a Python-2 compatibility
    shim; plain `range` is identical on Python 3 and removes the
    third-party `six` dependency.
    """
    return [(b >> shift) & 1 for shift in range(7, -1, -1)]
e09cbfa496e154488f333d207682345ed9ba1f60
127,770
from typing import List import re def _replace_docker_images(content: str, images: List[str]) -> str: """Replace docker images in the given string. Find the provided docker images in the given string by docker image name and replace it with the given images (name + tag). """ for image in images: image_name, _ = image.split(":") if image_name in content: content = re.sub(f"{image_name}:\\S*", image, content, count=1) return content
acda00ff22b48d82389f192388be1ecc197c4583
127,772
def calc_dDdc_fn(c, dc, D_fn):
    """
    Computes dD/dc given a functional form to estimate D(c).

    Uses the forward-difference formula over a step of `dc`
    [m^2/s / kg/m^3].
    """
    return (D_fn(c + dc) - D_fn(c)) / dc
f58df5ceaf253e00643e30ca8338496c60c8bd31
127,776
def darpaNodeIP(node_id, octet=0):
    """Return IP address of radio node on DARPA's network."""
    third_octet = node_id + 100  # nodes map onto the 192.168.10x.x range
    return f'192.168.{third_octet:d}.{octet:d}'
36c44dc92771773d7239e915a68255406842677f
127,779
def compare_to_nist(y_comp, y_nist):
    """
    Compute signed relative error based on NIST value.

    Inputs:
        y_comp: double, array of values to compare with NIST
        y_nist: double, NIST reference value

    Outputs:
        err: double, relative error
    """
    err = (y_comp - y_nist) / (1 + y_nist)
    # Replace exact zeros with a tiny value so downstream consumers
    # (e.g. log-scale plots) never see 0.
    err[err == 0] = 1e-30
    return err
7725f55ed72265684a42bf062e99cee3041a64e2
127,780
def newPossiFinder(bo, i, j):
    """Gives the possible numbers at position (row i, column j) on a
    sudoku grid `bo` (9x9 list of lists, 0 = empty).
    """
    pickable = [False] + [True] * 9  # indexed by digit; 0 is never pickable

    # Digits already used in row i and column j.
    for k in range(9):
        pickable[bo[i][k]] = False
        pickable[bo[k][j]] = False

    # Digits already used in the 3x3 box containing (i, j).
    # Bug fix: the box rows were derived from the column index (and the
    # columns from the row index), so the wrong box was inspected for
    # any cell whose row-block and column-block differ.
    box_row = (i // 3) * 3
    box_col = (j // 3) * 3
    for row in range(box_row, box_row + 3):
        for col in range(box_col, box_col + 3):
            pickable[bo[row][col]] = False

    return [num for num in range(1, 10) if pickable[num]]
b4d554b4b8064debf48ba7a17162a02b59f64787
127,781
import copy


def Compu_B_Backw(MBackw, n):
    """Compute possible positions to shift monomials

    Parameters
    ----------
    MBackw : list
        monomials to be shifted, for example, [[1], [2], [1]]
    n : int
        size of the NLFSR

    Return
    ----------
    BBackw : list
        positions to shift monomials
    """
    # Each monomial's shift is bounded by the distance from its highest
    # variable index to the end of the register: n - 2 - max(monomial).
    return [n - 2 - max(monomial) for monomial in MBackw]
7cdbc1562f1e9368550d7f82b55d1c48eedfe918
127,782
def wsorted(ws):
    """Sort the letters of a word (trailing whitespace is stripped)."""
    return ''.join(sorted(ws.rstrip()))
bdc21ba5f249cb1d4bac561e0afcea719d4edee1
127,785
def layoutTelescopeListFileName(name, label):
    """
    File name for files required at the RayTracing class.

    Parameters
    ----------
    name: str
        Name of the array.
    label: str
        Instance label; omitted from the file name when None.

    Returns
    -------
    str
        File name.
    """
    parts = ["telescope_positions-{}".format(name)]
    if label is not None:
        parts.append("_{}".format(label))
    parts.append(".ecsv")
    return "".join(parts)
e9d68eff7e291c2118fa41bd194d5379345589cb
127,787
def filter_composite_from_subgroups(s):
    """
    Given a sorted list of subgroups, return a string appropriate to provide
    as the a composite track's `filterComposite` argument

    >>> import trackhub
    >>> trackhub.helpers.filter_composite_from_subgroups(['cell', 'ab', 'lab', 'knockdown'])
    'dimA dimB'

    Parameters
    ----------
    s : list
        A list representing the ordered subgroups, ideally the same list
        provided to `dimensions_from_subgroups`. The values are not actually
        used, just the number of items.

    Returns None when fewer than three subgroups are given.
    """
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWZ'
    dims = ['dim{0}'.format(letter) for letter, _ in zip(letters, s[2:])]
    if dims:
        return ' '.join(dims)
7fa30da6fda46e0d8eefa55b38a460bdd2050530
127,788
def hkl_to_xyz(hkl, abasis, bbasis, cbasis):
    """
    Compute reciprocal space coordinates of an hkl given a set of basis vectors

    @param hkl miller index (tuple)
    @param abasis vector a
    @param bbasis vector b
    @param cbasis vector c
    @return reciprocal space coordinates of the hkl
    """
    h, k, l = hkl[0], hkl[1], hkl[2]
    return h * abasis + k * bbasis + l * cbasis
712e3a9bd25e996a49ee3bd71770c442dfb4dbf8
127,791
def mid_point(val_one, val_two):
    """
    :param val_one: lower bound
    :param val_two: upper bound
    :return: the mid point of two bounds (always a float)
    """
    # Explicit float conversion mirrors the original's *1.0 trick so that
    # integer inputs yield a float result.
    return (float(val_one) + float(val_two)) / 2.0
c5c687b6553e37aa914a45d3dedf8ce70f0f99be
127,792
import tempfile


def wrap_binhex(func):
    """
    Convert a function f(in_filename, out_filename) to out_bytes = f(in_bytes)

    The wrapped callable writes `in_bytes` to a temporary file, invokes
    `func` with the input and output temp-file names, and returns the
    produced bytes.
    """
    def new_func(in_bytes):
        # Context managers guarantee the temp files are closed and
        # deleted even if `func` raises; the originals were only
        # reclaimed at garbage-collection time.
        with tempfile.NamedTemporaryFile() as in_file, \
                tempfile.NamedTemporaryFile() as out_file:
            in_file.write(in_bytes)
            in_file.flush()  # make the bytes visible to func's own open()
            in_file.seek(0)
            func(in_file.name, out_file.name)
            out_file.seek(0)
            return out_file.read()
    return new_func
f578b477bfde0def5603efaf9c6b30e8decc2a90
127,795