content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def mybio_to_pymol_selection(entity):
    """Transform a :class:`~luna.MyBio.PDB.Entity.Entity` instance into a
    Pymol selection-expression, which can then be used to select atoms in
    a Pymol session.

    Parameters
    ----------
    entity : :class:`~luna.MyBio.PDB.Entity.Entity`
        An entity to be transformed into a Pymol selection-expression.

    Returns
    -------
    str
        The Pymol selection-expression, e.g. ``"chain A"`` for a chain or
        ``"resn X02 AND res 497 AND chain A"`` for a residue.  An empty
        string is returned for an unrecognized entity level.
    """
    params = {}
    # Entity levels: S=structure, M=model, C=chain, R=residue, A=atom.
    if entity.level == 'S' or entity.level == 'M':
        params[''] = 'all'
    elif entity.level == 'C':
        params['chain'] = entity.id
    elif entity.level == 'R':
        params['resn'] = entity.resname
        # Residue id is a (hetflag, resseq, icode) tuple; append a
        # non-blank insertion code to the residue number.
        params['res'] = str(entity.id[1]) + entity.id[2].strip()
        params['chain'] = entity.get_parent().id
    elif entity.level == 'A':
        residue = entity.get_parent()
        params['id'] = entity.serial_number
        params['name'] = entity.name
        params['resn'] = residue.resname
        params['res'] = str(residue.id[1]) + residue.id[2].strip()
        params['chain'] = residue.get_parent().id
    else:
        # BUGFIX: previously returned {} (a dict) while every other path
        # returns a string; return an empty selection instead.
        return ''

    if "resn" in params:
        # Escape characters that can generate problems with Pymol
        params['resn'] = params['resn'].replace("+", "\\+")
        params['resn'] = params['resn'].replace("-", "\\-")

    return (' AND ').join(['%s %s' % (k, str(v)) for k, v in params.items()])
1952af51b67676f2bf19775b52d5f721ae1e62cc
11,362
from sys import modules
import unittest


def suite():
    """Create and return the unittest test suite for this module."""
    return unittest.TestLoader().loadTestsFromModule(modules[__name__])
1d14f1fa9fd4936ae841be11fe3bd2a37b5e421d
11,363
def update_rooms_slider(area):
    """Update the rooms slider to sensible values.

    Lower bound is always 1; the upper bound grows by one room per 30
    area units and is capped at 6.
    """
    upper = int(area / 30) + 1
    if upper > 6:
        upper = 6
    return [1, upper]
af721d72b9d122d2d129797453cee2801e45eec9
11,365
def find_changes(vals, threshold=None, change_pct=0.02, max_interval=None): """Returns an array of index values that point at the values in the 'vals' array that represent signficant changes. 'threshold' is the absolute amount that a value must change before being included. If 'threshold' is None, 'change_pct' is used to determine a threshold change by multiplying 'change_pct' by the difference between the minimum and maximum value. If 'max_interval' is provided, it will be the maximum allowed separation between returned index values. This forces some some points to be returned even if no signficant change has occurred. Index 0, pointing at the first value is always returned. """ # special case of no values passed if len(vals) == 0: return [] if threshold is None: # compute the threshold difference that constitutes a change # from the change_pct parameter threshold = (max(vals) - min(vals)) * change_pct if threshold == 0.0: threshold = 0.01 # keep it from showing every point # Start with the first value final_ixs = [0] last_val = vals[0] last_ix = 0 for ix in range(1, len(vals)): v = vals[ix] include_this_index = False if abs(v - last_val) >= threshold: # if the prior value was not included, include it if last_ix != ix - 1: final_ixs.append(ix - 1) include_this_index = True elif max_interval is not None and ix - last_ix >= max_interval: include_this_index = True if include_this_index: final_ixs.append(ix) last_val = v last_ix = ix return final_ixs
e6c66d3636f80ee310dcb7ae790bb30a27598aaa
11,366
import argparse def _key_val_pair(value): """ Type checker to ensure that --field values are of the format key=val """ if '=' not in value: msg = 'values must be of the form `header=field`' raise argparse.ArgumentTypeError(msg) return value
eede8a3975eb58d503419a0d990115c84f72813c
11,367
from pathlib import Path
import os


def module_to_path(homeassistant_root: Path, module: str) -> Path:
    """Convert a dotted module name to a Path below *homeassistant_root*.

    A trailing ``.*`` selects the whole package directory.  A plain name
    resolves to ``<module>.py`` when that file exists, otherwise to the
    package's ``__init__.py``.  Asserts that the target exists.
    """
    if module.endswith(".*"):
        pkg_dir = homeassistant_root / Path(module[:-2].replace(".", os.path.sep))
        assert pkg_dir.is_dir(), pkg_dir
        return pkg_dir
    rel = module.replace(".", os.path.sep)
    candidate = homeassistant_root / (rel + ".py")
    if not candidate.is_file():
        candidate = homeassistant_root / rel
        assert candidate.is_dir()
        candidate = candidate / "__init__.py"
    return candidate
3ab08c33922250ffda1d530a4760329410ed1ae6
11,368
from operator import or_
from functools import reduce


def or_many_condition(condition: list):
    """Combine a list of conditions with OR.

    Sometimes chained ORs perform better than IN, e.g.
    ``filter(or_many_condition([UserDB.id == i for i in user_ids]))``
    can outperform ``filter(UserDB.id.in_(user_ids))``.

    Returns True for an empty list, the single condition for a
    one-element list, and the pairwise OR-fold otherwise.
    """
    if len(condition) == 0:
        return True
    # BUGFIX: operator.or_ is strictly binary, so or_(*condition) raised
    # TypeError for three or more conditions; fold pairwise instead
    # (reduce also handles the single-element case).
    return reduce(or_, condition)
a707919908c42f3db62a74290a61cd8586c31213
11,369
def data_value(value: str) -> float:
    """Convert a trigger value to a float.

    Some trigger values are strings rather than numbers (e.g. indicating
    a letter); those map to 1.0.  Empty strings map to 0.0.
    """
    if not value:  # empty string
        return 0.0
    try:
        return float(value)
    except ValueError:
        return 1.0
24087c1733be5932428a63811abbb54686836036
11,370
from typing import Tuple
import logging


async def mark_next_person(queue_array: list) -> Tuple[list, int]:
    """Mark the next person in the queue (transfers current_turn to them).

    Parameters
    ----------
    queue_array : list
        This array represents the queue

    Returns
    -------
    Tuple[list, int]
        The modified queue_array & the id of the person marked; ([], 0)
        when nobody currently holds the turn.
    """
    for pos, entry in enumerate(queue_array):
        if not entry["current_turn"]:
            continue
        entry["current_turn"] = False
        # Modulo wraps around when the holder is last in the queue.
        nxt = (pos + 1) % len(queue_array)
        queue_array[nxt]["current_turn"] = True
        return (queue_array, queue_array[nxt]["user_id"])
    logging.error("Failed to find next person in the queue")
    return ([], 0)
e792dbf2ac10f44bda53a6429ba43180bdefd144
11,371
def need_new_decimal_value(old, new, delta=0.001):
    """Decide by approximate comparison whether the new value needs to be
    written to the database.

    Returns False when *new* is None, when *new* is zero, or when it
    differs from *old* by less than the relative threshold *delta*;
    True otherwise (including when *old* is None but *new* is not).
    """
    if new is None:
        return False
    elif old is None:
        return True
    old = float(old)
    new = float(new)
    delta = float(delta)
    try:
        # BUGFIX: was `old - new / new < delta`, which parses as
        # `old - (new / new)` i.e. `old - 1`; compare the relative
        # difference instead.
        if abs(old - new) / abs(new) < delta:
            return False
    except ZeroDivisionError:
        return False
    return True
0bf496c19555a6f8a878acb57322f266fb470f24
11,372
def xnnpack_optional_gemmlowp_copts():
    """Compiler flags to optionally enable Gemmlowp benchmarks.

    Currently no extra flags are emitted.
    """
    return list()
1f5648d61235343ebc0e3bd601a004b81e0fb65b
11,373
def costs_matrix(a, b, costs):
    """Build the edit-distance cost matrix between strings *a* and *b*.

    Classic dynamic-programming table with delete/insert/change moves plus
    an adjacent-transposition (swap) move — this matches the optimal
    string alignment variant of Damerau-Levenshtein, with per-operation
    costs taken from the ``costs`` object (attributes ``delete``,
    ``insert``, ``change``, ``swap``).

    a = 'xyz'  b = 'ayzb'
    m = /  a  y  z  b
        0  1  2  3  4
    x   1  2  3  4  5
    y   2  3  2  3  4
    z   3  4  3  2  3
    m[row][col] -> m[2][4] == 6
    # NOTE(review): the example value above depends on the costs object
    # used — verify against an actual Costs instance.
    """
    height = len(a) + 1
    width = len(b) + 1
    m = [[0] * width for _ in range(height)]
    # First column/row: pure deletions / insertions from the empty prefix.
    for row in range(height):
        m[row][0] = row * costs.delete
    for col in range(width):
        m[0][col] = col * costs.insert
    for row in range(1, height):
        for col in range(1, width):
            north = m[row - 1][col]
            west = m[row][col - 1]
            north_west = m[row - 1][col - 1]
            if a[row - 1] == b[col - 1]:
                # Characters match: carry the diagonal cost unchanged.
                m[row][col] = north_west
            else:
                m[row][col] = min(
                    north + costs.delete,
                    west + costs.insert,
                    north_west + costs.change
                )
            # Adjacent transposition: a[row-2:row] == reversed(b[col-2:col]).
            if row > 1 and col > 1 and \
                    a[row - 2] == b[col - 1] and \
                    a[row - 1] == b[col - 2]:
                before_two = m[row - 2][col - 2]
                m[row][col] = min(
                    m[row][col],
                    before_two + costs.swap
                )
    return m
f2845efbe55a12806d3d9b464cbe2c0f8efd8c56
11,374
import time


def get_process_id(id):
    """Generate a unique process name: the id plus a dmYHMS timestamp.

    :param id: process_id (parameter name kept for backward
        compatibility even though it shadows the ``id`` builtin)
    :return: string of the form ``<id>-<ddmmYYYYHHMMSS>``
    """
    stamp = str(time.strftime("%d%m%Y%H%M%S"))
    return "-".join([id, stamp])
22f2c88dc14b89a54651f58b03e16e167fbd640d
11,375
import hashlib
import sys
import os


def import_module(path, module_name=None):
    """Import a module from an arbitrary file path.

    :param path: full path to the module file to import
    :param module_name: name under which to register the module in
        sys.modules (defaults to the SHA1 hex digest of the path)
    :returns: the imported module

    Based on code from the following:
    :source: http://stackoverflow.com/questions/1096216/override-namespace-in-python
    """
    if not module_name:
        m = hashlib.sha1()
        # BUGFIX: hashlib requires bytes on Python 3; encode the path.
        m.update(path.encode("utf-8"))
        module_name = m.hexdigest()
    sys.path.insert(0, os.path.dirname(path))
    try:
        mod_path, ext = os.path.splitext(os.path.basename(path))
        module = __import__(mod_path)
        sys.modules[module_name] = module
    finally:
        # Always restore sys.path, even when the import fails.
        sys.path.pop(0)
    return module
a0b2be49149e0b49e45d297ae91a3d53d4bc3558
11,377
from typing import List
import subprocess


def _container_names_by_prefix(prefix: str) -> List[str]:
    """Return names of all containers (running or not) starting with *prefix*.

    Provide a ``docker-compose`` project name as a prefix to retrieve the
    containers associated with that project.  (We don't use
    ``docker-compose`` to do this directly because of our rather complex
    usage of multiple compose files at a time.)

    Raises ValueError if no such containers are found.
    """
    command = [
        "docker",
        "ps",
        "--all",
        "--filter",
        f"name={prefix}",
        "--format",
        "{{.Names}}",
    ]
    listing = subprocess.run(command, capture_output=True, text=True)
    names = listing.stdout.split()
    if not names:
        raise ValueError(f"Couldn't find any containers for '{prefix}'")
    return names
21794b0be7476444f867d2a6aeba18d47f916ba7
11,378
from typing import Dict
from typing import Any


def map_key(dictionary: Dict, search: Any) -> Any:
    """Find and return the first key mapping to *search*, or None."""
    return next((key for key, value in dictionary.items() if value == search), None)
c9e200bb352b68c7468ae0511f11f7eff5e46b52
11,379
def get_requirements(filename):
    """Get requirements file content by filename.

    :param filename: Name of requirements file (under ``requirements/``).
    :return: List of lines from the requirements file.
    """
    # BUGFIX: the file handle was never closed; use a context manager.
    with open("requirements/" + filename) as fh:
        return fh.read().splitlines()
3dab39352bb69798e076a9d97077902580f50640
11,380
def is_operator(token):
    """Checks if token is an operator '-+/*'.

    Kept as a substring check on '-+/*' to match the original semantics
    exactly (including for empty/multi-character tokens).
    """
    return '-+/*'.find(token) != -1
24b57d2a299df263c900cc94e2e18a9fc7d026b1
11,382
import itertools


def duel(state, agent_1, agent_2, max_game_length=1e99):
    """Plays two agents against each other, alternating turns.

    Returns the recorded move history and the winner stored on *state*.
    """
    history = []
    turn_order = itertools.cycle([agent_1, agent_2])
    moves_played = 0
    while not state.game_over and moves_played < max_game_length:
        current = next(turn_order)
        current.perform_simulations()
        chosen = current.decision()
        history.append(current.tree_root.history_sample())
        state.step(chosen)
        # Both agents observe the chosen move.
        agent_1.update_state(chosen)
        agent_2.update_state(chosen)
        moves_played += 1
    if moves_played >= max_game_length:
        # The game was cut short: settle the winner explicitly.
        state.winner = state._compute_winner()
    return history, state.winner
871d2e0ee036878c10c0acb6fc51349a0edcd4da
11,383
def add_date_end(record: dict):
    """Copy ``date_start`` into ``date_end`` when ``measure_stage`` is "Lift".

    Parameters
    ----------
    record : dict
        Input record.

    Returns
    -------
    dict
        Record with date_end changed conditionally, or the original record.
    """
    if record['measure_stage'] == 'Lift':
        record['date_end'] = record['date_start']
    return record
67f6ae063aabdf7a7d456ed0ce9660a75b37b6c2
11,385
def multy(b: int) -> int:
    """2**b via naive recursion.

    >>> multy(17)-1
    131071
    """
    return 1 if b == 0 else 2 * multy(b - 1)
272cd138de2209093c0a32e1c7b2f1c08d0dc2be
11,386
def get_ens(sets):
    """Puts all entities appearing in the interpretation sets into one set.

    :param sets: [{en1, en2, ...}, {en3, ..}, ...]
    :return: set {en1, en2, en3, ...}
    """
    return {en for en_set in sets for en in en_set}
7f5b59d6f5786ee9506bda72a4ee3f8cdf787c33
11,387
def is_in_group(user, group_name):
    """
    Check if a user is in a group named ``group_name``.

    :param user User: The auth.User object
    :param group_name str: The name of the group
    :returns: bool
    """
    # Single EXISTS query against the user's groups, exact
    # (case-sensitive) name match.
    return user.groups.filter(name__exact=group_name).exists()
25e51e483b0c18c03f8e897725f07e9e63cc160a
11,389
import os


def join_file_path(*args):
    """Preprocess the given path pieces and join them with os.path.join.

    An empty first piece is treated as the filesystem root; later pieces
    have surrounding slashes stripped so they join cleanly.
    """
    # Robustness: joining nothing yields '' instead of IndexError.
    if not args:
        return ''
    args = list(args)
    if args[0] == '':
        args[0] = '/'
    for i in range(1, len(args)):
        # First value can start with /
        args[i] = args[i].strip('/')
    return os.path.join(*args)
71e63993ef9a40ced16dafec75442c95229f4537
11,390
def post_process_tree(G):
    """
    Given a networkx graph in the form of a tree, assign sample
    identities to character states.

    Node names with more than two underscore-separated parts encode a
    sample id in the middle; each such node gets a child named after that
    id.

    :param G: Networkx Graph as a tree
    :return: postprocessed tree as a Networkx object
    """
    sample_nodes = []
    sample_edges = []
    for node in G.nodes:
        parts = node.split("_")
        if len(parts) <= 2:
            continue
        sample = "_".join(parts[1:-1])
        sample_nodes.append(sample)
        sample_edges.append((node, sample))
    G.add_nodes_from(sample_nodes)
    G.add_edges_from(sample_edges)
    return G
aba4654d174e3bf5e8706ca3e8a8d2bae8461129
11,391
def __data_suppression(indicator_dataframe): """Helper function to reduce data used by the large indicator dataframe""" # Couple columns we just don't need indicator_dataframe.drop(columns = {'SpatialDimType','TimeDimType','DataSourceDimType', 'Date','TimeDimensionValue','TimeDimensionBegin','TimeDimensionEnd'}, inplace = True) # Memory suppression of columns: Numerical indicator_dataframe['TimeDim'] = indicator_dataframe['TimeDim'].astype('Int16') # Note, we have to make it Int16 to support Nulls, rather than int16 indicator_dataframe['ID'] = indicator_dataframe['ID'].astype('int32') # Memory suppression of columns: Object -> Category indicator_dataframe[['IndicatorCode','SpatialDim','Dim1','Dim1Type','Dim2','Dim2Type','Dim3','Dim3Type','DataSourceDim']] = \ indicator_dataframe[['IndicatorCode','SpatialDim','Dim1','Dim1Type','Dim2','Dim2Type','Dim3','Dim3Type','DataSourceDim']].astype('category') return indicator_dataframe
fa9d205c2d5b9ae8a668247fc32abc76c562a9e7
11,394
def strip_email(s: str) -> str:
    """
    Filter which extracts the first part of an email to make a shorthand
    for the user.

    :param s: Input email (or any string).
    :type s: str
    :return: A shorthand extracted.
    :rtype: str
    """
    return s.partition('@')[0]
36ea94b9eb12e0e64b78965c66233578790e6a13
11,395
def name_item_info(mosaic_info, item_info):
    """
    Generate the name for a mosaic metadata file in Azure Blob Storage.

    This follows the pattern `metadata/quad/{mosaic-id}/{item-id}.json`.
    """
    mosaic_id = mosaic_info['id']
    item_id = item_info['id']
    return "metadata/quad/{}/{}.json".format(mosaic_id, item_id)
db78eb175f6530ace2037ed89cb090591c82147f
11,396
def ptfhost(testbed_devices):
    """Shortcut fixture returning the PTF host from the testbed devices."""
    ptf = testbed_devices["ptf"]
    return ptf
25fdbc9520c4cda9719887ff213ccb2b88918dcc
11,397
from datetime import datetime


def add_current_time_to_state(state):
    """Annotate state with the latest server time (epoch seconds)."""
    now = datetime.now()
    state['currentTime'] = now.timestamp()
    return state
f61fa2d7b127fbebb22fc277e16f2f79cdd4acd3
11,398
import time


def delay(fn):
    """Decorator that sleeps before invoking the wrapped function.

    The function using this decorator must accept ``delay`` as a keyword
    argument giving the number of seconds to wait (default 0).
    """
    def wrapper(*args, **kwargs):
        time.sleep(kwargs.get('delay', 0))
        return fn(*args, **kwargs)
    return wrapper
459fe2741af565225b1d9aee4e8841460d1bff4e
11,400
import re


def normalize_rgb_colors_to_hex(css):
    """Convert `rgb(51,102,153)` to `#336699`."""
    rgb_re = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
    while True:
        found = rgb_re.search(css)
        if not found:
            break
        channels = [part.strip() for part in found.group(1).split(",")]
        hexcolor = '#%.2x%.2x%.2x' % tuple(int(c) for c in channels)
        css = css.replace(found.group(), hexcolor)
    return css
e3650f525d1d9c1e7bbf7d5b4161712a7439b529
11,401
def method_codes_to_geomagia(magic_method_codes, geomagia_table):
    """
    Look at the MagIC method code list and return the correct GEOMAGIA
    code number depending on the method code list and the GEOMAGIA table
    specified.

    Returns '0', GEOMAGIA's "Not specified" value, if no match.  When
    multiple codes are matched (md_checks) they are separated with ':'.
    """
    codes = magic_method_codes
    geomagia = geomagia_table.lower()
    geomagia_code = '0'

    def _any_in(*candidates):
        # True when any candidate method code appears in the code list.
        return any(c in codes for c in candidates)

    if geomagia == 'alteration_monit_corr':
        # BUGFIX: the original tested `"X" or "Y" in codes`, which is
        # always truthy (a non-empty string literal); every candidate is
        # now individually checked with `in`.
        if _any_in("DA-ALT-V", "LP-PI-ALT-PTRM", "LP-PI-ALT-PMRM"):
            geomagia_code = '1'
        elif "LP-PI-ALT-SUSC" in codes:
            geomagia_code = '2'
        elif _any_in("DA-ALT-RS", "LP-PI-ALT-AFARM"):
            geomagia_code = '3'
        elif "LP-PI-ALT-WALTON" in codes:
            geomagia_code = '4'
        elif "LP-PI-ALT-TANGUY" in codes:
            geomagia_code = '5'
        elif "DA-ALT" in codes:  # at end to fill generic if others don't exist
            geomagia_code = '6'
        elif "LP-PI-ALT-FABIAN" in codes:
            geomagia_code = '7'

    if geomagia == 'md_checks':
        if ("LT-PTRM-MD" in codes) or ("LT-PMRM-MD" in codes):
            geomagia_code = '1:'
        if ("LP-PI-BT-LT" in codes) or ("LT-LT-Z" in codes):
            if "0" in geomagia_code:
                geomagia_code = "23:"
            else:
                geomagia_code += '2:'
        # Strip the trailing separator only when something matched, so a
        # no-match case still returns '0'.
        if geomagia_code.endswith(':'):
            geomagia_code = geomagia_code[:-1]

    if geomagia == 'anisotropy_correction':
        if "DA-AC-AMS" in codes:
            geomagia_code = '1'
        elif "DA-AC-AARM" in codes:
            geomagia_code = '2'
        elif "DA-AC-ATRM" in codes:
            geomagia_code = '3'
        elif "LT-NRM-PAR" in codes:
            geomagia_code = '4'
        elif "DA-AC-AIRM" in codes:
            geomagia_code = '6'
        elif "DA-AC" in codes:  # at end to fill generic if others don't exist
            geomagia_code = '5'

    if geomagia == 'cooling_rate':
        # all current CR codes but CR-EG are a 1 but may change in the future
        if "DA-CR" in codes:
            geomagia_code = '1'
        if "DA-CR-EG" in codes:
            geomagia_code = '2'

    if geomagia == 'dm_methods':
        if "LP-DIR-AF" in codes:
            geomagia_code = '1'
        elif "LT-AF-D" in codes:
            geomagia_code = '1'
        elif "LT-AF-G" in codes:
            geomagia_code = '1'
        elif "LT-AF-Z" in codes:
            geomagia_code = '1'
        elif "LP-DIR-T" in codes:
            geomagia_code = '2'
        elif "LT-AF-Z" in codes:  # NOTE(review): unreachable duplicate of the
            geomagia_code = '2'   # branch above; kept from the original.
        elif "LP-DIR-M" in codes:
            geomagia_code = '5'
        elif "LT-M-Z" in codes:
            geomagia_code = '5'

    if geomagia == 'dm_analysis':
        if "DE-BFL" in codes:
            geomagia_code = '1'
        elif "DE-BLANKET" in codes:
            geomagia_code = '2'
        elif "DE-FM" in codes:
            geomagia_code = '3'
        elif "DE-NRM" in codes:
            geomagia_code = '6'

    if geomagia == 'specimen_type_id':
        if "SC-TYPE-CYC" in codes:
            geomagia_code = '1'
        elif "SC-TYPE-CUBE" in codes:
            geomagia_code = '2'
        elif "SC-TYPE-MINI" in codes:
            geomagia_code = '3'
        elif "SC-TYPE-SC" in codes:
            geomagia_code = '4'
        elif "SC-TYPE-UC" in codes:
            geomagia_code = '5'
        elif "SC-TYPE-LARGE" in codes:
            geomagia_code = '6'
    return geomagia_code
7939704ba51f8e199b79ddf89611ec63c6ead703
11,404
import torch
import math


def decode_μ_law(x: torch.Tensor, μ: int = 255) -> torch.Tensor:
    """
    Applies the element-wise inverse μ-law encoding to the tensor.

    Args:
        x: input tensor of encoded values
        μ: size of the encoding (number of possible classes)

    Returns:
        the decoded tensor
    """
    scaled = x.type(torch.float32) / math.ceil(μ / 2)
    magnitude = (torch.pow(1 + μ, torch.abs(scaled)) - 1) / μ
    return torch.sign(scaled) * magnitude
84b991fdb80b845796ec751756ae56140dfc2f45
11,405
def _get_histogram_data(tracking_data): """Returns the histogram data for the plot. Currently we return the bins and values of the last iteration tracked before this plot. Args: tracking_data (pandas.DataFrame): DataFrame holding the tracking data. Returns: list: Bins of the histogram. list: Mid points of the bins. list: Width of the bins. """ clean_data = tracking_data.GradHist1d.dropna() last_step_data = clean_data[clean_data.index[-1]] vals = last_step_data["hist"] bins = last_step_data["edges"] width = bins[1] - bins[0] mid_points = (bins[1:] + bins[:-1]) / 2 return vals, mid_points, width
18afd4fc320ae67c25beb635144da76a0ca81b5f
11,406
def constant_array(context, builder, ty, pyval):
    """
    Create a constant array (mechanism is target-dependent).

    Delegates to the target context's ``make_constant_array`` with the IR
    builder, the array type, and the Python value.
    """
    return context.make_constant_array(builder, ty, pyval)
eefd69b2f8f355e2af61f7636a586dd87a0e0419
11,409
def shape_to_spacing(region, shape, pixel_register=False):
    """
    Calculate the spacing of a grid given region and shape.

    Parameters
    ----------
    region : list = [W, E, S, N]
        The boundaries of a given region in Cartesian or geographic
        coordinates.
    shape : tuple = (n_north, n_east) or None
        The number of points in the South-North and West-East directions,
        respectively.
    pixel_register : bool
        If True, the coordinates will refer to the center of each grid
        pixel instead of the grid lines (one fewer interval per
        dimension).  Default is False.

    Returns
    -------
    spacing : tuple = (s_north, s_east)
        The grid spacing in the South-North and West-East directions,
        respectively.

    Examples
    --------
    >>> spacing = shape_to_spacing([0, 10, -5, 1], (7, 11))
    >>> print("{:.1f}, {:.1f}".format(*spacing))
    1.0, 1.0
    >>> spacing = shape_to_spacing(
    ...     [-0.5, 10.5, -5.5, 1.5], (7, 11), pixel_register=True,
    ... )
    >>> print("{:.1f}, {:.1f}".format(*spacing))
    1.0, 1.0
    """
    spacings = []
    # shape is (n_north, n_east) while region pairs are (W, E) then
    # (S, N), so walk the shape backwards: east first, then north.
    for axis, n_points in enumerate(reversed(shape)):
        intervals = n_points if pixel_register else n_points - 1
        low, high = region[2 * axis], region[2 * axis + 1]
        spacings.append((high - low) / intervals)
    return tuple(reversed(spacings))
bc943f55330c17295bdb078d732c6ebc9f8cecfe
11,410
import math


def choose_grid_size(train_inputs, ratio=1.0, kronecker_structure=True):
    """
    Given some training inputs, determine a good grid size for KISS-GP.

    :param train_inputs: the input data (... x n x d)
    :type train_inputs: torch.Tensor
    :param ratio: amount of grid points per data point (default: 1.)
    :type ratio: float, optional
    :param kronecker_structure: whether or not the model will use
        Kronecker structure in the grid (set to True unless there is an
        additive or product decomposition in the prior)
    :type kronecker_structure: bool, optional
    :return: grid size
    :rtype: int
    """
    one_dimensional = train_inputs.dim() == 1
    num_data = train_inputs.numel() if one_dimensional else train_inputs.size(-2)
    num_dim = 1 if one_dimensional else train_inputs.size(-1)
    if not kronecker_structure:
        return ratio * num_data
    # One grid per dimension: take the d-th root of the point budget.
    return int(ratio * math.pow(num_data, 1.0 / num_dim))
1d3e30a6b7b419a1e2fcdc245830aa00d1ba493d
11,411
import math


def factors_convert_fract2cartes(cif_data):
    """
    Edge vectors (a, b, c) in fractional coordinate -> (x, y, z) in
    Cartesian coordinate.

        a = (a, 0, 0)
        b = (b*cos(gamma), b*sin(gamma), 0)
        c = (cx, cy, cz)

    Returns the nine Cartesian components of the cell vectors and warns
    if the implied volume disagrees with the CIF volume.
    """
    # Lengths of the unit cell
    length_a = cif_data["_cell_length_a"]
    length_b = cif_data["_cell_length_b"]
    length_c = cif_data["_cell_length_c"]

    # Angles in the unit cell
    alpha = math.radians(cif_data["_cell_angle_alpha"])
    beta = math.radians(cif_data["_cell_angle_beta"])
    gamma = math.radians(cif_data["_cell_angle_gamma"])

    cos_a = math.cos(alpha)
    cos_b = math.cos(beta)
    cos_g = math.cos(gamma)
    sin_g = math.sin(gamma)

    ax = length_a  # ay = az = 0
    bx = length_b * cos_g
    by = length_b * sin_g  # bz = 0
    cx = length_c * cos_b
    cy = length_c * (cos_a - cos_g * cos_b) / sin_g
    cz = length_c * math.sqrt(
        1 - cos_a ** 2 - cos_b ** 2 - cos_g ** 2 + 2 * cos_a * cos_b * cos_g
    ) / sin_g

    # Use the volume to check that we calculated the vectors correctly
    # (determinant of the triangular vector matrix is ax * by * cz).
    volume = ax * by * cz
    if abs(volume - cif_data["_cell_volume"]) > 0.1:
        print("WARNING: Volume calculated with the real vectors is not the same as the volume in CIF file.")

    return {"ax": ax, "ay": 0, "az": 0,
            "bx": bx, "by": by, "bz": 0,
            "cx": cx, "cy": cy, "cz": cz}
b60afbbed466efd0f2016e771323d3d56c3ffaf8
11,412
import requests


def get(path, params=None, filename=None, keyfile='api-key.txt'):
    """
    Query the Illustris API.

    Args:
        path (str): API query path.
        params: query parameters passed through to requests.
        filename (str): filename and directory to save downloaded
            content; defaults to the name suggested by the server.
        keyfile (str): file containing the API key.

    Returns the parsed JSON for JSON responses, the saved filename for
    file downloads, and the raw response object otherwise.
    """
    # BUGFIX: the keyfile argument was ignored ('api-key.txt' was always
    # opened); honor the parameter.  The context manager also closes the
    # file promptly.
    with open(keyfile, 'r') as myfile:
        key = myfile.read().replace("\n", "")
    headers = {"api-key": key}
    # make HTTP GET request to path
    r = requests.get(path, params=params, headers=headers)
    # raise exception if response code is not HTTP SUCCESS (200)
    r.raise_for_status()
    if r.headers['content-type'] == 'application/json':
        return r.json()  # parse json responses automatically
    if 'content-disposition' in r.headers:
        if filename is None:
            filename = r.headers['content-disposition'].split("filename=")[1]
        with open(filename, 'wb') as f:
            f.write(r.content)
        return filename  # return the filename string
    return r
b2f1226bd66325a012872d3d07d5c10648aa8680
11,413
def compute_ngrams(sentence, n):
    """Return the list of n-grams (space-joined strings) of a sentence,
    lowercased."""
    words = sentence.lower().split()
    grams = zip(*(words[offset:] for offset in range(n)))
    return [" ".join(gram) for gram in grams]
495505f4183d56a5a76339826f8306186191c6b1
11,414
import pandas


def cell_id_count(dataframe: pandas.DataFrame, col_name: str) -> pandas.DataFrame:
    """
    Count/tally the unique cell IDs to generate coverage density.

    In general this works for any field: returns a frame with the column
    values and a ``count`` column.
    """
    tallied = (
        dataframe.groupby([col_name])[col_name]
        .count()
        .to_frame("count")
        .reset_index()
    )
    return tallied
2d6e47efc181e00e102ae51adc85c674a2bf53f4
11,416
def parameter_tuple_parser(parameter_tuple, code_list, relative_base):
    """
    Resolve an intcode parameter (mode, value) to its operand value.

    Modes: 0 = position, 1 = immediate, 2 = relative.  Unknown modes
    print a diagnostic and return None.
    """
    mode, raw = parameter_tuple
    if mode == 0:
        return code_list[raw]
    if mode == 1:
        return raw
    if mode == 2:
        return code_list[raw + relative_base]
    print('And I oop.... parameter_tuple_parser')
f869240666f2adca0551a3620644023b88930a5a
11,417
def sum_of_factorial_pure_recursion(number: int) -> int:
    """Sum 1! + 2! + ... + number! using only recursion.

    Uses S(n) - S(n-1) = n!, so n! = (S(n-1) - S(n-2)) * n.

    >>> sum_of_factorial_pure_recursion(0)
    0
    >>> sum_of_factorial_pure_recursion(1)
    1
    >>> sum_of_factorial_pure_recursion(2)
    3
    >>> sum_of_factorial_pure_recursion(5)
    153
    """
    if number in (0, 1):
        return number  # 1! or 0!
    if number == 2:
        return 3  # 1! + 2!
    prev = sum_of_factorial_pure_recursion(number - 1)
    prev2 = sum_of_factorial_pure_recursion(number - 2)
    return prev + (prev - prev2) * number
12ca71c011535bcff990f49cf8716aca3ecc5226
11,418
def _compute_padding_to_prevent_task_description_from_moving(unfinished_tasks): """Compute the padding to have task descriptions with the same length. Some task names are longer than others. The tqdm progress bar would be constantly adjusting if more space is available. Instead, we compute the length of the longest task name and add whitespace to the right. Example ------- >>> unfinished_tasks = ["short_name", "long_task_name"] >>> _compute_padding_to_prevent_task_description_from_moving(unfinished_tasks) 14 """ len_task_names = list(map(len, unfinished_tasks)) padding = max(len_task_names) if len_task_names else 0 return padding
82a2fffa0a35036901affe11fdf98d875445ffea
11,420
import math


def float_in_range(value: float, lower: float, upper: float) -> bool:
    """Checks if a float is within a range.

    In addition to checking lower <= value <= upper, values merely close
    to either endpoint (per math.isclose) also count, to account for the
    vagaries of floating point precision.

    args:
        value: The value to check.
        lower: The lower bound of the range.
        upper: The upper bound of the range.

    return:
        True when the value is in (or at the edge of) the range.
    """
    if lower <= value <= upper:
        return True
    return math.isclose(value, lower) or math.isclose(value, upper)
e971c082e8019f545c9ccc1024f37d3bab61b64e
11,421
def _parse_int(s, start, end):
    """Parse a non-negative decimal number from s[start:end] and check for
    overflows.

    Returns (result, i) where i is the index one past the last digit
    consumed; result is -1 when no digits were consumed at all.
    Raises ValueError when the number exceeds the allowed digit budget.
    """
    result = 0
    i = start
    while i < end:
        c = ord(s[i])
        if ord("0") <= c <= ord("9"):
            try:
                result = result * 10
                # Cap the magnitude before adding the next digit.
                if result > 1000000000:
                    # this is not going to overflow in CPython
                    raise OverflowError
            except OverflowError:
                msg = "too many decimal digits in format string"
                raise ValueError(msg)
            result += c - ord("0")
        else:
            # First non-digit ends the number.
            break
        i += 1
    if i == start:
        # No digits at all: signal "absent" with -1.
        result = -1
    return result, i
9719efbdbbd230c26ec5824ca150fbcc1e062057
11,422
import argparse
import ast


def get_args():
    """Build and parse the command-line arguments for SSD training."""
    parser = argparse.ArgumentParser(description="SSD training")
    # Platform / distribution options.
    # NOTE(review): choices=("Ascend") is a plain string, not a tuple —
    # probably intended ("Ascend",); left unchanged here.
    parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend"),
                        help="run platform, only support Ascend.")
    parser.add_argument("--mindrecord_url", type=str, default=None, help="mindrecord path, default is none.")
    parser.add_argument("--mindrecord_eval", type=str, default=None, help="mindrecord_eval path, default is none.")
    # Boolean-ish flags are parsed with ast.literal_eval so "True"/"False"
    # strings become real booleans.
    parser.add_argument("--eval_callback", type=ast.literal_eval, default=False, help="verify or not during training.")
    parser.add_argument("--distribute", type=ast.literal_eval, default=False, help="Run distribute, default is False.")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--data_url", type=str, default="", help="cocoo 14mini data path.")
    parser.add_argument("--train_url", type=str, help="path for checkpoint.")
    parser.add_argument("--run_online", type=str, default=False, help="run online,default is False.")
    parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    # Training hyper-parameters.
    parser.add_argument("--lr", type=float, default=0.05, help="Learning rate, default is 0.05.")
    parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink.")
    parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
    parser.add_argument("--epoch_size", type=int, default=100, help="Epoch size, default is 500.")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.")
    # Checkpoint / pretraining options.
    parser.add_argument("--pre_trained", type=str, default="", help="Pretrained Checkpoint file path.")
    parser.add_argument("--backbone_pre_trained", type=str, default="",
                        help="BackBone Pretrained Checkpoint file path.")
    parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.")
    parser.add_argument("--save_checkpoint_epochs", type=int, default=1,
                        help="Save checkpoint epochs, default is 10.")
    parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
    parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
                        help="Filter head weight parameters, default is False.")
    parser.add_argument('--freeze_layer', type=str, default="none", choices=["none", "backbone"],
                        help="freeze the weights of network, support freeze the backbone's weights, "
                             "default is not freezing.")
    parser.add_argument("--data_complete", type=ast.literal_eval, default=True, help="dataset preparation complete.")
    parser.add_argument("--data_url_raw", type=str, default="", help="coco2017 path.")
    parser.add_argument("--number_path", type=str, default="", help="number_id_val path")
    args = parser.parse_args()
    return args
fbc0d92a70d69a7953583f8540338e793451c150
11,425
def flat_list(source):
    """Make sure `source` is a list, else wrap it, and return a flat list.

    One level of nesting (lists/tuples) is flattened.

    :param source: source list
    :returns: flat list
    """
    if not isinstance(source, (list, tuple)):
        source = [source]
    flattened = []
    for element in source:
        if isinstance(element, (list, tuple)):
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
d118388794cdef72200ad43ab88d9f964b633851
11,426
def git_ref_from_eups_version(version: str) -> str:
    """Given a version string from the EUPS tag file, find something that
    looks like a Git ref (the part before any ``+`` suffix)."""
    ref, _, _ = version.partition("+")
    return ref
c6a429878c2150ce58b262e9f1fb187226336b0b
11,427
import re


def page_to_books(page_content):
    """Find the individual books contained in the web page and return
    them as a list of matched table-row snippets."""
    book_row = re.compile(r'<tr>.*?<td><i><a (.*?)</td></tr>', re.DOTALL)
    return [match.group(0) for match in book_row.finditer(page_content)]
2954ef11dce788b1232976d9b523f9e0b9a69cfc
11,428
def collapse(iter_flow_result: dict) -> list:
    """Return an iter_flow dict result as a single flat list."""
    return [item for key in iter_flow_result for item in iter_flow_result[key]]
ada0d654ed36df8168b4835fbde3b91b7f56fb72
11,431
import os


def _read_secret(direct_key, file_key, missing_msg, not_found_msg):
    """Return a secret from env var `direct_key`, falling back to reading the
    first line of the file named by env var `file_key`.

    Raises RuntimeError with the supplied message when the file env var is
    absent or the file does not exist.
    """
    value = os.environ.get(direct_key)
    if value is not None:
        return value
    try:
        filename = os.environ[file_key]
        with open(filename, "r") as secret_file:
            return secret_file.readline().strip()
    except KeyError:
        raise RuntimeError(missing_msg)
    except FileNotFoundError:
        raise RuntimeError(not_found_msg)


def parse_settings() -> dict:
    """Collect the bot's configuration from environment variables.

    DB connection settings are required directly; the DB password and Slack
    token may come either from a direct env var or from a *_FILE env var
    pointing at a secret file (first line, stripped). The previously
    duplicated fallback logic is factored into _read_secret().

    Returns:
        dict with keys BOT_DB_NAME, BOT_DB_USERNAME, BOT_DB_HOST,
        BOT_DB_PORT, BOT_DB_PASSWORD, BOT_SLACK_TOKEN.

    Raises:
        RuntimeError: if a required variable or secret file is missing.
    """
    ret = dict()
    try:
        ret["BOT_DB_NAME"] = os.environ["BOT_DB_NAME"]
        ret["BOT_DB_USERNAME"] = os.environ["BOT_DB_USERNAME"]
        ret["BOT_DB_HOST"] = os.environ["BOT_DB_HOST"]
        ret["BOT_DB_PORT"] = os.environ["BOT_DB_PORT"]
    except KeyError as e:
        raise RuntimeError(f"환경변수 {str(e)}가 존재하지 않습니다.")
    ret["BOT_DB_PASSWORD"] = _read_secret(
        "BOT_DB_PASSWORD", "BOT_DB_PASSWORD_FILE",
        "DB 비밀번호와 관련된 환경변수가 존재하지 않습니다.",
        "비밀번호에 해당하는 파일을 찾을 수가 없습니다.")
    ret["BOT_SLACK_TOKEN"] = _read_secret(
        "BOT_SLACK_TOKEN", "BOT_SLACK_TOKEN_FILE",
        "슬랙 토큰과 관련된 환경변수가 존재하지 않습니다.",
        "슬랙 토큰에 해당하는 파일을 찾을 수가 없습니다.")
    return ret
52495ca767fad780c27b428000cecd2862c1c1b3
11,432
import os


def lastlines(filename, lines=1, max_line_length=255):
    """Return the last `lines` lines of a text file.

    Fix: the original seeked a fixed distance back from the end, which raises
    OSError for files smaller than (lines+1)*max_line_length. The seek offset
    is now clamped at the start of the file, and the file handle is closed
    via the context manager in all cases.

    :param filename: path of the file to read
    :param lines: number of trailing lines to return
    :param max_line_length: assumed upper bound on line length in bytes
    """
    with open(filename, 'rb') as f:
        f.seek(0, os.SEEK_END)
        size = f.tell()
        # Clamp to 0 so small files no longer raise OSError.
        f.seek(max(0, size - (lines + 1) * max_line_length))
        endlines = f.read().decode().split('\n')
    return endlines[-lines:]
f7d4602bceb1ac060f458d8df8e3a34676020963
11,433
def changes(new_cmp_dict, old_cmp_dict, id_column, columns):
    """Return a list of dicts describing the changed values of rows that
    exist in both dictionaries.

    For each key present in both inputs, compare the values of `columns`;
    a column contributes to the result only when old and new differ and the
    new value is present (not the 'NaN' sentinel). When `id_column` is given,
    each change dict also carries the old row's id value.

    Fixes: `id_column != None` replaced by the idiomatic `is not None`, and
    the unused `dict_keys` intersection computation removed.

    :param new_cmp_dict: mapping key -> new row dict
    :param old_cmp_dict: mapping key -> old row dict (must contain id_column)
    :param id_column: name of the id column, or None to omit ids
    :param columns: column names to compare
    """
    update_ldict = []
    same_keys = set(new_cmp_dict).intersection(set(old_cmp_dict))
    for same_key in same_keys:
        old_dict = old_cmp_dict[same_key]
        new_dict = new_cmp_dict[same_key]
        update_dict = {}
        for dict_key in columns:
            # 'NaN' stands in for "value missing in this row".
            old_val = old_dict.get(dict_key, 'NaN')
            new_val = new_dict.get(dict_key, 'NaN')
            if old_val != new_val and new_val != 'NaN':
                if id_column is not None:
                    try:
                        update_dict[id_column] = old_dict[id_column]
                    except KeyError:
                        print("Input Dictionary 'old_cmp_dict' must have ID column")
                update_dict[dict_key] = new_val
        if update_dict:
            update_ldict.append(update_dict)
    return update_ldict
0b10755dcac69eb11bf47fdd2309308e0d2266c8
11,435
from typing import Union
from pathlib import Path


def relative_to(
    path: Union[str, Path], source: Union[str, Path], include_source: bool = True
) -> Union[str, Path]:
    """Make a path relative to another path.

    In contrast to :meth:`pathlib.Path.relative_to`, this function allows to
    keep the name of the source path.

    Examples
    --------
    The default behavior of :mod:`pathlib` is to exclude the source path from
    the relative path.

    >>> relative_to("folder/file.py", "folder", False).as_posix()
    'file.py'

    To provide relative locations to users, it is sometimes more helpful to
    provide the source as an orientation.

    >>> relative_to("folder/file.py", "folder").as_posix()
    'folder/file.py'

    """
    relative = Path(path).relative_to(source)
    if include_source:
        return Path(source).name / relative
    return relative
26caa33770617361406915c9b301b5fe1a0ba9ef
11,437
import sys


def get_current_platform_dll_extension():
    """Return the dynamic loading library extension for the current platform.

    'dll' on Windows, 'dylib' on macOS, 'so' everywhere else.
    """
    extension_by_platform = {'win32': 'dll', 'darwin': 'dylib'}
    return extension_by_platform.get(sys.platform, 'so')
fb23730ab975624329ec2ae25cfeb434e1b35a99
11,438
def _normalize_value_string(s): """Flatten value into a single line.""" final_lines = [] for line in s.splitlines(): line = line.strip() # remove line continuation \ if line.endswith('\\'): line = line[:-1] final_lines.append(line) return ''.join(final_lines)
00ce1b63eb4418bd490df754a50d2ad4bbd7618a
11,439
def packing_fraction_state(state):
    """Calculate the packing fraction of a state.

    Parameters
    ----------
    state : :class:`peri.states.ImageState`

    Returns
    -------
    Float
        The volume fraction: the mean of the object field over the state's
        inner region.
    """
    object_field = state.get('obj').get()
    inner_region = object_field[state.inner]
    return inner_region.mean()
fc9cc7c3aea2bcb9415aa3591f85a76c6c6a1412
11,440
def get_creds(filename):
    """Read credentials from a file.

    Format is:
        URL
        USERNAME
        PASSWORD

    Fix: the file handle is now closed via a context manager (the original
    opened the file and never closed it).

    Parameters
    ----------
    filename: string
        path to the file with the credentials

    Returns
    -------
    list as [URL, USERNAME, PASSWORD]

    Raises
    ------
    ValueError
        if the file does not contain exactly three lines
    """
    with open(filename, "r") as f:
        lines = f.readlines()
    if len(lines) != 3:
        raise ValueError("Not a valid credentials file.")
    # strip out newline chars
    return [x.rstrip() for x in lines]
7b308a2e140a809e0bb11804750370bb4326796d
11,441
def create_path(createFn):
    """Decorator factory: wrap a path-building method so that, when called
    with create=True, the produced path is also materialized via `createFn`.

    :param createFn: callable invoked with the path when create=True
    """
    def decorator(fn):
        def wrapper(self, *args, create=False):
            built_path = fn(self, *args, create=create)
            if create:
                createFn(built_path)
            return built_path
        return wrapper
    return decorator
e4fecc33e051f0b1384f9b3b41f49fcd938357c9
11,442
def uri_base(uri):
    """Get the base URI from the supplied URI by removing any query
    parameters ('?...') and/or fragment ('#...')."""
    base, _, _ = uri.partition("#")
    base, _, _ = base.partition("?")
    return base
8655b8717261dcd419f1c3ac0153ec51d348ca57
11,443
def sequalsci(val, compareto) -> bool:
    """Case-insensitively compare two strings.

    Returns True when both arguments are strings that are equal after
    lowercasing; False otherwise (including when either is not a str).
    """
    if not (isinstance(val, str) and isinstance(compareto, str)):
        return False
    return val.lower() == compareto.lower()
5e96233ead44608ce70ad95e936c707af8bcb130
11,444
def n_rots(request):
    """Number of rotations in random layer.

    Pytest fixture-style accessor: returns the current parametrization
    value carried by the fixture `request` object.
    """
    return request.param
93d55281a83be9e4acba951d1de8a9e7d5081182
11,445
from typing import Any


def try_to_cuda(t: Any) -> Any:
    """
    Try to move the input variable `t` to a cuda device.

    Args:
        t: Input.

    Returns:
        t_cuda: `t` moved to a cuda device, if supported; otherwise `t`
            unchanged (objects without a `.cuda` attribute pass through).
    """
    try:
        return t.cuda()
    except AttributeError:
        return t
9b6643f169c1eb8fc65de8b1bff55668f8cba950
11,446
def fixtableforhtml(tablecolumn):
    """
    Fix formatting for HTML display: values below 1 become percentages
    (rounded to 2 decimals), larger values become comma-separated integers.
    """
    formatted = []
    for raw in tablecolumn:
        number = float(raw)
        if number < 1:
            formatted.append(str(round(number * 100, 2)) + "%")
        else:
            formatted.append(f"{int(number):,d}")
    return formatted
f57f2ef4145a7389e6160316750e64d0903ddc02
11,447
from typing import Sequence
from typing import Iterator


def apply(masks: Sequence[int], value: int) -> Iterator[int]:
    """Apply each bit mask to the given value using bitwise AND.

    Args:
        masks: Bit masks to apply.
        value: Value to mask.

    Returns:
        Iterator with each mask independently applied to the value.
    """
    for mask in masks:
        yield value & mask
6eafc88977eea0d50173a4c37c12b43f5c80a870
11,449
def filter_df_for_pred_a(df, list_freq_outliers):
    """
    Exclude frequency outliers according to list and drop all hex bin /
    time window combinations with 0 RTA's.

    :param df: pandas DataFrame containing an 'h3_zone_6' hex-bin column
        plus all of the columns dropped below
    :param list_freq_outliers: 'h3_zone_6' values to exclude as outliers
    :return: DataFrame with its remaining columns renamed to
        ["datetime", "hex_bins"]
    """
    # Keep only rows whose hex bin is NOT a known frequency outlier.
    df_pred_a = df.loc[~df["h3_zone_6"].isin(list_freq_outliers)]
    # reset_index() materializes the old index as an 'index' column,
    # which is then dropped below together with the metadata columns.
    df_pred_a = df_pred_a.reset_index()
    df_pred_a = df_pred_a.drop(["uid", "latitude", "longitude", "time", "time_window",
                                "time_window_str", "day", "weekday", "month", "half_year",
                                "rainy_season", "year", "date_trunc", "holiday",
                                "time_window_key", "index"], axis=1)
    # NOTE(review): assumes exactly two columns remain after the drop —
    # the positional rename below would otherwise raise; confirm against
    # the caller's input schema.
    df_pred_a.columns = ["datetime", "hex_bins"]
    return df_pred_a
1115c845593ec2aabf55f6c5d52f4ef8b23fc121
11,450
def group(seq, groupSize, noneFill=True):
    """Groups a given sequence into sublists of length groupSize.

    When the final group is short and noneFill is True it is padded with
    None up to groupSize; with noneFill False the short group is kept as-is.
    """
    groups = []
    current = []
    for element in seq:
        current.append(element)
        if len(current) == groupSize:
            groups.append(current)
            current = []
    if current:
        if noneFill:
            current.extend([None] * (groupSize - len(current)))
        groups.append(current)
    return groups
a7314803cb3b26c9bd69a2dbc2c0594181085c2c
11,451
def strip_comments(contents):
    """Strips the comments from coq code in contents.

    The strings in contents are only preserved if there are no comment-like
    tokens inside of strings. Stripping should be successful and correct,
    regardless of whether or not there are comment-like tokens in strings.
    The behavior of this method is undefined if there are any notations
    which change the meaning of '(*', '*)', or '"'.

    Note that we take some extra care to leave *) untouched when it does
    not terminate a comment.
    """
    # Pad comment delimiters with spaces so they become standalone tokens
    # after the split below; the padding is undone before returning.
    contents = contents.replace('(*', ' (* ').replace('*)', ' *) ')
    tokens = contents.split(' ')
    rtn = []
    is_string = False
    comment_level = 0  # nesting depth — Coq comments may nest
    for token in tokens:
        # Only tokens outside any comment are eligible for output.
        do_append = (comment_level == 0)
        if is_string:
            if token.count('"') % 2 == 1:
                # there are an odd number of '"' characters, indicating that we've ended the string
                is_string = False
        elif token.count('"') % 2 == 1:
            # there are an odd number of '"' characters, so we're starting a string
            is_string = True
        elif token == '(*':
            comment_level += 1
            do_append = False
        elif comment_level > 0 and token == '*)':
            # A '*)' with no open comment falls through and is kept verbatim.
            comment_level -= 1
        if do_append:
            rtn.append(token)
    # Undo the delimiter padding, then trim surrounding whitespace.
    return ' '.join(rtn).replace(' (* ', '(*').replace(' *) ', '*)').strip('\n\t ')
2c7ddc9fb3e2981cf297ff37a4bb0b7019247a3a
11,456
def diff(x):
    """
    Computes the approximate local derivative of `x` via finite differencing
    assuming unit spacing. Can be viewed as the opposite of trapz().

    Args:
        x: The vector-like object (1D np.ndarray, cas.MX) to be integrated.
           Must support slicing and elementwise subtraction — plain Python
           lists will not work.

    Returns:
        A vector of length N-1 with each piece corresponding to the
        difference between one value and the next.
    """
    return x[1:] - x[:-1]
e7e21447f2c745ab8d05f05a9c6c50e24a9aec55
11,457
import random


def random_sentence_swap(sentences, p):
    """Swaps sentences with probability p.

    Performs int(p * len(sentences)) random swaps on a shallow copy of
    `sentences`; the input list itself is not modified.
    """
    def swap_sentence(s):
        # Pick two distinct indices and swap their elements in place.
        idx_1 = random.randint(0, len(s) - 1)
        idx_2 = idx_1
        counter = 0
        while idx_2 == idx_1:
            idx_2 = random.randint(0, len(s) - 1)
            counter += 1
            # Give up after a few tries — without this guard a
            # single-element list would loop forever.
            if counter > 3:
                return s
        s[idx_1], s[idx_2] = s[idx_2], s[idx_1]
        return s

    new_sentences = sentences.copy()
    # n == 0 for an empty input, so swap_sentence (whose randint(0, -1)
    # would raise ValueError on an empty list) is never entered.
    n = int(p * len(sentences))
    for _ in range(n):
        new_sentences = swap_sentence(new_sentences)
    return new_sentences
5a2fa1a7303dc6fe924803fe6fad2ef5da998733
11,458
def process_not_implemented(**kwargs):
    """Default process function — report that the action is not implemented.

    All keyword arguments are accepted and ignored.
    """
    return {
        'function_return': 'not-implemented',
        'slack_response': {},
    }
c78b7cda6e52471a5839d1231f45ba45d077888b
11,460
import json


def create_mapping_dict(total_parsed_results, type_field):
    """
    Create a {'field_name': 'fields_properties'} dict to be used as mapping
    schemas.

    Args:
        total_parsed_results: list. the results from the splunk search query
        type_field: str. the field that represents the type of the event or
            alert.

    Returns:
        dict mapping each event type name to the parsed rawJSON payload of
        the last result carrying that type.
    """
    types_map = {}
    for result in total_parsed_results:
        payload = json.loads(result.get('rawJSON', "{}"))
        type_name = payload.get(type_field, '')
        # Skip results that have no (or an empty) type value.
        if type_name:
            types_map[type_name] = payload
    return types_map
3dd29abbd986a42a2c3a2cab8cd5e40b0b6a65ca
11,463
import csv
import numpy


def read_file(input_filename, parm):
    """Read the given parameter column from a CSV file.

    Fix: the file handle is now closed via a context manager (the original
    opened the file and never closed it).

    :param input_filename: path to the CSV file
    :param parm: column name; note the file's headers are expected to carry
        a leading space, so the lookup key is ' ' + parm
    :return: numpy array of the column values as floats
    """
    vals = []
    with open(input_filename) as infile:
        reader = csv.DictReader(infile)
        for row in reader:
            vals.append(float(row[' ' + parm]))
    return numpy.array(vals)
af9417ae42a582aae69b5209d6bf73f7e890dc4a
11,464
def not_safe_gerone(a, b, c):
    """
    Compute triangle area from valid side lengths via Heron's formula.

    :param float a: first side length
    :param float b: second side length
    :param float c: third side length
    :return: float -- triangle area

    >>> not_safe_gerone(3, 4, 5)
    6.0
    """
    semi = (a + b + c) / 2
    radicand = semi * (semi - a) * (semi - b) * (semi - c)
    return radicand ** 0.5
499cf398158d170363ea22096b44ea16e402a23a
11,465
def compName(n1, n2):
    """
    Compare names:
    n1 n2 strings, blankspace separated names
    return value between -1 (mismatch) and 1 (match)
    return None if any of n1, n2 is empty
    can be used on names, normalised names
    """
    if not n1 or not n2:
        return None
    words1 = n1.strip().split()
    words2 = n2.strip().split()
    if not words1 or not words2:
        return None
    # Score relative to the shorter name's word count (ties use words1).
    if len(words1) <= len(words2):
        shorter, other = words1, words2
    else:
        shorter, other = words2, words1
    overlap = len(set(shorter).intersection(other))
    return (2.0 * overlap - len(shorter)) / float(len(shorter))
7a37261779f38040e0baa3bf99bbaf66569d6962
11,468
def add(cm_response, **data):
    """
    Creates template of VM. Template has a name and some description.
    It defines VM's hardware parameters: CPU and memory. It also defines
    number of points utilized by VM created of it (per hour and overly).

    @clmview_admin_cm
    @cm_request_transparent{user.add()}
    """
    # Transparent pass-through: the CM response is returned unchanged —
    # the decorators referenced in the docstring perform the actual work.
    return cm_response
ff8b29ccb0fa288c018275d09208a0b3cfddfce0
11,469
def change_config(set_attr):
    """
    Decorator: every attribute change must also be persisted to the config
    file and reflected in the tray menu, so wrap the setter to save the
    config and refresh the menu after it runs.

    :param set_attr: function that changes an attribute
    :return: the decorated function
    """
    def wrapper(plugin, option_text):
        result = set_attr(plugin, option_text)
        plugin.save_config()
        plugin.root.refresh_menu()
        return result
    return wrapper
0f497fb83007dbcd353f44a684c5be995821b424
11,470
def sqs_lookup_url(session, queue_name):
    """Lookup up SQS url given a name.

    Args:
        session (Session) : Boto3 session used to lookup information in AWS.
        queue_name (string) : Name of the queue to lookup.

    Returns:
        (string) : URL for the queue.

    Raises:
        (botocore.exceptions.ClientError): If the queue is not found.
    """
    client = session.client('sqs')
    resp = client.get_queue_url(QueueName=queue_name)
    return resp['QueueUrl']
4f7bfb98d10a372b3b07f270e2a25e3227732f41
11,474
# Governing coalition factions per Bundestag electoral term.
# Hoisted to module level so the table is built once, not on every call.
_GOVERNMENT_FACTIONS = {
    1: ["CDU/CSU", "FDP", "DP"],
    2: ["CDU/CSU", "FDP", "DP"],
    3: ["CDU/CSU", "DP"],
    4: ["CDU/CSU", "FDP"],
    5: ["CDU/CSU", "SPD"],
    6: ["SPD", "FDP"],
    7: ["SPD", "FDP"],
    8: ["SPD", "FDP"],
    9: ["SPD", "FDP"],
    10: ["CDU/CSU", "FDP"],
    11: ["CDU/CSU", "FDP"],
    12: ["CDU/CSU", "FDP"],
    13: ["CDU/CSU", "FDP"],
    14: ["SPD", "BÜNDNIS 90/DIE GRÜNEN"],
    15: ["SPD", "BÜNDNIS 90/DIE GRÜNEN"],
    16: ["CDU/CSU", "SPD"],
    17: ["CDU/CSU", "FDP"],
    18: ["CDU/CSU", "SPD"],
    19: ["CDU/CSU", "SPD"],
}


def get_government_factions(electoral_term):
    """Get the government factions for the given electoral_term.

    :param electoral_term: integer term number, 1-19
    :raises KeyError: if electoral_term is outside the table
    """
    return _GOVERNMENT_FACTIONS[electoral_term]
cfd502f81ebb0536432da4974ab9f25724dc83df
11,475
def VectorVector_scalar(vectorA, vectorB):
    """N dimensional. Return a scalar resulting from vectorA * vectorB
    (the dot product).

    Indexes vectorB by vectorA's positions, so a shorter vectorB still
    raises IndexError as before.
    """
    return sum(component * vectorB[i] for i, component in enumerate(vectorA))
c7f0448a05573b8ddba63ed7a8262072ffcda38f
11,476
def describeTreeProductions(tree, links, offset=0):
    """Write out the PCFG node production for `tree`.

    Returns "<node> -> <child> <child> ..." using each node's `.node` label.
    `links` and `offset` are accepted for interface compatibility but unused.
    """
    parts = ["%s ->" % tree.node]
    parts.extend(" %s" % child.node for child in tree)
    return "".join(parts)
8f0ec7db35316684441863c8475b6089da0c7e98
11,477
def eval_shift_distance(shift, reg_matches):
    """
    Compute the distance in characters a match has been shifted over.

    "reg_matches" is the set of regular matches as returned by
    find_regular_matches(). Matches appear to be (pos_a, pos_b, text)
    triples (m[2] is measured with len() below) — confirm with
    find_regular_matches().

    The distance is defined as the number of characters between the
    shifted match and the closest regular match.

    NOTE(review): assumes at least one regular match "brackets" the shift;
    if mid_matches is empty the subscripts below raise IndexError.
    """
    # Regular matches positioned on opposite sides of the shift in the two
    # sequences (before in one, after in the other) — i.e. crossed by it.
    mid_matches = sorted(m for m in reg_matches
                         if (m[0] < shift[0] and m[1] > shift[1])
                         or (m[0] > shift[0] and m[1] < shift[1]))
    # Negative distance when the closest bracketing match lies before the
    # shift; otherwise positive distance measured from the end of the last
    # bracketing match to the end of the shifted match.
    return (-(shift[0] - mid_matches[0][0])
            if mid_matches[0][0] < shift[0]
            else (mid_matches[-1][0] + len(mid_matches[-1][2])
                  - (shift[0] + len(shift[2]))))
aceb705d0f89cacb6f7738667e6dfaf803bed85c
11,478
def get_common_basename(paths):
    """Return the common "basename" (the last component of path) shared by
    all elements of `paths`, or None if they do not all share one.

    Returns None for an empty list. Generates an AssertionError if any path
    ends with a slash.

    Fix: paths containing no '/' previously crashed with ValueError in the
    comparison loop (`prefix, suffix = path.rsplit('/', 1)` unpacked a
    one-element result); the loop now uses the same `[-1]` indexing as the
    first-element case.
    """
    if not paths:
        return None
    first_suffix = paths[0].rsplit('/', 1)[-1]
    assert first_suffix != '', "Invalid path, has trailing slash: %s" % paths[0]
    for path in paths:
        suffix = path.rsplit('/', 1)[-1]
        assert suffix != '', "Invalid path, has trailing slash: %s" % path
        if suffix != first_suffix:
            # suffixes do not match
            return None
    return first_suffix
e2189244ef556038caea24ba6aa85792f34868db
11,479
def even(n: int) -> bool:
    """
    Check that given number is even.

    >>> [(n, even(n)) for n in range(5)]
    [(0, True), (1, False), (2, True), (3, False), (4, True)]
    """
    # The low bit is 0 exactly for even integers (negative ints included).
    return not n & 1
b96403b1b250560b29aafc253aaa1883f4018002
11,481
def is_owner(org):
    """
    Function for checking if a user's org has ownership rights.

    SINCE WE ARE KEEPING ACCESS CONTROL SIMPLE, WE ARE DEFAULTING THIS TO
    TRUE — every caller is treated as an owner regardless of `org`.

    :param str org: An organization
    :return bool: whether the organization matches the current user or the
        group of admins (currently always True)
    """
    return True
    # return current_user.organization.organization == 'admins' or current_user.organization.organization == org
ba326c4524cd30038e5a8a1f88aa6b8f80b9a57f
11,482
def percent_yield(actual_yield, theoretical_yield):
    """Return the percent yield: (actual_yield / theoretical_yield) * 100.

    actual_yield:
        The yield given in the equation.
    theoretical_yield:
        This yield can be calculated with the theoretical_yield() function.
    """
    ratio = actual_yield / theoretical_yield
    return ratio * 100
ceef481ebae8be223bdb9b9c1f2c8a0c8713fcca
11,485
from typing import Union
import os
from pathlib import Path


def make_db_path(in_path: Union[str, os.PathLike, Path]) -> str:
    """
    Returns the provided path resolved and as a String so APSW can
    understand it.

    Peewee (or APSW) seems to not follow the `os.PathLike`-Protocol.

    Args:
        in_path (Union[str, os.PathLike, Path])

    Returns:
        str: The resolved and normalized path to the db.
    """
    resolved = Path(in_path).resolve()
    return str(resolved)
9b34f8cc5de51d5b115b7829463e08465fcaeb62
11,486
def contains_all_required_firewalls(firewall, topology):
    """
    Check that the firewall settings contain an entry for every connected
    src/dest pair in the topology (both directions), skipping self-links.
    """
    for src, row in enumerate(topology):
        for dest, connected in enumerate(row):
            if src == dest or connected != 1:
                continue
            if str((src, dest)) not in firewall:
                return False
            if str((dest, src)) not in firewall:
                return False
    return True
a4dbbb5833909e816c5b7568cf75da68c06d9e4c
11,487
def _get_upload_headers(first_byte, file_size, chunk_size): """Prepare the string for the POST request's headers.""" content_range = 'bytes ' + \ str(first_byte) + \ '-' + \ str(first_byte + chunk_size - 1) + \ '/' + \ str(file_size) return {'Content-Range': content_range}
3ccfec747ce8f8f31b97489005b1ac421f7c9024
11,490
def format_uptime(uptime_in_seconds):
    """Format number of seconds into human-readable string.

    :param uptime_in_seconds: The server uptime in seconds.
    :returns: A human-readable string representing the uptime.

    >>> uptime = format_uptime('56892')
    >>> print(uptime)
    15 hours 48 min 12 sec
    """
    total = int(uptime_in_seconds)
    minutes, seconds = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    parts = []
    for amount, unit in ((days, 'days'), (hours, 'hours'),
                         (minutes, 'min'), (seconds, 'sec')):
        if amount == 0 and not parts:
            # Skip leading zero-valued units, e.g. no "0 days 0 hours".
            continue
        if amount == 1 and unit.endswith('s'):
            # Singularize, e.g. "1 day" instead of "1 days".
            unit = unit[:-1]
        parts.append('{0} {1}'.format(amount, unit))
    return ' '.join(parts)
a6a4544b6754113d0c5ed13a18838770448be6a8
11,495
def chunker(arrays, chunk_size):
    """Split the arrays into equally sized chunks.

    :param arrays: (N * L np.array) arrays of sentences
    :param chunk_size: (int) size of the chunks
    :return chunks: (list) for each chunk position, the list of per-array
        slices of length chunk_size (the last chunk may be shorter)
    """
    sentence_length = len(arrays[0])
    return [
        [array[start: start + chunk_size] for array in arrays]
        for start in range(0, sentence_length, chunk_size)
    ]
42a6867464e32d59a50927e8e88d067fc86f4fa1
11,497
import requests
import json


def get_device_response(ctx):
    """Return the deserialized JSON response for /stat/device (devices).

    :param ctx: context object (presumably a click context) whose `obj`
        mapping supplies the controller base URL, TLS verification flag and
        session cookies — confirm against the CLI setup.
    :return: parsed JSON body of the response
    """
    r = requests.get(
        ctx.obj["URL"] + "stat/device",
        verify=ctx.obj["VERIFY"],
        cookies=ctx.obj["COOKIES"],
    )
    # Parses the raw text body; equivalent to r.json() for JSON responses.
    return json.loads(r.text)
a763e3a5aedb0b2124244424f44fe7aa56bb8963
11,500
def unique_mers(sequencelist, sizeofmer):
    """Return the set of all unique `sizeofmer`-length substrings (N-mers).

    Accepts either a single string or a list/set of strings.

    Improvements: the two duplicated extraction branches are unified into
    one loop, and the dead debugging code is removed. For any other input
    type None is returned (preserving the original implicit behavior).

    :param sequencelist: a string, or a list/set of strings
    :param sizeofmer: length of each mer
    :return: set of substrings, or None for unsupported input types
    """
    if isinstance(sequencelist, str):
        sequences = [sequencelist]
    elif isinstance(sequencelist, (list, set)):
        sequences = sequencelist
    else:
        return None
    mers = set()
    for sequence in sequences:
        # Slide a window of sizeofmer over the sequence.
        for end in range(sizeofmer, len(sequence) + 1):
            mers.add(sequence[end - sizeofmer: end])
    return mers
e5e24a968dfe65480bebc2ebeaef39a3510fc4bb
11,501
def parse_context_var(context, variable_name):
    """Resolve a context variable reference.

    If `variable_name` starts with '$', the remainder is looked up in
    `context`. A leading '$$' escapes the dollar sign; note that EVERY
    '$$' occurrence in the string is then collapsed to '$'.
    Non-strings and plain strings are returned unchanged.

    Fix: the original docstring's escape example was wrong — '$$aa$$a'
    becomes '$aa$a' (both '$$' pairs are replaced), not '$aa$$a'.

    :param context: mapping of variable names to values
    :param variable_name: raw value, or a '$'-prefixed variable reference
    """
    if not isinstance(variable_name, str):
        return variable_name
    if variable_name.startswith("$$"):
        return variable_name.replace("$$", "$")
    if variable_name.startswith("$"):
        return context[variable_name[1:]]
    return variable_name
d9dbe2a8341179cc5157396a74125b488c0ff053
11,502
def logo_round(v, *args):
    """
    ROUND num outputs the nearest integer to the input.

    Extra positional args are forwarded to round() (e.g. ndigits), in
    which case the result is a rounded float rather than an integer.
    """
    return round(v, *args)
fcbf49a40ee95fcbf4c9af62f6b52aed5bfac0a6
11,503
import pickle


def get_palette(palette_file):
    """
    Load a color palette from a pickled file.

    :param palette_file: path to the pickle file containing the palette
    :return: the unpickled palette object
    """
    # SECURITY NOTE: pickle.load can execute arbitrary code from the file —
    # only load palette files from trusted sources.
    with open(palette_file, 'rb') as fr:
        color_palette = pickle.load(fr)
    return color_palette
29d8a661eed7f50d67e108a07017fc93019bfa3c
11,504
import math


def calculate_ransac_iterations(min_points_per_sample, outlier_rate, desired_success_probability):
    """Estimates how many RANSAC iterations you should run.

    Args:
        min_points_per_sample: Minimum number of points to build a model
            hypothesis.
        outlier_rate: Float, 0-1, how often outliers are expected.
        desired_success_probability: Float, 0-1 exclusive, desired certainty
            that the RANSAC run will find the correct model. Higher certainty
            requires more iterations.

    Returns:
        Number of iterations.

    Raises:
        ValueError: if any argument is outside its valid range (and, from
            math.log, if outlier_rate is exactly 0 — every sample is then
            all-inlier and the iteration formula degenerates).

    Note: the redundant `assert` duplicating the first ValueError check was
    removed — it could never fire and would vanish under `python -O` anyway.
    """
    if not 0 < desired_success_probability < 1:
        raise ValueError('desired_success_probability should fall between 0 and 1, exclusive.')
    if not 0 <= outlier_rate <= 1:
        raise ValueError('outlier_rate should fall between 0 and 1.')
    if min_points_per_sample <= 0:
        raise ValueError('min_points_per_sample must be a positive integer.')
    # Probability that a single sample contains at least one outlier.
    failure_rate_per_sample = 1 - (1 - outlier_rate) ** min_points_per_sample
    return math.ceil(
        math.log(1 - desired_success_probability) / math.log(failure_rate_per_sample)
    )
cafca93f052770f695f90d5f37e088e880fd0c45
11,506
import csv
import logging


def load_patterns(file_path):
    """Load patterns from a CSV file.

    Each row maps its first column to a tuple of the second and third
    columns. Returns an empty dict (after logging an error) when the file
    is missing.
    """
    patterns = {}
    try:
        with open(file_path, mode='r') as csvfile:
            for row in csv.reader(csvfile):
                patterns[row[0]] = (row[1], row[2])
    except FileNotFoundError:
        logging.error("patterns file not found!")
    return patterns
330472f0507ffc4e9d967ef3027e362e47e0d4d2
11,507