content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import os


def get_alembic_locations(plugin_dirs):
    """Return (branchname, plugin_dir) tuples for the given plugin dirs.

    The branch name is the name of the plugin directory, which should
    also be the unique identifier of the plugin.
    """
    locations = []
    for plugin_dir in plugin_dirs:
        branch = os.path.basename(os.path.dirname(plugin_dir))
        locations.append((branch, plugin_dir))
    return locations
62866334a48cd2596eabc0e8eadcee986cbe1155
40,282
def recall(tp, fn):
    """Compute recall from true positives and false negatives.

    Arguments:
        tp -- true positives (scalar or array)
        fn -- false negatives (scalar or array)

    Returns:
        Recall, with a tiny epsilon in the denominator to avoid
        division by zero.
    """
    denominator = tp + fn + 1e-10
    return tp / denominator
367c896edc0e65c1b2881a76bece8e65c703f9ef
40,284
import pickle


def load_df(path):
    """Load and return the object stored in the pickle file at *path*.

    Parameters:
        path (string): Path where the pickle file resides.

    Returns:
        object: The unpickled object.
    """
    # A context manager guarantees the handle is closed even if
    # pickle.load raises (the original leaked it on error).
    with open(path, 'rb') as infile:
        return pickle.load(infile)
4399e897ef7ae9d7a2342548f0ad15819ded331e
40,285
def getStratifiedSampleBandPoints(image, region, bandName, **kwargs):
    """Stratified-sample an image over a region via
    ee.Image.stratifiedSample.

    Args:
        image (ee.Image): the image to sample
        region (ee.Geometry): geometry over which to sample
        bandName (String): band used for stratification

    Returns:
        An ee.FeatureCollection of sampled points along with coordinates
    """
    sample_args = dict(numPoints=1000, classBand=bandName, region=region)
    sample_args.update(kwargs)
    return image.stratifiedSample(**sample_args)
c5a8ae26f8b4a76dc2d12e48524698f2d582d08e
40,287
def z2lin(array):
    """Convert dB values (scalar or array) back to linear scale."""
    db_scaled = array / 10.
    return 10 ** db_scaled
e0ed221666398c9ca8488fd20f5e3b0711ad6a7c
40,288
def compute_jaccard_similarity(site_a, site_b):
    """
    Compute the Jaccard similarity between two given ActiveSite instances.

    Input: two ActiveSite instances
    Output: the similarity between them (a floating point number)
    """
    types_a = set(residue.type for residue in site_a.residues)
    types_b = set(residue.type for residue in site_b.residues)
    shared = len(types_a & types_b)
    combined = len(types_a | types_b)
    return float(shared) / float(combined)
1e3f83e3a98c3e7f658a22f75a5f377efd6529d5
40,289
import pickle


def deserialize_users(path):
    """Read and return the pickled contacts file stored at *path*."""
    # NOTE: pickle is only safe on trusted files.
    with open(path, "rb") as handle:
        return pickle.load(handle)
57a409d1fecb618a72de37b4752994209ca45a36
40,290
def intersectinmetres(refgeom, testgeom):
    """Intersect two geometries and return [area, fraction-of-refgeom],
    with the area expressed in the geometries' own units."""
    overlap = refgeom.intersection(testgeom)
    overlap_area = overlap.area
    coverage = overlap.area / refgeom.area
    return [overlap_area, coverage]
6a6b3a464dc34151a2469255d0f0d877cf89a758
40,291
def words_not_anagrams(word_a, word_b):
    """Return True if the two words are NOT anagrams, otherwise False.

    Bug fix: the original returned None (implicitly) when the words WERE
    anagrams; it now always returns an explicit bool.
    """
    return sorted(word_a) != sorted(word_b)
2d2230046a59eace95749c82948290f40c157275
40,292
import subprocess


def run_minimap2(fastq_in, indexfile, output_paf, threads):
    """Run Minimap2 on *fastq_in* against *indexfile*, writing PAF output.

    :param fastq_in: path to the input FASTQ reads
    :param indexfile: path to the minimap2 index/reference
    :param output_paf: path for the PAF output file (-o)
    :param threads: number of worker threads (-t)
    :return: the process return code (always 0: check=True raises
        CalledProcessError on any non-zero exit)
    """
    # Preset alignment tuning flags (--cs, -m8, -k 10, -w 5, -B1, -A6,
    # --dual=no, -c) -- see the minimap2 man page for their semantics.
    cmd = [
        "minimap2", "--cs", "-m8", "-k", "10", "-w", "5", "-B1", "-A6",
        "--dual=no", "-c", "-t", str(threads), "-o", output_paf,
        indexfile, fastq_in
    ]
    proc = subprocess.run(cmd, check=True)
    return proc.returncode
ad6671c9467c10f2bdbad8bd0b58bf6852d54dc2
40,293
def images_path(instance, filename):
    """Build the storage path for a free parking place image:
    'parking_places/<parking name with underscores>/<filename>'.
    Falls back to '.' when the parking has no name."""
    name = instance.owning_parking.name or '.'
    safe_name = name.replace(' ', '_')
    return 'parking_places/{0}/{1}'.format(safe_name, filename)
6ea3eb2f8ffbd2af3f729fd7c425a707cccf3c32
40,295
def perc_bounds(perc):
    """Return (low, high) multiplier bounds around reporting irradiance.

    perc : float or tuple
        Percentage, or a (low_pct, high_pct) tuple, used to filter in
        the irrRC_balanced function (required when irr_bal is True).
    """
    if isinstance(perc, tuple):
        below, above = perc[0] / 100, perc[1] / 100
    else:
        below = above = perc / 100
    return (1 - below, 1 + above)
7f355040b6029231e151bd1e279fe69603f057d7
40,297
def stringifyPlayMove(pos, modifiers, name="game"):
    """Build the source-code string for a Game.play call, e.g.
    'game.play((1, 2), (True, False, False))'. Useful for replicating
    exact board states in tests without placing every piece by hand.

    :param pos: the position of the piece to move
    :param modifiers: (left, forward, jump) modifier 3-tuple
    :param name: variable name of the game object in the generated code
    :return: the code string that performs the move
    """
    return "{}.play({}, {})".format(name, pos, modifiers)
2a902ce02627be707664f125346f21d14f333e02
40,298
import os
import yaml


def load_params(run_dirpath: str) -> dict:
    """Load the run params (params.yml) from the app's data dir."""
    params_file = os.path.join(run_dirpath, "params.yml")
    with open(params_file, "r") as handle:
        return yaml.safe_load(handle)
0c216ffd15652a4a46c1ef206f889027b9136ce6
40,299
import logging
import os
import subprocess
import sys


def _sort_index_bam(bam, processes=6, rm_ori=False):
    """Sort and index a .bam file with samtools.

    Removes the original file if rm_ori is True (never when the input is
    already sorted). Returns the path of the sorted, indexed bam.

    Bug fix: the original imported `call` from unittest.mock, so the
    samtools commands were never actually executed; subprocess is used
    instead.
    """
    if bam[-4:] != '.bam':
        logging.error('Bam file needs to end in .bam')
        sys.exit()
    if 'sorted.bam' not in bam:
        logging.info("sorting {0}".format(bam))
        sorted_bam = bam[:-4] + '.sorted.bam'
        cmd = ['samtools', 'sort', bam, '-o', sorted_bam, '-@', str(processes)]
        print(' '.join(cmd))
        subprocess.run(cmd)
    else:
        # Already sorted: never delete the only copy.
        sorted_bam = bam
        rm_ori = False
    logging.info("Indexing {0}".format(sorted_bam))
    cmd = ['samtools', 'index', sorted_bam, sorted_bam + '.bai',
           '-@', str(processes)]
    print(' '.join(cmd))
    subprocess.run(cmd)
    if rm_ori:
        logging.info("Deleting {0}".format(bam))
        os.remove(bam)
    return sorted_bam
b8a9aca39832b48e468bcd564d63a101950df297
40,300
import os


def ls_newest(directory, extensions):
    """Return the most-recently-modified file in *directory* matching one
    of *extensions* (case-insensitive), or None if nothing matches.

    Parameters
    ----------
    directory: str
    extensions: iterable of str
        Extensions with or without a leading dot, e.g. ".txt" or "txt".

    Returns
    -------
    str or None
        Full path of the newest matching file, or None if none found.
    """
    # Normalise once outside the loop, and lower BOTH sides of the
    # comparison (the original lowered only the extension, so e.g.
    # "A.TXT" never matched ".txt").
    suffixes = tuple(ext.lstrip('.').lower() for ext in extensions)
    matches = [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.lower().endswith(suffixes)
    ]
    matches.sort(key=lambda fp: os.stat(fp).st_mtime)
    try:
        return matches[-1]
    except IndexError:  # pragma: no cover
        return None
1814a9e9a35967e7c9ae7ee3276f244bb138dd79
40,301
from typing import Counter


def get_piece(turn, grid):
    """Count how many cells of the grid hold the given piece.

    :param turn: "X" or "O"
    :param grid: A 2-dimensional 7x7 list
    :return: Number of pieces of "turn" on the grid
    """
    flattened = []
    for line in grid:
        flattened.extend(line)
    return Counter(tuple(flattened))[turn]
1c7fda238ddba6d2620b5dfdee1b4173f606ede6
40,302
def get_distance(p1, p2):
    """Return the minimum distance between two points, capped at 5000,
    wrapped in a single-element list.

    Parameters
    ----------
    p1, p2 : shapely geometric objects
        Points whose distance is measured (units follow the projection).

    Returns
    -------
    list
        [distance]; the cap value 5000 when the distance call fails
        with a TypeError (which is printed).
    """
    capped = 5000
    try:
        capped = min(capped, p1.distance(p2))
    except TypeError as err:
        print(f'{err}')
    return [capped]
2bcfdc62b25e286d1a1d46c27f058e8da3e722e9
40,305
import functools
import torch


def via_numpy(func):
    """Decorator that converts all torch arguments to numpy arrays,
    calls *func*, and converts the result(s) back to torch tensors.
    Informal testing showed the overhead is on the order of
    microseconds."""
    @functools.wraps(func)
    def wrapped(*args):
        np_args = [tensor.data.numpy() for tensor in args]
        out = func(*np_args)
        if isinstance(out, (tuple, list)):
            return tuple(torch.as_tensor(item) for item in out)
        return torch.as_tensor(out)
    return wrapped
ab90ffda6b69beb7ec092b7378dfe8055990248c
40,307
def is_proxy(obj):
    """ Return True if `obj` is an array proxy """
    # getattr with a default swallows AttributeError exactly like the
    # original try/except, including errors raised inside properties.
    return getattr(obj, 'is_proxy', False)
871c163b30ccc1b31b435c9baac5c0d6063d271e
40,308
def is_key_all_values_equal(list_of_dict, key, value):
    """ Check if all values of a key are equal to the specified value. """
    return all(entry[key] == value for entry in list_of_dict)
c8e303ffd3f9de4f065ba0bd52d67e1a6c1f8708
40,309
def add_space_separation(funql):
    """Split funql and join with space separator."""
    tokens = []
    current = ""
    for ch in funql:
        if ch in "(),":
            # Flush any accumulated symbol before emitting the separator.
            if current:
                tokens.append(current)
                current = ""
            tokens.append(ch)
        else:
            current += ch
    if current:
        tokens.append(current)
    return " ".join(tokens)
75c7d6d53380287833445787d3e2505427cab03e
40,310
from typing import List
from typing import Any


def check_for_optional_param(keys: List[str], subcase: Any, msg: str,
                             error: Any, log: Any, ierror: int,
                             nerrors: int) -> int:
    """At least one of *keys* must be present on the subcase; logs an
    error (and raises once ierror reaches nerrors) otherwise.
    Returns the (possibly incremented) error count."""
    if any(subcase.has_parameter(*keys)):
        return ierror
    full_msg = 'Must have one of %s\n%s' % (str(keys), msg)
    log.error(full_msg)
    if ierror == nerrors:
        raise error(full_msg)
    return ierror + 1
489522a11ab5e5bb0cda1a64f6b64ad86e9c5092
40,311
import argparse


def cmdLineParser():
    """Parse the command line parameters and arguments.

    Args:
        None

    Returns:
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='RJ Hunter - Hunt your favorite RJ Works.')
    option_help = (
        ('--url', 'URL you are about to hunt.'),
        ('--category', 'Hunt by category.'),
        ('--keyword', 'Hunt by search request.'),
        ('--file', 'Hunt by file. File should only containe one URL per line.'),
    )
    for flag, text in option_help:
        parser.add_argument(flag, help=text)
    return parser.parse_args()
6313a75863dfc03d0c619723cdc68835d2d69513
40,312
def celsius_to_fahrenheit(celsius_temp):
    """Convert a Celsius temperature to Fahrenheit.

    PARAMETERS
    ----------
    celsius_temp : float
        Temperature in degrees Celsius.

    RETURNS
    -------
    temperature : float
        Temperature in degrees Fahrenheit.
    """
    scaled = celsius_temp * (9 / 5)
    return scaled + 32
c6674816a463d022da8863e0f8fea78dd57c1a22
40,314
def extrae_coords_atomo(res, atomo_seleccion):
    """Extract the X, Y, Z coordinates of one particular atom from the
    atomic records of a residue.

    Parameters:
        res: multi-line PDB-style text for a residue, one atom per line.
        atomo_seleccion: atom-name field (columns 12:16) to look for.

    Returns:
        list: [x, y, z] floats of the last matching atom, or [] if none.
    """
    coords = []
    for line in res.split("\n"):
        # PDB fixed columns: 12:16 atom name, 30:38 / 38:46 / 46:54 = X/Y/Z.
        if line[12:16] == atomo_seleccion:
            coords = [float(line[30:38]),
                      float(line[38:46]),
                      float(line[46:54])]
    return coords
ae31f08ab25fffb4ff453710eea4770b72f2d565
40,316
from functools import reduce


def merge_dicts(list_of_dicts):
    """Merge a list of dicts into a new dict; for duplicate keys the
    last value seen wins."""
    merged = {}
    for d in list_of_dicts:
        merged.update(d)
    return merged
68c2d67c97f2276c31b4932c34e31034b1fe3213
40,317
def sequence_df2array(df, n_frames, n_agents):
    """Reshape a trajectory dataframe into an (n_frames, n_agents, 2)
    coordinate array.

    :param df: [frame_id(index) agent_id x y] with n_frames * n_agents
        coordinates
    :param n_frames:
    :param n_agents:
    :return: array of shape (n_frames, n_agents, 2)
    """
    # Sorting by both keys lines rows up frame-major, agent-minor, so a
    # plain reshape produces the desired layout; the first two columns
    # (frame_id, agent_id) are dropped.
    ordered = df.reset_index().sort_values(by=['frame_id', 'agent_id'])
    coords = ordered.values[:, 2:]
    return coords.reshape((n_frames, n_agents, 2))
d842780c10a6c9bb5471faf58b3aab6e2ffcdd90
40,319
import os
import subprocess


def dump_module_doc(module):
    """Dump ansible module docs as JSON into data/modules/<module>.json.

    :param module: ansible module name
    :return: the module name (for chaining/map use)
    """
    # subprocess with an argument list avoids shell interpolation of
    # *module* (os.system built a shell command string); the shell
    # redirection is replaced by writing stdout straight to the file.
    out_path = os.path.join("data", "modules", f"{module}.json")
    with open(out_path, "w") as out_file:
        subprocess.run(["ansible-doc", "-j", module], stdout=out_file)
    return module
9e8c3efa07d7b60c987f0c847edcdd0bd3130797
40,323
def _doJPParse(soup):
    """ Ugly method which parses journey planner html and tries to
    extract useful information. Who said scraping is easy.

    Returns a list of dicts, one per journey option, with keys:
    leaving, origin, destination, arriving, total_time, changes_count,
    platform, and (when the service involves changes) a 'changes' list
    of per-leg dicts with the same time/place keys.
    NOTE(review): relies entirely on the site's markup (td class names
    like 'leaving'/'origin'); any markup change breaks it.
    """
    joptions = []
    for row in soup.findAll('tr', recursive=False):
        # Skip changes section since it gets handled below.
        if row['class'] == 'changes':
            continue
        service = {}
        row = row.findNext('td', {'class' : 'leaving'})
        service['leaving'] = row.contents[0].strip()
        # '[' characters are stripped from place names (markup artefact).
        service['origin'] = row.findNext('td', {'class' : 'origin'}).contents[0].replace('[', '').strip()
        service['destination'] = row.findNext('td', {'class' : 'destination'}).find('span', {'class' : 'arrow'}).contents[0].replace('[', '').strip()
        row = row.findNext('td', {'class' : 'arriving'})
        service['arriving'] = row.contents[0].replace('[', '').strip()
        row = row.findNext('td')
        service['total_time'] = row.contents[0].strip()
        row = row.findNext('td')
        service['changes_count'] = row.contents[0].strip()
        if service['changes_count'] == '':
            # Changes are involved in this service: the real count lives
            # in a nested link, and the legs in a nested tbody.
            row1 = row.findNext('a')
            service['changes_count'] = row1.contents[0].strip()
            row1 = row1.findNext('tbody')
            changes = []
            for c in row1.findAll('tr', recursive=False):
                row1 = c.findNext('td')
                change = {}
                row1 = row1.findNext('td')
                change['leaving'] = row1.contents[0].strip()
                row1 = row1.findNext('td', {'class' : 'origin'})
                change['origin'] = row1.contents[0].replace('[', '').strip()
                row1 = row1.findNext('td', {'class' : 'destination'}).find('span', {'class' : 'arrow'})
                change['destination'] = row1.contents[0].replace('[', '').strip()
                row1 = row1.findNext('td')
                change['arriving'] = row1.contents[0].strip()
                changes.append(change)
            service['changes'] = changes
        row = row.findNext('td')
        # Skip the alert icon
        row = row.findNext('td')
        service['platform'] = row.contents[0].replace('-', '').strip()
        joptions.append(service)
    return joptions
5ef1a6332daa6aaba8ddfe4d700df8090cf23e43
40,324
def ade20k_palette(num_cls=None):
    """
    Generates the ADE20K data-set color palette.
    Data-Set URL:
        http://host.robots.ox.ac.uk/pascal/VOC/
    Color palette definition:
        https://docs.google.com/spreadsheets/d/1se8YEtb2detS7OuPE86fXGyD269pMycAWe2mtKUj2W8/edit#gid=0 .
    Original source taken from:
        https://gluon-cv.mxnet.io/_modules/gluoncv/utils/viz/segmentation.html .
    `num_cls`: the number of colors to generate
    return: the generated color palette (flat [R, G, B, R, G, B, ...] list)
    """
    # Flat RGB triplet list: palette[3*i : 3*i+3] is the color of class i.
    palette = [
        0, 0, 0, 120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50, 4, 200, 3, 120, 120, 80, 140, 140, 140,
        204, 5, 255, 230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7, 150, 5, 61, 120, 120, 70, 8, 255, 51,
        255, 6, 82, 143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3, 0, 102, 200, 61, 230, 250, 255, 6, 51,
        11, 102, 255, 255, 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220, 255, 9, 92, 112, 9, 255, 8, 255, 214,
        7, 255, 224, 255, 184, 6, 10, 255, 71, 255, 41, 10, 7, 255, 255, 224, 255, 8, 102, 8, 255, 255, 61, 6,
        255, 194, 7, 255, 122, 8, 0, 255, 20, 255, 8, 41, 255, 5, 153, 6, 51, 255, 235, 12, 255, 160, 150, 20,
        0, 163, 255, 140, 140, 140, 250, 10, 15, 20, 255, 0, 31, 255, 0, 255, 31, 0, 255, 224, 0, 153, 255, 0,
        0, 0, 255, 255, 71, 0, 0, 235, 255, 0, 173, 255, 31, 0, 255, 11, 200, 200, 255, 82, 0, 0, 255, 245,
        0, 61, 255, 0, 255, 112, 0, 255, 133, 255, 0, 0, 255, 163, 0, 255, 102, 0, 194, 255, 0, 0, 143, 255,
        51, 255, 0, 0, 82, 255, 0, 255, 41, 0, 255, 173, 10, 0, 255, 173, 255, 0, 0, 255, 153, 255, 92, 0,
        255, 0, 255, 255, 0, 245, 255, 0, 102, 255, 173, 0, 255, 0, 20, 255, 184, 184, 0, 31, 255, 0, 255, 61,
        0, 71, 255, 255, 0, 204, 0, 255, 194, 0, 255, 82, 0, 10, 255, 0, 112, 255, 51, 0, 255, 0, 194, 255,
        0, 122, 255, 0, 255, 163, 255, 153, 0, 0, 255, 10, 255, 112, 0, 143, 255, 0, 82, 0, 255, 163, 255, 0,
        255, 235, 0, 8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255, 255, 0, 31, 0, 184, 255, 0, 214, 255,
        255, 0, 112, 92, 255, 0, 0, 224, 255, 112, 224, 255, 70, 184, 160, 163, 0, 255, 153, 0, 255, 71, 255, 0,
        255, 0, 163, 255, 204, 0, 255, 0, 143, 0, 255, 235, 133, 255, 0, 255, 0, 235, 245, 0, 255, 255, 0, 122,
        255, 245, 0, 10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255, 255, 255, 0, 0, 153, 255, 0, 41, 255,
        0, 255, 204, 41, 0, 255, 41, 255, 0, 173, 0, 255, 0, 245, 255, 71, 0, 255, 122, 0, 255, 0, 255, 184,
        0, 92, 255, 184, 255, 0, 0, 133, 255, 255, 214, 0, 25, 194, 194, 102, 255, 0, 92, 0, 255]
    if num_cls is not None:
        # NOTE(review): this compares num_cls against the number of INTS
        # in the list (3 per color), not the number of colors; requests
        # between len(palette)//3 and len(palette) silently truncate via
        # the slice below -- confirm intended limit.
        if num_cls >= len(palette):
            raise Exception("Palette Color Definition exceeded.")
        palette = palette[:num_cls*3]
    return palette
fecaeb15214a510c582c34b53e18e6bbdccc3c5d
40,325
import tempfile
import os
import requests
import shutil
import zipfile
import pandas
import io
import numpy


def calculate_ss2_metrics_csv(csv_zip_url):
    """Calculate metrics for the zipped csv.

    Downloads the zip at *csv_zip_url*, reads the contained
    '*expression.csv' file (rows presumably cells, columns genes --
    TODO confirm orientation against the producer) and returns a dict
    with expression_sum, expression_nonzero and cell_count.
    """
    temp_dir = tempfile.mkdtemp(suffix="csv_zip_test")
    local_csv_zip_path = os.path.join(temp_dir, os.path.basename(csv_zip_url))
    # Stream the download straight to disk to avoid holding the archive
    # in memory.
    response = requests.get(csv_zip_url, stream=True)
    with open(local_csv_zip_path, "wb") as local_csv_zip_file:
        shutil.copyfileobj(response.raw, local_csv_zip_file)
    csv_zip = zipfile.ZipFile(local_csv_zip_path)
    # Assumes exactly one member ends with 'expression.csv'.
    csv_name = [n for n in csv_zip.namelist() if n.endswith("expression.csv")][0]
    exp_pdata = pandas.read_csv(
        io.StringIO(csv_zip.read(csv_name).decode()),
        header=0,
        index_col=0)
    return {
        "expression_sum": numpy.sum(exp_pdata.values),
        "expression_nonzero": numpy.count_nonzero(exp_pdata.values),
        "cell_count": exp_pdata.shape[0]
    }
8e184e902e9284930879d20fad5f289d560c6a1e
40,326
def filename_flag(filename, flag):
    """Insert '-<flag>' just before the file extension to mark how a
    file has been processed, e.g. ('report.txt', 'clean') ->
    'report-clean.txt'.

    Bug fix: the original dropped the dots between all but the last two
    parts ('a.b.c' -> 'ab-x.c') and produced '-x.name' for names with
    no extension; dots are now preserved and extension-less names get
    the flag appended ('name' -> 'name-x').
    """
    parts = filename.split('.')
    if len(parts) == 1:
        return filename + '-' + flag
    return '.'.join(parts[:-1]) + '-' + flag + '.' + parts[-1]
f3cd6016244cf050b732e273be8dea4f94693677
40,327
def ufunc(data_x, bias):
    """Intercept only model: shift the input by *bias*."""
    shifted = data_x + bias
    return shifted
a713e80beafbb2293cbe73f1da22ebbf1d3d2e2c
40,328
from typing import Union
from pathlib import Path


def is_valid_file(filepath: Union[str, Path, None]) -> bool:
    """Check whether the passed filepath points to a real file.

    Returns False for None, for missing paths, and — unlike the
    original exists() check — for directories, matching the documented
    intent of "a real file".
    """
    if filepath is None:
        return False
    return Path(filepath).is_file()
dbe6713938ac335d38d4df6a1f80b6595a65969f
40,329
def shorten_record_name(record_name):
    """ Return the first part of the record (which can be None, comet
    or comet.connection) """
    if record_name is None:
        return None
    head, _, _ = record_name.partition(".")
    return head
b84c86b22153f403909e86aa452d6f2c7eea32ca
40,330
import base64


def base64Encoder(plaintext):
    """Base64-encode a text string.

    Args:
        plaintext: the plain-text string to encode.

    Returns:
        The Base64-encoded value (as bytes).
    """
    utf8_bytes = plaintext.encode('utf-8')
    return base64.b64encode(utf8_bytes)
d19dfad4fcc68c490710f666e09a76537a0188d1
40,332
import hashlib


def digest(key: str) -> str:
    """Get the SHA-256 hex digest for the key."""
    hasher = hashlib.sha256()
    hasher.update(key.encode())
    return hasher.hexdigest()
82c426ce7f396ac6c5bce38ffe83ba802bb7ed83
40,333
import functools
import warnings


def deprecated(custom_msg=None, new_func_name=None):
    """Decorator that marks a function as deprecated, emitting a
    DeprecationWarning on every call.

    DeprecationWarning is hidden by default, so the filter is forced to
    'always' just for the warn() and flipped back to 'ignore'
    afterwards, instead of relying on a deprecation library.
    """
    def wrap(func):
        @functools.wraps(func)
        def wrapped_method(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)  # Turn off filter
            if custom_msg:
                message = custom_msg
            else:
                message = 'Call to deprecated function "{}".'.format(func.__name__)
                if new_func_name:
                    message += ' Please, use "{}" function instead.'.format(new_func_name)
            warnings.warn(message, category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter('ignore', DeprecationWarning)  # Reset filter
            return func(*args, **kwargs)
        return wrapped_method
    return wrap
119774d6e0eb093b0e9039411ca62b824d6fb4e0
40,334
import os
import re


def get_special_paths(directory):
    """Return the absolute paths of all files in *directory* whose name
    contains a __word__ pattern."""
    pattern = re.compile(r"__\w+__")
    return [
        os.path.abspath(os.path.join(directory, name))
        for name in os.listdir(directory)
        if pattern.search(name)
    ]
4fca9ef7065f8fe8e37c3d6d8487c4394925d248
40,336
def coalesce_dates(dates):
    """
    Coalesces all date pairs into combined date pairs that makes it easy to find free time gaps.

    >>> from date_collapse import coalesce_dates
    >>> dates = [(1,4),(2,8),(12,16),(16,21)]
    >>> cdates = coalesce_dates(dates)
    >>> print(cdates)
    [(1, 8), (12, 21)]
    >>> dates = [(1,4),(2,8),(8,10),(12,16),(16,21),(21,31)]
    >>> cdates = coalesce_dates(dates)
    >>> print(cdates)
    [(1, 10), (12, 31)]
    """
    # Sweep-line: each interval contributes a +1 "open" event at its
    # start and a -1 "close" event at its end.
    parsed_dates = []
    for date in dates:
        parsed_dates.extend([(date[0], 1),(date[1], -1)])
    # Sort by time only. The sort being STABLE matters: when a close and
    # an open share the same time, whichever appeared first in the input
    # stays first; the coalesced[-1][1] == date[0] check below then
    # merges back-to-back intervals like (2,8),(8,10).
    parsed_dates.sort(key = lambda d: d[0])
    count = 0  # number of currently-open intervals
    coalesced = []
    current_block = [None, None]
    for date in parsed_dates:
        if count == 0:
            if not coalesced or (coalesced[-1][1] != date[0]):
                current_block = [date[0], None]
            else:
                # New block starts exactly where the last one ended:
                # drop the last result and extend it instead.
                coalesced.pop()
        count += date[1]
        if count == 0:
            # All open intervals closed: the merged block ends here.
            current_block[1] = date[0]
            coalesced.append((current_block[0], current_block[1]))
    return coalesced
161ef92c6c8946a277e11504cb3dee1082582123
40,337
import os


def image_diff_dir(image_diff_root: str) -> str:
    """
    Path for store diff images.
    by default - '{image_diff_root}.tests/image_diff/'
    """
    subdir = ".tests/image_diff/"
    return os.path.join(image_diff_root, subdir)
83cf99ffb91f9d22ff8726e88dce25ee429c9ad5
40,338
from typing import List


def generate(rows: int) -> List[List[int]]:
    """https://leetcode.com/problems/pascals-triangle/"""
    triangle: List[List[int]] = []
    for n in range(rows):
        # Edges are always 1; interior cells sum the two parents above.
        row = [1] * (n + 1)
        for j in range(1, n):
            row[j] = triangle[n - 1][j - 1] + triangle[n - 1][j]
        triangle.append(row)
    return triangle
af108196a0443a9fa7571752259b64a6a36b08b0
40,339
def zscore(rate, mean, std):
    """Calculates the Z-score from the mean and std."""
    deviation = rate - mean
    return deviation / std
f122034a930301182a85db67457ba18b76ceeea0
40,342
def dir_iGen(row_num, dataframe):
    """Return the data-file directions stored in the 'remarks' column
    for the flare at position *row_num*.

    Args:
        row_num: index of the row
        dataframe: pandas dataframe with a 'remarks' column of
            comma-separated (and comma-terminated) paths

    Returns:
        list of stripped path strings (the trailing empty entry after
        the final comma is dropped)
    """
    remarks = dataframe.iloc[row_num]['remarks']
    return [piece.strip() for piece in remarks.split(',')[:-1]]
3b67e4bf5c9960db3e32ef56bd0e7e652253bed5
40,343
def dec2bin(k, bitlength=0):
    """Decimal to binary: list of 0/1 ints, zero-padded to bitlength."""
    bits = bin(k)[2:].zfill(bitlength)
    return [int(digit == '1') for digit in bits]
dbe86e082248455495b197d14e71b9fe17595955
40,344
def apply_process(sequence, number_of_times, process):
    """Apply *process* to *sequence* the given number of times.
    An int input is first exploded into its list of digits."""
    if isinstance(sequence, int):
        sequence = [int(digit) for digit in str(sequence)]
    remaining = number_of_times
    while remaining > 0:
        sequence = process(sequence)
        remaining -= 1
    return sequence
6db3657360c4dfdb6f38d9241429f66656913135
40,345
def updated(d=None, *ds, **kwargs):
    """Shallow merges dictionaries together, mutating + returning first arg"""
    target = {} if d is None else d
    for extra in ds:
        if extra is not None:
            target.update(extra)
    if kwargs:
        target.update(kwargs)
    return target
77afb104e392b2202ce75c12e7e6ac571f17872b
40,346
def apply_1overt_decay(params, t):
    """
    Implements the mathematical form: a = a0 / (1 + k*t).

    Args:
        params: dict with 'lr0' (initial learning rate) and 'k' (decay
            factor)
        t: iteration number (or number of epochs)

    Returns:
        Updated learning rate
    """
    initial_rate = params['lr0']
    decay = params['k']
    return initial_rate * 1. / (1 + decay * t)
867e43cfe9733d66e469d9223d8b6e2521ca3362
40,347
import re def _SplitFreqRange(freq_range): """Splits a `freq_range` str in a list of numerical (fmin, fmax) tuples.""" try: fmin, fmax = re.split(',|-', freq_range.strip()) return [(float(fmin), float(fmax))] except AttributeError: freq_ranges = [] for one_range in freq_range: fmin, fmax = re.split(',|-', one_range.strip()) freq_ranges.append((float(fmin), float(fmax))) return freq_ranges
db3c7fc2d2a3576ab07b5acdbae9308408a04575
40,349
def _create_key_val_str(input_dict):
    """
    Returns string of format {'key': val, 'key2': val2}
    Function is called recursively for nested dictionaries

    :param input_dict: dictionary to transform
    :return: (str) reformatted string
    """
    def list_to_str(input_list):
        """
        Convert all list items to string.
        Function is called recursively for nested lists
        """
        converted_list = []
        # Sort by string form so output is deterministic even for
        # mixed-type or unordered elements.
        for item in sorted(input_list, key=lambda x: str(x)):
            if isinstance(item, dict):
                item = _create_key_val_str(item)
            elif isinstance(item, list):
                item = list_to_str(item)
            converted_list.append(str(item))
        list_str = ", ".join(converted_list)
        return "[" + list_str + "]"

    items_list = []
    # Keys are likewise sorted by string form for deterministic output.
    for key in sorted(input_dict.keys(), key=lambda x: str(x)):
        val = input_dict[key]
        if isinstance(val, dict):
            val = _create_key_val_str(val)
        elif isinstance(val, list):
            val = list_to_str(input_list=val)
        items_list.append("{}: {}".format(key, val))
    # {{ / }} render literal braces around the joined items.
    key_val_str = "{{{}}}".format(", ".join(items_list))
    return key_val_str
4313054d7afd46b216fabe226530c75466fee527
40,350
def mock_target_stocks():
    """Mocks target stocks."""
    print("mock_target_stocks")
    strategies = {
        "ABEO": "long straddle",
        "ACRX": "long straddle",
        "TPCO": "short straddle",
        "TTMI": "short straddle",
    }
    return {ticker: {"strategy": strategy}
            for ticker, strategy in strategies.items()}
9af3908a20d03a6d26a27c520dc8739de0a63f8d
40,352
import re


def filter_tests(filters, test_ids):
    """Filter test_ids by the test_filters.

    :param list filters: A list of regex filters; any test_id with a
        re.search() match for any regex is kept. If None, all test_ids
        are returned.
    :param list test_ids: A list of test_ids that will be filtered
    :return: A list of test ids.
    """
    if filters is None:
        return test_ids
    compiled = [re.compile(f) for f in filters]
    return [tid for tid in test_ids
            if any(pattern.search(tid) for pattern in compiled)]
d8ca31fddb052dde7eaaa21c777e2963e705a598
40,353
import six
import json


def get_ip():
    """Return this machine's public IP via httpbin.org, or None on
    failure.

    :rtype: str
    """
    urlopen = six.moves.urllib.request.urlopen
    # noinspection PyPep8Naming
    URLError = six.moves.urllib.error.URLError
    try:
        # httpbin's /ip endpoint returns JSON like {"origin": "<ip>"}.
        return json.loads(urlopen('http://httpbin.org/ip').read().decode('utf-8'))['origin']
    except (ValueError, KeyError, URLError):
        # Network failure, malformed JSON, or missing key -> None.
        return None
f410620df2d6f62d5a7b349226df4578b4674143
40,354
def _set_stops(loop_data): """Find stops in data This is a private function, not meant for general use. Input: dataframe Output: dataframe """ # Set the text that contains stops loop_data[u'stop'] = 0 loop_data.ix[loop_data.text.str.contains(u'stop_'),u'stop'] = 1 return loop_data
8c6531739edfa87649a432b7c57004979d10f66a
40,355
def choose_best(ordered_choices, possible, check, default=None):
    """
    Select the best xrefs from several possibilities given an ordered
    list of xref database names.

    Iterates over the database names in preference order and returns
    all xrefs accepted for the first (most preferred) name. Acceptance
    is delegated to *check*, which may inspect more than just names
    (e.g. rna_type), giving flexibility in how xrefs are selected.

    Parameters
    ----------
    ordered_choices : list
        Database names, most preferred first.
    possible : list
        The xrefs to choose from.
    check : callable
        Called as check(choice, entry); True when the entry matches.
    default : obj, None
        Returned (with None) when no xref is acceptable.

    Returns
    -------
    selected : tuple
        (choice, matching_xrefs), or (None, default) when nothing
        matches.
    """
    for db_name in ordered_choices:
        matching = [entry for entry in possible if check(db_name, entry)]
        if matching:
            return (db_name, matching)
    return (None, default)
d10d8d1d527fc2a04603a8f4a6c8e10bb5768bdd
40,357
def check_restraint_pairs_for_doubles(list):  # Also consider that a1 and a2 can be switches
    """
    check_restraint_pairs_for_doubles checks a list of pairs for doubles.
    Pairs count as doubles if the order of elements is changed, or if
    two pairs share the same distance.

    Parameters
    ----------
    list : t.List[t.Tuple]
        A list of restraint pairs (each with .r1, .r2 and .distance).

    Returns
    -------
    bool
        Does the list contain doubles?
    """
    # NOTE: the parameter name shadows the builtin `list`; kept for
    # interface compatibility with existing keyword callers.
    for i in range(len(list) - 1):
        for j in range(i + 1, len(list)):
            first, second = list[i], list[j]
            same_order = first.r1 == second.r1 and first.r2 == second.r2
            swapped = first.r1 == second.r2 and first.r2 == second.r1
            if same_order or swapped or first.distance == second.distance:
                return True
    return False
482da5ba24e5f1ea85375491e3912f00c304d39d
40,359
import requests
import random


def get_random_word_from_API(URL):
    """Make a LinkedIn Word Dictionary API call and choose a random word
    from the response
    """
    response = requests.get(URL)
    # print response.headers['content-type']
    words = response.text
    # Normalise newlines to spaces so split() yields one flat word list.
    words = words.replace('\n', ' ')
    random_word = random.choice(words.split())
    return random_word
89cb9f3988431658d7ea950efa559f614e8ea3fc
40,360
def format_time_to_HMS( num_seconds ):
    """
    Formats 'num_seconds' in H:MM:SS format.

    If the argument is a string, then it checks for a colon.  If it has
    a colon, the string is returned untouched.  Otherwise it assumes
    seconds and converts to an integer before changing to H:MM:SS format.
    """
    if type(num_seconds) == type(''):
        if ':' in num_seconds:
            return num_seconds
    total = int(num_seconds)
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{}:{:02d}:{:02d}'.format(hours, minutes, seconds)
2cd24911976f9d502458043b022a3007e1a1611b
40,361
import torch


def get_dummy_mask(size: int):
    """
    Return a test mask of length *size* (uint8 tensor of ones with
    positions 2, 3 and 6 zeroed) together with the zeroed index list.
    """
    mask = torch.ones(size).byte()
    idx_mask = [2, 3, 6]
    # Index with the list directly: the original `mask[[idx_mask]]`
    # wrapped it in an extra list, a deprecated indexing form that
    # torch warns about (and newer versions reject).
    mask[idx_mask] = 0
    return mask, idx_mask
7240f38932e59a8b3a2e5bfe09df7546a1cb06bc
40,362
import requests


def get_max_match():
    """ Getting the max match number on masterleague """
    print('Getting maximum match id')
    sesh = requests.Session()
    req = sesh.get("https://api.masterleague.net/matches.json")
    # Results appear to be ordered newest-first, so the first entry's
    # id is the maximum -- TODO confirm the API's ordering guarantee.
    return req.json()['results'][0]['id']
506a7b07caa85a7d5cc4717456c5f39615a4bac4
40,363
def afri_16(b8a, b11):
    """ Aerosol Free Vegetation Index 1.6 \
    (Karnieli, Kaufman, Remer, and Wald, 2001).

    .. math:: AFRI_16 = b8a - 0.66 * (b11 / b8a) + 0.66 * b11

    :param b8a: NIR narrow.
    :type b8a: numpy.ndarray or float
    :param b11: SWIR 1.
    :type b11: numpy.ndarray or float
    :returns AFRI_16: Index value

    .. Tip::
        Karnieli, A., Kaufman, Y. J., Remer, L., Wald, A. 2001. \
        AFRI - aerosol free vegetation index. Remote Sensing of
        Environment 77,10-21. doi:10.1016/S0034-4257(01)00190-0.
    """
    swir_ratio = b11 / b8a
    return b8a - 0.66 * swir_ratio + 0.66 * b11
0d0da371a4fea948032b2dfd1bb89a55ffe19680
40,364
from typing import Tuple


def percentage_to_ranged_value(
    low_high_range: Tuple[float, float], percentage: int
) -> float:
    """Given a range of low and high values convert a percentage to a
    single value.

    When using this utility for fan speeds, do not include 0 if it is
    off. Given a low value of 1 and a high value of 255:

        (1,255), 100: 255
        (1,255), 50: 127.5
        (1,255), 4: 10.2
    """
    low, high = low_high_range
    span = high - low + 1
    return span * percentage / 100
ea3d4411c6561094934eba63bfe8e9bfc0a474da
40,367
def cli(ctx, library_id, contents=False):
    """Get information about a library.

    :param ctx: CLI context; ctx.gi is presumably a Galaxy API client
        (bioblend-style) -- confirm against the surrounding CLI package.
    :param library_id: id of the library to show
    :param contents: also return the library's contents when True

    Output:

        details of the given library
    """
    return ctx.gi.libraries.show_library(library_id, contents=contents)
12c71321e21010e7d395c5b8f41ca1ec4a01bee3
40,368
def search_for_matches_in_a_sorted_array(input_array, match_value,
                                         start_position=0,
                                         search_direction=+1,
                                         output_allmatches=False):
    """Scan a sorted array from *start_position* in *search_direction*
    (+1 or -1) collecting indices whose value equals *match_value*.
    Stops after the first match unless *output_allmatches* is True, or
    as soon as the sorted order guarantees no further match.

    Example:
        xmatch2 = search_for_matches_in_a_sorted_array(array2, array1[i1], i2, -1)
    """
    matches = []
    index = start_position
    while 0 <= index <= len(input_array) - 1:
        current = input_array[index]
        if current == match_value:
            matches.append(index)
            if not output_allmatches:
                break
        elif ((search_direction > 0 and current > match_value) or
              (search_direction < 0 and current < match_value)):
            # Sorted order: no further matches in this direction.
            break
        index = index + search_direction
    return matches
9dbbea795f40e927d9df0840f3b9d999d60450f7
40,370
def reserved(num):
    """Return *num* reserved (zero-filled) bytes as a bytearray."""
    # bytes(num) yields num zero bytes; wrap in a mutable bytearray.
    return bytearray(bytes(num))
3e76b61a3c71179d3b3c8573420b60431f711d31
40,372
import sys


def to_str(value):
    """Convert the input to a string, unless it is a unicode string in Python 2.

    Unicode strings are supported as native strings in Python 3, but ``str()``
    cannot be invoked on non-ASCII unicode strings in Python 2, so string
    values are passed through unchanged there.

    Improvement: the third-party ``six`` dependency is dropped — the stdlib
    covers both interpreter versions (``basestring`` on py2, plain pass-through
    semantics on py3, where every string already is unicode and
    ``str(s) == s``).

    Args:
        value: The value to convert to a string.

    Returns:
        str or unicode: The string representation of the value or the
        string itself.
    """
    if sys.version_info.major < 3:
        # Python 2 only: basestring covers both str and unicode there.
        if isinstance(value, basestring):  # noqa: F821 -- py2-only name
            return value
    return str(value)
661ead8f161d067f9e033edd02c00daf39f1f02e
40,373
async def async_setup_entry(hass, entry):
    """Set up Sonos from a config entry.

    Forwards the config entry to the 'media_player' platform so the
    entities are created there; always reports the setup as successful.
    """
    # NOTE(review): async_add_job schedules the forward without awaiting it,
    # so True is returned before the platform has actually finished loading.
    hass.async_add_job(hass.config_entries.async_forward_entry_setup(
        entry, 'media_player'))
    return True
5a08609b582fa8662408992d059ae4b1dc9638fd
40,374
def episode_player_url(episode):
    """Return the player URL for the given episode code"""
    return 'http://www.bbc.co.uk/radio/player/{}'.format(episode)
99400b8d0b0a8ed8bcd73e788ecad0e19764cc33
40,375
def AGIsurtax(c00100, MARS, AGI_surtax_trt, AGI_surtax_thd, taxbc, surtax):
    """Compute surtax on AGI above a filing-status-specific threshold.

    :param c00100: adjusted gross income (AGI)
    :param MARS: filing status (1-based index into AGI_surtax_thd)
    :param AGI_surtax_trt: surtax rate; no-op when not positive
    :param AGI_surtax_thd: per-filing-status AGI thresholds
    :param taxbc: tax before credits, increased by the surtax
    :param surtax: running surtax amount, increased by the surtax
    :return: updated (taxbc, surtax) tuple
    """
    if AGI_surtax_trt > 0.:
        excess_agi = max(c00100 - AGI_surtax_thd[MARS - 1], 0.)
        extra_tax = AGI_surtax_trt * excess_agi
        taxbc = taxbc + extra_tax
        surtax = surtax + extra_tax
    return (taxbc, surtax)
70e92547ac20ce1944c2cef2dc34fbc239657d25
40,376
def count_positives_and_negatives(df_crowd_results, crowd_score_col, expert_score_col, crowd_value):
    """Tally confusion-matrix counts of crowd scores against expert labels.

    A row counts as a crowd positive when its crowd score is at least
    *crowd_value*; the expert label 1 marks an actual positive.

    :param df_crowd_results: DataFrame holding crowd and expert columns
    :param crowd_score_col: column name of the crowd scores
    :param expert_score_col: column name of the expert labels (1 = positive)
    :param crowd_value: threshold at which a crowd score counts as positive
    :return: (true_positive, true_negative, false_positive, false_negative)
    """
    tp = tn = fp = fn = 0
    crowd_scores = df_crowd_results[crowd_score_col]
    expert_scores = df_crowd_results[expert_score_col]
    for crowd, expert in zip(crowd_scores, expert_scores):
        predicted_positive = crowd >= crowd_value
        actually_positive = expert == 1
        if predicted_positive and actually_positive:
            tp += 1
        elif predicted_positive:
            fp += 1
        elif actually_positive:
            fn += 1
        else:
            tn += 1
    return tp, tn, fp, fn
4813bce878d82dfd1059ff67c0fa80d4a189f634
40,379
def local_cov_pair(x, y, neighbors, weights):
    """Test statistic for local pair-wise autocorrelation.

    For every observation i and each of its k-nearest neighbors j, accumulate
    the weighted symmetric cross product (x_i*y_j + y_i*x_j)/2.

    :param x: first variable, indexable by observation
    :param y: second variable, indexable by observation
    :param neighbors: (n_obs, k) array of neighbor indices
    :param weights: (n_obs, k) array of spatial weights
    :return: scalar statistic
    """
    total = 0
    n_neighbors = neighbors.shape[1]
    for i in range(len(x)):
        for k in range(n_neighbors):
            j = neighbors[i, k]
            # Symmetrized cross term between observation i and neighbor j.
            total += weights[i, k] * (x[i] * y[j] + y[i] * x[j]) / 2
    return total
73961c044189f115998dc2a07a936c6a13535bf6
40,380
def welcome():
    """Show the PizzaParlour banner and main menu, prompt for an action.

    Returns:
        str: the raw text the user typed at the prompt (not validated here).
    """
    print("========Welcome to PizzaParlour!========\n")
    # Numbered action menu; the caller dispatches on the returned choice.
    print("====What would you like to do? (Type the number)====\n"
          "1. create new order\n"
          "2. Update an existing order\n"
          "3. Getting our menu\n"
          "4. Quit")
    action = input("Choose a number: ")
    return action
980554aafc85f5677b28c6ebbb41f7770b3cc0c3
40,381
from typing import List
import tokenize


def get_token_lines(file_name: str) -> List[List[tokenize.TokenInfo]]:
    """Return the tokens of a Python source file, grouped by starting line.

    Bug fixed: the scan was seeded with line 0, so the first real token never
    matched and an empty leading group was always appended to the result.
    Empty groups are no longer emitted.

    :param file_name: path to the Python source file to tokenize
    :return: one list of tokens per source line that has tokens
    """
    token_lines: List[List[tokenize.TokenInfo]] = []
    with open(file_name, 'rb') as f:
        tokens = list(tokenize.tokenize(f.readline))
    line = 0
    token_line: List[tokenize.TokenInfo] = []
    for token in tokens:
        # The ENCODING token is metadata, not part of any source line.
        if token.type == tokenize.ENCODING:
            continue
        if token.start[0] == line:
            token_line.append(token)
        else:
            if token_line:  # guard: never emit an empty group
                token_lines.append(token_line)
            token_line = [token]
            line = token.start[0]
    if token_line:
        token_lines.append(token_line)
    return token_lines
c79ef322c4ab54693d88089bfb3163fd4e9ce849
40,382
import sys


def getRootFofOS():
    """Return the filesystem root path for the current OS.

    Bug fixed: ``sys.platform`` values are lowercase ("win32"), so the
    original ``startswith("Win")`` check never matched and Windows fell
    through to the POSIX root "/".

    :return: "C:\\" on Windows, "/" elsewhere
    """
    if sys.platform.startswith("win"):
        return "C:\\"  # pragma: no cover
    return "/"
6130e8c5064c4609459d0ceb9401b5b7a76c8677
40,383
import torch


def build_sparse_adjacent_matrix(edges: list, n: int, device=None, dtype=torch.float, undirectional=True):
    """Build a dense n-by-n adjacency matrix from an edge list.

    :param edges: list of (src, dst) index pairs
    :param n: number of vertices
    :param device: optional device to move the matrix to
    :param dtype: dtype of the resulting matrix
    :param undirectional: if True, symmetrize the matrix (edge in either
        direction yields 1 in both positions)
    :return: dense (n, n) adjacency tensor
    """
    indices = torch.tensor(list(zip(*edges)))
    values = torch.ones(indices.shape[1], dtype=dtype)
    adjacency = torch.sparse_coo_tensor(indices, values, (n, n))
    if device is not None:
        adjacency = adjacency.to(device)
    dense = adjacency.to_dense()
    if undirectional:
        # Union of forward and reverse edges, re-cast to the requested dtype.
        symmetric = (dense > 0) | (dense.transpose(-2, -1) > 0)
        dense = symmetric.to(dtype)
    return dense
bffcea8b65cd3c94c8b0da33aad3dc4108ce0519
40,384
from typing import Optional, Sequence


def normalize_and_invert_weights(
    weights: Sequence[float], max_value: Optional[float] = None
) -> Sequence[float]:
    """Normalize weights to [0, 1] by *max_value* and invert them.

    :param weights: values to transform; returned unchanged when empty
    :param max_value: divisor for normalization; defaults to max(weights)
        and must not be smaller than it
    :return: list of 1 - weight/max_value values
    """
    if len(weights) == 0:
        return weights
    peak = max(weights)
    if max_value is None:
        max_value = peak
    else:
        assert max_value >= peak
    return [1 - w / max_value for w in weights]
4a4a38264c5fc9e3090b16e7974216b57943c472
40,385
import sys


def get_pyver(pyverstr=None):
    """Build a "pythonX.Y" identifier string.

    Args:
        pyverstr (str): "major.minor" e.g. ``2.7`` or ``3.4``
            (default: ``sys.version_info[:2]``)

    Returns:
        str: e.g. ``python2.7``
    """
    if pyverstr is not None:
        return 'python%s' % pyverstr
    major, minor = sys.version_info[:2]
    return 'python%d.%d' % (major, minor)
982f93aba3b5705bc0619d5a76501097e6c7ed03
40,387
def check_probes_for_dupes(revmatches):
    """Check for UCE probes that match more than one contig.

    Bug fixed: ``dict.iteritems()`` is Python 2 only and raises
    AttributeError on Python 3; replaced with ``items()``.

    :param revmatches: mapping of UCE probe -> collection of matched contigs
    :return: set of all contigs belonging to probes with multiple matches
    """
    dupe_set = set()
    for uce, nodes in revmatches.items():
        if len(nodes) > 1:
            dupe_set.update(nodes)
    return dupe_set
263bd9c0442ddb15c0d7544c922cb85e79ed8222
40,388
def calculate_score(card_list):
    """Return the blackjack score for a hand of cards; 0 means blackjack.

    Bug fixed: ``card_list.add(1)`` raised AttributeError (lists have no
    ``add``); an ace is now downgraded with ``append``. Also generalized:
    aces are downgraded repeatedly while the hand busts, so hands with
    multiple aces score correctly.

    :param card_list: list of card values (11 represents an ace);
        mutated in place when aces are downgraded, as before
    :return: 0 for a two-card 21 (blackjack), otherwise the hand total
    """
    if sum(card_list) == 21 and len(card_list) == 2:
        return 0
    # Downgrade aces from 11 to 1 while the hand is over 21.
    while sum(card_list) > 21 and 11 in card_list:
        card_list.remove(11)
        card_list.append(1)
    return sum(card_list)
47b6ce63e984725b4d89ee4f089abef67409eaa5
40,390
def my_default(cls): """ Code chunk used in creating classes: returns default. """ # pylint: disable=protected-access return cls._default
23cff183bddc18074114536ccab08ce08d0e0818
40,392
def oz_to_g(oz):
    """Convert a mass in ounces to grams (1 oz = 28.34952 g)."""
    grams_per_ounce = 28.34952
    return grams_per_ounce * oz
0c547b3b95964e25ace4d00d9c491f367282b89f
40,393
def check_rdtest(record, start, end, rdtest):
    """
    Check if putative insertion has depth support

    :param record: variant record supporting .copy(), .pos, .stop and
        .info (presumably a pysam VariantRecord — confirm with callers)
    :param start: interval start to test for depth support
    :param end: interval end to test for depth support
    :param rdtest: depth-test helper exposing test_record()
    :return: result of rdtest.test_record for the recast record
    """
    # Recast the record as a DUP over [start, end] so the read-depth test
    # evaluates the insertion's flanking interval.
    rdtest_record = record.copy()
    rdtest_record.pos = start
    rdtest_record.stop = end
    rdtest_record.info['SVTYPE'] = 'DUP'
    # Depth cutoffs differ for events under vs over 1 kb.
    if end - start < 1000:
        return rdtest.test_record(rdtest_record, cutoff_type='pesr_lt1kb')
    else:
        return rdtest.test_record(rdtest_record, cutoff_type='pesr_gt1kb')
d2d379297dc1f9595099e2c86a1eb4ac57a7239e
40,394
def lam2f(l):
    """
    Computes the photon frequency in Hz

    Parameters
    ----------
    l : float
        Photon wavelength in m

    Returns
    -------
    f : float
        Frequency in Hz
    """
    speed_of_light = 299792458  # m/s, exact by SI definition
    return speed_of_light / l
5e6d5745c1a19f4b2a8def3fbdca707e60634019
40,395
def _mangle(cls, name): """ Given a class and a name, apply python name mangling to it :param cls: Class to mangle with :param name: Name to mangle :return: Mangled name """ return f"_{cls.__name__}__{name}"
8b789a03b2f25c71bc661cc1eb394650087128b9
40,397
def ObjToStringSingleObj(obj):
    """
    Render an object's class followed by its instance attributes,
    sorted by name, one ``'name' = value`` line each.

    :param obj: object with a __dict__, or None (rendered as 'None')
    """
    if obj is None:
        return 'None'
    attrs = obj.__dict__
    lines = [repr(key) + ' = ' + repr(attrs[key]) for key in sorted(attrs)]
    return str(obj.__class__) + '\n' + '\n'.join(lines)
45c4db439e955bd89d3628e94513d88a832ace62
40,398
import copy


def slice_out_marker_single(start, stop, start_stop_pulse):
    """
    check if start stop falls in valid range.

    Args:
        start (double) : startpoint of where the marker must be in
        stop (double) : endpoint where the marker must be in.
        start_stop_pulse (marker_pulse) : tuple with start and stop point of the marker.

    Return:
        False if start and stop are not in range, otherwise a
        (True, sliced_marker) tuple with the marker clipped to the window
        and shifted so the window starts at time 0.

    NOTE(review): the two return paths have different shapes (bare ``False``
    vs a 2-tuple) — callers must test the falsy case before unpacking.
    """
    # Marker entirely outside the [start, stop) window: no overlap.
    if start_stop_pulse.stop <= start or start_stop_pulse.start >= stop:
        return False
    # Work on a shallow copy so the caller's marker is left untouched.
    result = copy.copy(start_stop_pulse)
    # Clip the marker to the window boundaries.
    if result.start < start:
        result.start = start
    if result.stop > stop:
        result.stop = stop
    # Re-base times so the window start becomes t=0.
    result.start -= start
    result.stop -= start
    return True, result
f8b6f0940d2c73684b2be83e4d7d7639780f0034
40,399
def _prefixed_path(path): """Prefix paths with a token used in the xctoolrunner tool. Prefix paths with a token to indicate that certain arguments are paths, so they can be processed accordingly. This prefix must match the prefix used here in tools/xctoolrunner/xctoolrunner.py Args: path: Path to the resource to be prefixed. Returns: The path prefixed for xctoolrunner. """ prefix = "[ABSOLUTE]" return prefix + path
d58dd3e6b6cdf8aa1ed296612fd1cbb27248cc17
40,400
from typing import Dict, List, Optional


def aggregate_statuses(statuses: List[Dict], dc_voltage=False) -> Optional[Dict]:
    """Aggregates inverter statuses for use for PVOutput.org uploads.

    Does some rounding and integer conversion.

    Args:
        statuses: List of inverter statuses as returned by Inverter.status().
        dc_voltage: If True, aggregates DC voltage instead of AC voltage.

    Returns:
        Dictionary of keyword arguments for add_status() or None if no
        inverter has operation mode normal.
    """
    def mean(values):
        items = list(values)
        return sum(items) / len(items)

    per_inverter = []
    for status in statuses:
        # Only inverters in normal operation contribute.
        if status['operation_mode'] != "Normal":
            continue
        if dc_voltage:
            # Average of the two PV string voltages.
            voltage = mean([status['pv1_voltage'], status['pv2_voltage']])
        elif 'grid_voltage_r_phase' in status:
            # Three-phase inverter: average all three phase voltages.
            voltage = mean([status['grid_voltage_r_phase'],
                            status['grid_voltage_s_phase'],
                            status['grid_voltage_t_phase']])
        else:
            # Single-phase inverter.
            voltage = status['grid_voltage']
        per_inverter.append((int(status['energy_today'] * 1000),
                             int(status['output_power']),
                             status['internal_temperature'],
                             voltage))

    if not per_inverter:
        return None
    energies, powers, temps, voltages = zip(*per_inverter)
    return {
        'energy_gen': sum(energies),
        'power_gen': sum(powers),
        'temp': round(mean(temps), 1),
        'voltage': round(mean(voltages), 1),
    }
8432ec1f14c3e96360934df456e655eb08553f37
40,401
def traverse(fileName, steepness=(1, 3), start=0):
    """Count collisions of a traversal of a slope.

    Parameters:
        fileName: path to the tree-map file; '#' marks a tree and the
            pattern repeats horizontally.
        steepness: (plummet, veer) tuple — vertical step (rows advanced
            per move) and horizontal step (columns advanced per move).
            The default (1, 3) means "down 1, right 3". (Docstring order
            corrected to match the unpacking below.)
        start: index of start position (starting column).

    Returns:
        Number of '#' cells hit along the traversal.
    """
    width = 0
    collisions = 0
    position = start
    progress = 0
    plummet, veer = steepness
    with open(fileName) as inFile:
        for line in inFile:
            if width == 0: #firstLine
                width = len(line) - 1 # don't count newline
            # Only every `plummet`-th row is actually visited.
            progress %= plummet
            if progress == 0:
                if(line[position] == '#'):
                    collisions += 1
                position += veer
                position %= width  # pattern repeats horizontally
            progress += 1
    return collisions
45b13edb3726b19d0df3e347dd177390568a54c2
40,403
def match_host(host, domainlist):
    """Return True if host matches an entry in given domain list.

    Entries starting with '.' match any host ending in that suffix;
    other entries must equal the host exactly. An empty host never matches.
    """
    if not host:
        return False
    return any(
        host.endswith(domain) if domain.startswith('.') else host == domain
        for domain in domainlist
    )
099ea605da3734433a564815c1eb58d7d58dfd5a
40,404
def isthaichar(ch: str) -> bool:
    """
    Check if a character is Thai (code point U+0E00 through U+0E7F).

    :param str ch: input character
    :return: True or False
    """
    return 0x0E00 <= ord(ch) <= 0x0E7F
e50f78105c3db03dc4ee8bac7735a1d809d53656
40,406
def deep_search(node, n, k):
    """In-order walk that returns the value of the k-th visited node.

    :type node: TreeNode
    :param n: number of nodes visited before entering this subtree
    :type k: int
    :rtype: (int, int) -- (value, updated visit count)
    """
    # Left subtree first (smaller values in a BST).
    if node.left:
        value, n = deep_search(node.left, n, k)
        if n == k:
            return value, n
    # Visit the current node.
    value = node.val
    n += 1
    if n == k:
        return value, n
    # Then the right subtree.
    if node.right:
        value, n = deep_search(node.right, n, k)
    return value, n
a1eeff087fc823baa0345caf48c16bac7ed0f4fc
40,407
import subprocess


def shellExecErrorCode(cmd):
    """Execute shell command and returns error code.

    :param cmd: command line string, passed verbatim to the system shell
    :return: the process's integer exit status (0 means success)

    NOTE(review): shell=True runs *cmd* through the shell, so this must
    never be called with untrusted input (shell-injection risk).
    """
    return subprocess.call(cmd, shell=True)
b25305ff78e3b67a2b459a414e8845a677a0b3d5
40,408
def get_prop_architecture(typology_df, architecture_DB):
    """
    Join each building's construction/renovation categories with the
    archetypal architectural properties for its standard.

    :param typology_df: DataFrame with one row per building, carrying a
        'STANDARD' column identifying its construction category
    :type typology_df: DataFrame
    :param architecture_DB: DataFrame of archetypal architectural
        properties keyed by 'STANDARD'
    :type architecture_DB: DataFrame
    :return prop_architecture_df: DataFrame with the architectural
        properties of each building in the area
    :rtype prop_architecture_df: DataFrame
    """
    # Inner join on the shared 'STANDARD' key.
    prop_architecture_df = typology_df.merge(
        architecture_DB, left_on='STANDARD', right_on='STANDARD')
    return prop_architecture_df
3dc4bfe88783c2a20a12a8951db789b3cdcfd460
40,410
def sets_to_contingency(a, b, N):
    """
    Creates a contingency table from two sets.

    params:
        a, b: sets to compare
        N: total number of possible items
    returns:
        (f11, f10, f01, f00) tuple of contingency table entries:
        f11 = # of items both in a and b
        f10 = # of items only in a
        f01 = # of items only in b
        f00 = # of items not in either a or b
    """
    both = len(a & b)
    only_a = len(a) - both
    only_b = len(b) - both
    neither = N - both - only_a - only_b
    return (both, only_a, only_b, neither)
3d782fb47899c6e401034750dddfaffc98f9afc2
40,412
def uniq_list(inlist):
    """Return the distinct elements of a list (order not preserved)."""
    return list(set(inlist))
7699d42cbfad14f2479c8cf133113c62ae236ab4
40,413
from typing import Any, Dict


def get_field(data: Dict[str, Dict[str, Any]], key: str) -> Any:
    """
    Get a field from nested dictionary, with the field denoted with
    dot-separated keys. For example, "a.b.c" -> data['a']['b']['c']
    """
    value = data
    for part in key.split("."):
        value = value[part]
    return value
efd342e3badde4e83d3ad344a2188b7fb49b6d04
40,414
import glob


def glob_if_needed(filenames):
    """Workaround for platforms without shell-level globbing.

    A lone argument is treated as a glob pattern and expanded; if it
    matches nothing it is returned as-is. Multiple arguments are assumed
    to have been expanded by the shell already and pass through unchanged.
    """
    if len(filenames) != 1:
        return filenames
    matches = glob.glob(filenames[0])
    return matches if matches else filenames
327bec5194830ae9b547816b7d52c02673cbf134
40,415
def divide_training_data(digits):
    """Split a digits dataset into training and test portions (80/20).

    :param digits: dataset object with parallel ``data`` and ``target``
        sequences
    :return: (train_data, train_label, test_data, test_label)
    """
    # First 80% of the samples become the training set.
    split = int(len(digits.data) * 4 / 5)
    return (digits.data[:split], digits.target[:split],
            digits.data[split:], digits.target[split:])
6f5caf7fc8069cebffb46e39a2f95c568b3ae5bc
40,417