content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def clean_layer_name(input_name: str,
                     strip_right_of_last_backslash: bool = True,
                     strip_numerics_after_underscores: bool = True):
    """
    There exist cases when layer names need to be concatenated in order to
    create new, unique layer names. However, the indices added to layer names
    designating the ith output of calling the layer cannot occur within a
    layer name apart from at the end, so this utility function removes these.

    Parameters
    ----------
    input_name: str, required
        A Keras layer name.
    strip_right_of_last_backslash: bool, optional, (default = True)
        Should we strip anything past the last slash in the name?
        This can be useful for controlling scopes.
    strip_numerics_after_underscores: bool, optional, (default = True)
        If there are numerical values after an underscore at the end of the
        layer name, this flag specifies whether or not to remove them.
    """
    # Always strip anything after ':', as these will be numerical
    # counts of the number of times the layer has been called,
    # which cannot be included in a layer name.
    if ':' in input_name:
        input_name = input_name.split(':')[0]
    # Despite the parameter name, scopes are separated by forward slashes.
    if '/' in input_name and strip_right_of_last_backslash:
        input_name = input_name.rsplit('/', 1)[0]
    if input_name.split('_')[-1].isdigit() and strip_numerics_after_underscores:
        input_name = '_'.join(input_name.split('_')[:-1])
    return input_name
69c3bb315c770b58ecd6f3afd0dae95a53b2a59f
26,498
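Example (illustrative, not a dataset row; the Keras-style tensor name below is hypothetical): the ':0' call counter is stripped first, then the '/kernel' scope, then the trailing '_1':

    >>> clean_layer_name('dense_1/kernel:0')
    'dense'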
def parse(argv):
    """Parse optional list of keyword arguments into a dict.

    Parses a list of keyword arguments defined by a leading ``--`` and
    separated by ``=`` (for example, --key=value).

    Args:
        argv (list of str): Keyword arguments of the form ``--key=value``.

    Examples::

        # Import the kwconfig module
        import kwconfig

        # Create a sample list of keyword arguments
        argv = ['--key1=value1', '--key2=value2']

        # Parse into a keyword dict
        kwdict = kwconfig.parse(argv)

        # View the values of key1 and key2
        print('key1: ' + kwdict['key1'])
        print('key2: ' + kwdict['key2'])
    """
    kwdict = {}
    for kv in argv:
        k, v = kv.split('=', 1)
        kwdict[k.strip('-')] = v
    return kwdict
a82162666245b4d95f6cc439437d13a596f381ff
26,499
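Example (illustrative, not a dataset row): parsing two hypothetical ``--key=value`` arguments into a dict:

    >>> parse(['--key1=value1', '--key2=value2'])
    {'key1': 'value1', 'key2': 'value2'}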
import random


def rand_credits_alipay_tenpay(min_credits, max_credits):
    """
    Randomly produce the alipay and tenpay numbers.

    :param min_credits: the minimum credits of this day
    :param max_credits: the maximum credits of this day
    :return: total, alipay and tenpay credits
    """
    credits_today = random.randint(min_credits, max_credits)
    alipay_credits = random.randint(int(min_credits / 2), int(2 * credits_today / 3))
    tenpay_credits = credits_today - alipay_credits
    return credits_today, alipay_credits, tenpay_credits
c6b0f35781e7f800b057b353b288d762431d39db
26,501
import ipaddress


def get_public_blocks():
    """
    Get public cidrs.
    """
    def address_exclude_list(original, exclude):
        full_network_list = []
        if not exclude:
            return [original]
        if original.overlaps(exclude[0]):
            for new_block in original.address_exclude(exclude[0]):
                full_network_list.extend(address_exclude_list(new_block, exclude[1:]))
        else:
            full_network_list.extend(address_exclude_list(original, exclude[1:]))
        return full_network_list

    return address_exclude_list(
        ipaddress.IPv4Network("0.0.0.0/0"),
        [ipaddress.IPv4Network("10.0.0.0/8"),
         ipaddress.IPv4Network("127.0.0.0/8"),
         ipaddress.IPv4Network("172.16.0.0/12"),
         ipaddress.IPv4Network("192.168.0.0/16")])
dcf1171f1b3c96a48fea6fc6f8266912dcd9f2e4
26,502
def read_psv(name, first=False):
    """Returns a list of lists; if ``first`` is True, the first (header) row is dropped."""
    with open(name, 'r') as fh:
        res = [i.strip().split('|') for i in fh.readlines()]
    return res[1:] if first else res
5a9c0426c75975f7f96f4e4f554da43c67672d00
26,503
import json


def get_json_conf():
    """
    Reads the JSON configuration file.
    It should be in the same folder as the script.
    """
    with open('config.json', 'r') as json_file:
        json_data = json.load(json_file)
    return json_data
7778694f9a881408174efd1b1da9e3ae0b39491f
26,506
def combine_satpixs():
    """
    The options that can be used to replace saturated pixels
    when combining a set of frames
    """
    methods = ['reject', 'force', 'nothing']
    return methods
08deaeb6cf9412aad44c2a667f37f143addfc3b7
26,507
def a_or_an(next_word):
    """
    Chooses 'a' or 'an' depending on the first letter of the next word.
    Add in special cases if the next word is 'hour' or something.
    """
    # Lowercase the first letter so capitalized words are handled too.
    if next_word[0].lower() in ['a', 'e', 'i', 'o', 'u']:
        return 'an'
    else:
        return 'a'
44a172106d81343392b4380b4016dac0d73e961a
26,509
def cut_neighbor_sequences(seq_s, flanking_i):
    """
    cut the flanking sequences

    :param seq_s: string, seq
    :param flanking_i: size of flanking seq
    :return: strings, cut (start), cut (the rest), cut (last)
    """
    assert type(seq_s) is str
    return seq_s[0:flanking_i], seq_s[flanking_i:-flanking_i], seq_s[-flanking_i:]
65f9b1fb45c46e0d968533ff0e81099e0526f571
26,510
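Example (illustrative, not a dataset row; the sequence below is a made-up 8-base string): a flanking size of 2 yields the first two bases, the middle, and the last two bases:

    >>> cut_neighbor_sequences('AACCGGTT', 2)
    ('AA', 'CCGG', 'TT')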
import math


def distance_l2(loc_1, loc_2):
    """
    :param loc_1: a tuple (x_1, y_1)
    :param loc_2: a tuple (x_2, y_2)
    :return: L2 distance between loc_1 and loc_2
    """
    x_difference = loc_1[0] - loc_2[0]
    y_difference = loc_1[1] - loc_2[1]
    return math.sqrt(x_difference * x_difference + y_difference * y_difference)
c4600f3862d491dd718fc69d0d7901f9c091b990
26,511
import hashlib


def calc_md5(content: bytes) -> str:
    """
    Calc the md5 checksum for the specified bytes.
    """
    return hashlib.md5(content).hexdigest()
7cfa752840502ab07ac1d321ac504fa23584e6fd
26,512
def replace(line, a, b):
    """
    if line starts with string a, then
    replace string a with string b in line
    """
    mline = line
    if line.startswith(a):
        # Replace only the leading occurrence, as the docstring describes;
        # str.replace would also touch later occurrences of a.
        mline = b + line[len(a):]
    return mline
2228404e10b21b9095257e347bdd1b289d1707c2
26,514
def copy_attribute_summary(source, meta):
    """Copy an attribute summary, removing values."""
    dest = {}
    for key in meta["summary"]:
        if key.startswith("median") and "median" in source:
            dest["median"] = source["median"]
        elif key != "list" and key in source:
            dest[key] = source[key]
    # A missing typed value is an error; let the KeyError propagate.
    dest["%s_value" % meta["type"]] = source["%s_value" % meta["type"]]
    dest["count"] = source["count"]
    dest["key"] = source["key"]
    return dest
156a674ae9376f01e295d47442c6e1a2513974f0
26,515
import requests
import logging


def weather_forecast(appid, latitude, longitude, language, units):
    """Get weather forecast data using the OpenWeather API."""
    try:
        response = requests.get(
            "https://api.openweathermap.org/data/2.5/onecall"
            + "?appid={}&lat={}&lon={}&lang={}&units={}".format(
                appid, latitude, longitude, language, units))
        response.raise_for_status()
        return response.json()
    except Exception as e:
        logging.error(e, exc_info=True)
        return None
277d7460b2220179c3c8ea1c9f05b4b2ae97fac3
26,516
def multiply(args):
    """Function multiplies the passed numbers together"""
    product = 1
    # multiply the passed values
    for x in args:
        product *= x
    return product
f166b1f03e9869f34cc689f189238b1ad0f0cc87
26,518
import argparse


def parse_args():
    """ Parses command line arguments. """
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', dest='image_file', default='', action='store',
                        required=True, help="L1B image file and path.")
    parser.add_argument('--b', dest='band2extract', default=1, action='store',
                        required=True, help="The band to plot. E.g., '3'.")
    parser.add_argument('--s', dest='scene2extract', default=None, action='store',
                        required=False,
                        help="[OPTIONAL] The scene(s) to plot as a single scene 'HHMM' or a "
                             "range of scenes 'HHMM HHMM'. By default all scenes plotted.")
    parser.add_argument('--vmax', dest='vmax', default=0.4, action='store',
                        required=False,
                        help="[OPTIONAL] The max to stretch in range [0,1]. Default of 0.4.")
    parser.add_argument('--overlay', dest='overlay_l1b', default=False, action='store_true',
                        required=False, help="[OPTIONAL] Overlay the L1B image.")
    parser.add_argument('--l', dest='chip_file', default='', action='store',
                        required=False,
                        help="[OPTIONAL] File containing list of chip names, one chip name per line.")
    parser.add_argument('--m', dest='measurement_files', default=None, action='store',
                        required=False,
                        help="[OPTIONAL] Measurement files and paths. If this is selected, it "
                             "overrides the data specification option (--d). \n \
                             NOTE: Do not mix satellites and metrics. The satellite and metric "
                             "labels will be taken from the first file after they are sorted "
                             "alphabetically.")
    parser.add_argument('--d', dest='dataspec', default=None, action='store',
                        required=False,
                        help="[OPTIONAL] The satellite, metric, coverage, and the date range of "
                             "measurement files to search from MMDDYYYY to MMDDYYYY. E.g., "
                             "'G17 NAV FULL 07182020 12312020'. \n \
                             This will be overridden if a text file listing specific measurement "
                             "files (--m) is provided.")
    args = parser.parse_args()
    return args
965256e37f5cb28157021cb85bb8eb554f7a2e7e
26,519
def _has_symbol(symbol, name):
    """
    Check whether name contains the provided symbol.

    Recognizes either the _SYMBOL pattern at the end of the string,
    or _SYMBOL_ in the middle.
    """
    return name.endswith('_' + symbol) or ('_' + symbol + '_') in name
cfd2fb8ba0751f7abc939ac6c84fbe8b1aa3925f
26,521
import requests


def joke(context) -> str:
    """Tell a random joke"""
    resp = requests.get(
        "https://icanhazdadjoke.com/", headers={"Accept": "application/json"}
    )
    return resp.json()["joke"]
83ec9089a7853ef95832de0408888bf458f36852
26,523
import argparse


def parse_args():
    """ parse input args """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str,
                        help="xml report whose name needs to be changed")
    parser.add_argument("--testsuite_old_name", type=str,
                        help="old testsuite name that needs to be changed")
    return parser.parse_args()
d75c0e4b1d77bd7614635e2864abc23c9ba1f544
26,525
def Singleton(clazz):
    """
    The singleton decorator: applying this decorator to a class
    will make sure that no more than a single instance of the class
    can be created.
    """
    instances = {}

    def fetch_instance(*args, **kwargs):
        if clazz not in instances:
            instances[clazz] = clazz(*args, **kwargs)
        return instances[clazz]

    return fetch_instance
0be738390f2cc3d39bafb5474bdfdbaa264e837f
26,527
def get_fib_header_type(hdr):
    """ Get type. """
    return hdr[0]
dd3108add54d4432cba1937eb213797d6c7c1ce8
26,528
def verta_hook(run):
    """
    PyTorch module hook that automates logging to Verta during training.

    This hook logs details about the network topology.

    See our `GitHub repository
    <https://github.com/VertaAI/modeldb/blob/master/client/workflows/examples/pytorch-integration.ipynb>`__
    for an example of this integration in action.

    .. versionadded:: 0.13.20

    Parameters
    ----------
    run : :class:`~verta._tracking.experimentrun.ExperimentRun`
        Experiment Run tracking this model.

    Examples
    --------
    .. code-block:: python

        from verta.integrations.torch import verta_hook

        run = client.set_experiment_run()
        model.register_forward_hook(verta_hook(run))
        output = model(X_train)

    """
    def hook(module, input, output):
        for i, layer in enumerate(module.children()):
            try:
                run.log_hyperparameter("layer_{}_name".format(i), layer.__class__.__name__)
            except Exception:
                pass  # don't halt execution
            layer_params = {
                "layer_{}_{}".format(i, attr): getattr(layer, attr)
                for attr in layer.__dict__
                if not attr.startswith('_') and attr != "training"
            }
            try:
                run.log_hyperparameters(layer_params)
            except Exception:
                pass  # don't halt execution
    return hook
f1c3a4f9de8d070d27189b258ea3f11b39e45530
26,529
def get_value_from_json(json_dict, sensor_type, group, tool):
    """Return the value for sensor_type from the JSON."""
    if group not in json_dict:
        return None

    if sensor_type in json_dict[group]:
        # Look the value up under the group, not at the top level.
        if sensor_type == "target" and json_dict[group][sensor_type] is None:
            return 0
        return json_dict[group][sensor_type]
    elif tool is not None and tool in json_dict[group]:
        if sensor_type in json_dict[group][tool]:
            return json_dict[group][tool][sensor_type]

    return None
2d1faca07592d9db9e137c2583b2636fd5b85158
26,530
import argparse


def get_arguments():
    """
    Get argument from command line
    :return: args object
    """
    parser = argparse.ArgumentParser(prog="python3 olympics.py",
                                     description="Search for data in olympics database",
                                     epilog="Run -h for each positional argument to see more details.")
    subparsers = parser.add_subparsers(help='sub-command help', dest='subcommand')

    parser_athlete = subparsers.add_parser('athlete', help="print athletes based on different options")
    parser_athlete.add_argument('-n', '--nocs', type=str, nargs='+',
                                help="print all athletes in the given nocs in the arguments")
    parser_athlete.add_argument('-a', '--name', type=str, nargs='+',
                                help="print all athletes whose names contain the arguments")

    parser_noc = subparsers.add_parser('noc', help="print nocs based on counts of medals")
    parser_noc.add_argument('-m', '--medal', type=str, nargs='?',
                            choices=['gold', 'silver', 'bronze'],
                            help="order nocs based on the type of medal in the argument")

    parser_game = subparsers.add_parser('game',
                                        help="print the country winning the most number of gold in a given year")
    parser_game.add_argument('-y', '--year', type=str, nargs='?',
                             help="print the country winning the most number of gold in a given year")

    args = parser.parse_args()
    return args
f1d8d8724762e1640fcd7a30169defd40e55be3f
26,531
def has_duplicates(l):
    """
    Returns whether a given list contains duplicate elements.
    """
    seen = set()
    for x in l:
        if x in seen:
            return True
        seen.add(x)
    return False
572b64dd885cb3726176a708b656409b0f484a5e
26,533
import numpy


def load_word_embedding(filename):
    """
    Parameters
    ----------
    filename : str
        path to the word embedding binary file (trained by skipgram-train.py)

    Returns
    -------
    embedding : ndarray
        2D matrix where each row is a word vector
    """
    with open(filename, 'rb') as fp:
        shape = numpy.fromfile(fp, dtype=numpy.int32, count=2)
        embedding = numpy.fromfile(fp, dtype=numpy.float32).reshape(shape)
    return embedding
098967941f493f759e732fb8fca3ab0bea43bcb7
26,534
def fatorial(n, show=False):
    """
    Computes the factorial of a number.
    n = the number whose factorial is computed
    show (optional) = decides whether or not to show the calculation
    return = returns the factorial of the number
    """
    contador = 1
    while n > 0:
        if show:
            if n > 1:
                print(f'{n} x ', end='')
            if n == 1:
                print(f'{n} = ', end='')
        contador *= n
        n = n - 1
    return contador
7a1624e49c933ed6c4e823235ad65dad6e59ea7c
26,536
def get_lock_key(object_id):
    """Determines the key to use for locking the ``TimeSlot``"""
    return 'locked-%s' % object_id
4ee42d1cac77a72fcb132292bd5b635fe576bbf4
26,538
def index_structure(structure, path):
    """Follows :obj:`path` in a nested structure of objects, lists, and dicts."""
    for key in path.split("/"):
        if isinstance(structure, list):
            try:
                index = int(key)
                structure = structure[index] if index < len(structure) else None
            except ValueError:
                raise ValueError("Expected a list index, got %s instead" % key)
        elif isinstance(structure, dict):
            structure = structure.get(key)
        else:
            structure = getattr(structure, key, None)
        if structure is None:
            raise ValueError("Invalid path in structure: %s" % path)
    return structure
33ef0551b0c0a142b930c1593fac0d5870289a4d
26,539
from typing import Dict


def generate_options_string(options: Dict[str, str]) -> str:
    """Build the options string from the options dict."""
    options_list = [
        '{key} {option}'.format(key=key, option=options[key])
        for key in options
    ]
    return ' '.join(options_list)
577105dea1dc2ec4e0012fe5dbdf546d6eefc550
26,541
import numpy


def imread(fname, norm=True):
    """Return image data from img&hdr uint8 files."""
    with open(fname + '.hdr', 'r') as fh:
        hdr = fh.readlines()
    img = numpy.fromfile(fname + '.img', numpy.uint8, -1)
    img.shape = int(hdr[4].split()[-1]), int(hdr[3].split()[-1])
    if norm:
        img = img.astype('float64')
        img = img / 255.0
    return img
505779a7db0792dbf838e87fbb59aaf13dfb52b1
26,542
import socket


def get_ip(fqdn: str):
    """Get IP Address of fqdn."""
    return socket.gethostbyname(fqdn)
f6a682112071915f098c8fdd682b6400fb3c74f7
26,543
def lookup_code(blockettes, blkt_number, field_name, lookup_code, lookup_code_number):
    """
    Loops over a list of blockettes until it finds the blockette with the
    right number and lookup code.
    """
    for blockette in blockettes:
        if blockette.id != blkt_number:
            continue
        if getattr(blockette, lookup_code) != lookup_code_number:
            continue
        return getattr(blockette, field_name)
    return None
a106c54dcda1cdaf5b872afa5abc5bfea50ad147
26,544
def progress_heuristic(losses):
    """
    The progress heuristic: how to determine that it's time to stop CG?
    There are many possible ways to address this question, and the progress
    heuristic is a pretty decent way; we look at the sequence of losses as
    defined by the quadratic model. That is,
    losses[i] = ip(g, x_i) - .5 * ip(x_i, A x_i).
    If the progress heuristic notices that losses stops getting better
    relative to the size of the reduction, it'll return True, which means
    that CG should stop. Otherwise it should return False.
    It is copied verbatim from the original HF paper.
    """
    eps = 0.0005

    i = len(losses)
    k = int(max(10, 0.1 * i))

    if len(losses) < k + 1:
        return False

    phi_x_i = losses[-1]
    phi_x_imk = losses[-1 - k]

    if i > k and phi_x_i < 0 and (phi_x_i - phi_x_imk) / phi_x_i < k * eps:
        return True
    else:
        return False
bba245dd7e3229786d225e58a5bbd664181d1e4a
26,545
def test_minimum_bound_rect():
    """ Find area of minimum rectangle covering all "1" in the matrix """
    """
    0 0 0 0
    0 1 0 0
    0 0 1 0
    0 1 1 1
    """
    def min_rect(mat):
        W, H = len(mat[0]), len(mat)
        minx, maxx = W - 1, W - 1
        miny, maxy = H - 1, H - 1
        # identify top-left
        for x in range(W):
            for y in range(miny + 1):
                if mat[y][x] == 1:
                    miny = y if y < miny else miny
                    minx = x if x < minx else minx
                    if x == y == 0:
                        break
        # identify bottom-right
        for x in range(W - 1, minx - 1, -1):
            for y in range(H - 1, miny - 1, -1):
                if mat[y][x] == 1:
                    maxy = y if y > maxy else maxy
                    maxx = x if x > maxx else maxx
                    if x == W - 1 and y == H - 1:
                        break
        print(f'TL = {minx}, {miny}')
        print(f'BR = {maxx}, {maxy}')
        area = (maxy - miny + 1) * (maxx - minx + 1)
        return area

    R1 = [[1, 0], [0, 1]]
    assert min_rect(R1) == 4
    R2 = [[0, 0, 0, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 1, 1, 0]]
    assert min_rect(R2) == 9
    R3 = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 1]]
    assert min_rect(R3) == 12
1dd2bdb499e14136fdb37d4467adab3b01e939e8
26,548
def 进制_二到十(二进制文本):
    """
    Convert binary to decimal (returns a decimal integer)
    """
    return int(二进制文本, base=2)
3e2e6e55e05626599c62cab3788c00a6d3d0ae30
26,549
def read_file(filename):
    """
    Fully reads a file into a UTF-8 string.
    """
    # Pass the encoding explicitly so the docstring's UTF-8 promise holds
    # regardless of the platform default.
    with open(filename, 'r', encoding='utf-8') as f:
        return f.read()
8c83f748682bb2c1857f927e7749f37175463c46
26,550
def dowker_to_gauss(code):
    """
    Convert from Dowker-Thistlethwaite code to signed Gauss code.

    EXAMPLES::

        sage: from sage.knots.gauss_code import dowker_to_gauss
        sage: dowker_to_gauss([6,-12,2,8,-4,-10])
        [-3, 1, 6, -2, -1, 3, -4, 4, 2, -5, 5, -6]
        sage: dowker_to_gauss([-4,-6,-2])
        [2, -1, 3, -2, 1, -3]

    TESTS::

        sage: dowker_to_gauss([])
        []
    """
    n = len(code)
    # vertices are numbered by half of their even label
    signes = {abs(j): (1 if j > 0 else -1) for j in code}
    gauss = []
    for i in range(1, 2 * n + 1):
        if i % 2:
            letter = code[(i - 1) // 2] // 2
            gauss.append(-letter)
        else:
            gauss.append(signes[abs(i)] * i // 2)
    return gauss
e803d8b3c346464859e7ad4f012e655e022fbf02
26,552
import re


def _globtest(globpattern, namelist):
    """ Filter names in 'namelist', returning those which match 'globpattern'. """
    pattern = globpattern.replace(".", r"\.")  # mask dots
    pattern = pattern.replace("*", r".*")      # change glob sequence
    pattern = pattern.replace("?", r".")       # change glob char
    pattern = '|'.join(pattern.split())        # 'or' each line
    compiled = re.compile(pattern)
    return list(filter(compiled.match, namelist))
6c81baecf0a091b5fb55d51474257bff919bf78c
26,553
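Example (illustrative, not a dataset row; file names are hypothetical): '*.py' becomes the regex '.*\.py', which matches from the start of each name:

    >>> _globtest('*.py', ['a.py', 'b.txt', 'setup.py'])
    ['a.py', 'setup.py']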
def _humanize(value):
    """Return "humanized" version of ``value``."""
    if isinstance(value, dict):
        return "{...}"  # abbreviation
    elif isinstance(value, list):
        return "(...)"  # abbreviation
    else:
        return repr(value)
6f8aa414ab8900799c091f412797edbb38273329
26,554
def format_fasta_filename(*args):
    """
    Format a FASTA filename of the form "otu.isolate.sequence_id.fa".

    :param args: the filename parts
    :return: a compound FASTA filename
    :rtype: str
    """
    if len(args) > 3:
        raise ValueError("Unexpected number of filename parts")

    if len(args) == 0:
        raise ValueError("At least one filename part required")

    filename = ".".join(args).replace(" ", "_") + ".fa"
    return filename.lower()
e0a61fa1bed49b3a1ea2e721443261903d2f5755
26,556
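Example (illustrative, not a dataset row; the part names are hypothetical): parts are joined with dots, spaces become underscores, and the result is lowercased:

    >>> format_fasta_filename('OTU', 'Isolate A', 'seq1')
    'otu.isolate_a.seq1.fa'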
def is_file_wanted(f, extensions):
    """
    extensions is an array of wanted file extensions
    """
    is_any = any([f.lower().endswith(e) for e in extensions])
    return is_any
c84250126c9700966248b969ded3121ae2c96764
26,558
def _parse_names(last_name_dict):
    """Helper function to unpack the data when grouped by last name letter"""
    big_list = []
    for last_letter, people_with_last in last_name_dict.items():
        for person in people_with_last:
            big_list.append(person)
    return big_list
7157476a2128bd183a8fac2540c3e2d9f1812760
26,559
def hexdump(data):
    """Return a hexdump of the given data. Similar to what `hexdump -C` produces."""

    def is_hexdump_printable(b):
        return b in b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'

    lines = []
    chunks = (data[i*16:i*16+16] for i in range((len(data) + 15) // 16))

    for i, chunk in enumerate(chunks):
        hexblock = ['{:02x}'.format(b) for b in chunk]
        left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])
        asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' for b in chunk)
        lines.append('{:08x} {:23} {:23} |{}|'.format(i*16, left, right, asciiblock))

    return '\n'.join(lines)
cf72c8b5855d8f99364891fbc416f63dc406942a
26,560
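Example (illustrative, not a dataset row): each 16-byte chunk becomes one output line carrying an offset, two 8-byte hex columns, and an ASCII gutter, so 40 bytes produce 3 lines:

    >>> dump = hexdump(bytes(range(40)))
    >>> len(dump.splitlines())
    3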
from datetime import datetime


def datetime_string(string, custom_format=None):
    """
    Takes a string and parses it into a datetime object with the dateutil
    module if present. If not it will fall back to a more rudimentary method
    of using strptime with a list of predefined formats with the option of
    passing in a custom format to try before the others. The first one to
    parse correctly gets returned.
    """
    try:
        # Import here so a missing dateutil actually triggers the strptime
        # fallback, as the docstring promises.
        from dateutil.parser import parser
        # noinspection PyUnresolvedReferences
        return parser().parse(string)
    except ImportError:
        string = string.replace('/', '-')
        # Note: the original list was missing commas after the %p entries,
        # which silently concatenated adjacent format strings.
        formats = [
            '%Y',
            '%Y-%m',
            '%Y-%m-%d',
            '%Y-%m-%d %H',
            '%Y-%m-%d %I %p',
            '%Y-%m-%d %H:%M',
            '%Y-%m-%d %I:%M %p',
            '%Y-%m-%d %H:%M:%S',
            '%Y-%m-%d %I:%M:%S %p'
        ]
        if custom_format:
            formats.insert(0, custom_format)
        for f in formats:
            try:
                return datetime.strptime(string, f)
            except ValueError:
                continue
        raise ValueError('The string did not match any configured format')
d0ac85bb76cef4ff8585fd67de61863438afacd5
26,561
from typing import Tuple


def get_namespace_and_name_from_role(role: str) -> Tuple[str, str]:
    """
    Extract namespace and name for a role.

    Args:
        role (str): role in the form {{role_namespace}}.{{role_name}}

    Returns:
        Tuple[str, str]: namespace, name
    """
    # role comes in the form {{role_namespace}}.{{role_name}}, so split by .
    role_data = role.split(".")
    role_namespace, role_name = role_data[0], role_data[1]
    return role_namespace, role_name
c4682d8457b49c12fc7bf01279f8cd5583eea13c
26,564
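Example (illustrative, not a dataset row; the role name is hypothetical):

    >>> get_namespace_and_name_from_role('community.general')
    ('community', 'general')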
def initialize_graph(graph, graph_info):
    """Initializes a graph according to given graph_info"""
    graph.uri = graph_info.uri
    graph._name = graph_info.name
    return graph
204942f44c55f5066c3c18412b4b1b71c99e2186
26,566
def _process_axes_functions(axes, axes_functions):
    """Process axes functions of the form `axes.functions(*args, **kwargs)`."""
    if axes_functions is None:
        return None
    output = None
    for (func, attr) in axes_functions.items():
        axes_function = getattr(axes, func)

        # Simple functions (argument directly given)
        if not isinstance(attr, dict):
            try:
                out = axes_function(*attr)
            except TypeError:
                out = axes_function(attr)

        # More complicated functions (args and kwargs given)
        else:
            args = attr.get('args', [])
            kwargs = attr.get('kwargs', {})

            # Process 'transform' kwargs
            if 'transform' in kwargs:
                kwargs['transform'] = getattr(axes, kwargs['transform'])
            out = axes_function(*args, **kwargs)

        # Return legend if possible
        if func == 'legend':
            output = out

    return output
7c8d5bfcdd5756a9c0e02fe7010748c961b7ecb5
26,568
import os


def loadCSVs(directory):
    """In a given directory, finds all CSV files not named template
    and returns an array of names"""
    CSVs = []
    print(directory)
    with os.scandir(directory) as files:
        for entry in files:
            if (not entry.name.startswith('.')
                    and not entry.name.startswith("TEMPLATE")
                    and entry.is_file()
                    and entry.name.endswith('.csv')):
                CSVs.append(entry.name)
                print('Found: ' + entry.name)
    return CSVs
ac25070a0d10a510b6817ba7c8468b2a2da291d8
26,569
def issues_under_ballot_items_retrieve_doc_template_values(url_root):
    """
    Show documentation about issuesUnderBallotItemsRetrieve
    """
    optional_query_parameter_list = [
        {
        },
    ]

    potential_status_codes_list = [
    ]

    try_now_link_variables_dict = {
    }

    api_response = '[{\n' \
                   '  "success": boolean,\n' \
                   '  "status": string,\n' \
                   '  "issues_under_ballot_items_list": list\n' \
                   '   [\n' \
                   '     "ballot_item_we_vote_id": string,\n' \
                   '     "issue_we_vote_id_list": list\n' \
                   '     [\n' \
                   '       "issue_we_vote_id": string,\n' \
                   '     ],\n' \
                   '   ],\n' \
                   '}]'

    template_values = {
        'api_name': 'issuesUnderBallotItemsRetrieve',
        'api_slug': 'issuesUnderBallotItemsRetrieve',
        'api_introduction': "",
        'try_now_link': 'apis_v1:issuesUnderBallotItemsRetrieveView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes': "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
f7c276524c0901653bb5d941e9cc3dc64838677b
26,570
import base64


def get_headers(username, password):
    """Formats the header to contain username and password variables"""
    login_string = "{}:{}".format(username, password)
    # Python 2.7 and 3.6 support
    base64_login = base64.b64encode(str.encode(login_string))
    str_base64_login = base64_login.decode("utf-8")
    return {
        "Authorization": "Basic {}".format(str_base64_login),
        "Content-Type": "application/x-www-form-urlencoded",
        "Cache-Control": "no-cache"
    }
983c8f57c393b51bceae4d941cb620d6713b5650
26,571
def parse_request(request, listOfSelectedParameter):
    """
    -> Parse result of a request and return only the parameters present in
       listOfSelectedParameter.
    -> Request is a dict generated by a search() operation from the tinyDB
       package
    -> listOfSelectedParameter is a list of selected parameters
    -> return a list of dicts; each dict is a vector with the parameters
       present in listOfSelectedParameter.
    """
    structureToReturn = []
    for patient in request:
        vectorToReturn = {}
        for parameter in listOfSelectedParameter:
            vectorToReturn[parameter] = patient[parameter]
        structureToReturn.append(vectorToReturn)
    return structureToReturn
fd095c7ea7a964a70786e4ee8b827b5bc5764602
26,572
def stop_iteration():
    """End of iterations."""
    try:
        next(iter([]))
    except StopIteration:
        return "empty list"
45a25d504ee77143e54299c21c8da7b382a2ece9
26,573
def get_rpi_hostname(self):
    """
    Returns hostname from the Pi

    :returns: String containing hostname of the Pi
    """
    hostname = self.run_command_get_output('hostname').replace("\n", "")
    return hostname
5ea2bc35952974b65a5e74fb4f7bb4015c50f500
26,574
def loss_partial(elements, target_a, target_b):
    """
    compute only for target_a and target_b, and without scale by 1/l
    @return at the scale: Loss * l / 2
    """
    ret = 0
    l = elements.shape[0]
    for i, i_inv in [[target_a, target_b], [target_b, target_a]]:
        for j in range(l):
            if elements[i, j] > 0:
                if i > j:
                    ret += (i - j) * elements[i, j]
                elif i < j:
                    if i_inv != j:
                        ret += (j - i) * elements[i, j]
    return ret
d10f1dde2889a0947a60dfd436b9e9685dcdf292
26,577
def get_valid_and_invalid_lists(in_list, checker):
    """
    Take a list of strings and return two lists valid_pwds, invalid_pwds
    based on checker.
    signature of checker(password, left, right, in_char)
    """
    valid_pwds = []
    invalid_pwds = []
    for line in in_list:
        rule, password = line.split(":")
        config, in_char = rule.split(" ")
        left, right = config.split("-")
        left = int(left)
        right = int(right)
        password = password.strip()
        in_char = in_char.strip()
        is_valid = checker(password, left, right, in_char)
        if is_valid:
            valid_pwds.append(password)
        else:
            invalid_pwds.append(password)
    return valid_pwds, invalid_pwds
6e500e7357957028d8c8161be1234c2f4b7fa02e
26,578
def insert(position, element, list):
    """Inserts the supplied element into the list, at the specified index.
    Note that this is not destructive: it returns a copy of the list with the
    changes. No lists have been harmed in the application of this function"""
    return list[:position] + [element] + list[position:]
e4e6f8836cde4cf4ea4aaf3741afe12f7587f991
26,579
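Example (illustrative, not a dataset row): the original list is left untouched:

    >>> xs = ['a', 'b', 'd']
    >>> insert(2, 'c', xs)
    ['a', 'b', 'c', 'd']
    >>> xs
    ['a', 'b', 'd']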
from typing import List


def crimsplit(long_string: str, break_char: str, limit: int = 2000) -> List[str]:
    """Break a string."""
    list_of_strings = []
    while len(long_string) > limit:
        # find indexes of all break_chars; if no break_chars, index = limit
        index = [i for i, brk in enumerate(long_string) if brk == break_char]
        if index == [] or max(index) < limit:
            index.append(limit)

        # find first index at or past limit, break message
        for ii in range(0, len(index)):
            if index[ii] >= limit:
                list_of_strings.append(long_string[:index[ii - 1]].lstrip(' '))
                long_string = long_string[index[ii - 1]:]
                break  # back to top, if long_string still too long

    # include last remaining bit of long_string and return
    list_of_strings.append(long_string)
    return list_of_strings
26256eb1bf8c2253b50aaab1993d33f5da5c6a96
26,580
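Example (illustrative, not a dataset row; a tiny limit for demonstration): the head piece is lstripped, but the final remainder keeps its leading break character:

    >>> crimsplit('hello world', ' ', limit=6)
    ['hello', ' world']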
import itertools


def _encodeMode(ori_str, mode_dict):
    """Replace each mode name in ori_str with its code from mode_dict"""
    for mode, value in mode_dict.items():
        ori_str = ori_str.replace(mode, value)
    # join adjacent same mode
    return "".join(i for i, _ in itertools.groupby(ori_str.replace(",", "")))
7f214b62d3c23a7e754fe6d74c9ce26b2c3d4c5f
26,582
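Example (illustrative, not a dataset row; the mode names and single-letter codes are hypothetical): adjacent identical modes are collapsed after substitution:

    >>> _encodeMode('walk,walk,bus', {'walk': 'W', 'bus': 'B'})
    'WB'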
from warnings import warn


def _get_n_components(n_components, n_features):
    """Gets n_components from param and features"""
    if n_components is None:
        n_components = sum(n_features)
    elif n_components == "min":
        n_components = min(n_features)
    elif n_components == "max":
        n_components = max(n_features)
    elif n_components > sum(n_features):
        warn("Requested too many components. Setting to number of features")
        n_components = min(n_components, sum(n_features))
    return n_components
71a7af30a49d241389dd019c246d5b319750bc6f
26,583
import os
import stat


def validWorkingFile(file, overwriteZeroByteFiles=False):
    """
    Determine if the specified file path is a valid, existing file in the
    WORKING_DIR
    """
    # Overwrite (return True) 0 byte segment files if specified
    if os.path.exists(file) and \
            (os.stat(file)[stat.ST_SIZE] != 0 or not overwriteZeroByteFiles):
        return True
    return False
5f825fa91fd5b9ffe2eb3c4989acfc9cf21b0084
26,584
def deserialize(param, n_movie, n_user, num_features):
    """into ndarray of X(1682, 10), theta(943, 10)"""
    return param[:n_movie * num_features].reshape(n_movie, num_features), \
           param[n_movie * num_features:].reshape(n_user, num_features)
95a8b7dd06a3fdde30d1a1dba91062eaaa0a20d4
26,585
import math


def count_digits(n):
    """
    Count digits in integer
    """
    if n > 0:
        digits = int(math.log10(n)) + 1
    elif n == 0:
        digits = 1
    else:
        digits = int(math.log10(-n)) + 1
    return digits
03e5f041e096f2137153418f99349f3e8e844d41
26,589
def reorder_column_list(column_list_to_reorder, reference_column_list):
    """Keep the target list in the same order as the training dataset,
    for consistency of forecasted columns order"""
    reordered_list = []
    for column_name in reference_column_list:
        if column_name in column_list_to_reorder:
            reordered_list.append(column_name)
    return reordered_list
018f05099d662d399e8a8f7bd8308fa4ff355c94
26,590
import pickle


def message_encode(msg):
    """
    Encodes the message object.

    This method compresses the message payload and then serializes the whole
    message object into bytes, using pickle.

    Args:
        msg: the message to encode.
    """
    msg.payload.encode()
    return pickle.dumps(msg)
5064c58c5681ca5da93f9caa87ff1a936002f5da
26,592
async def override_record(record, existing_record, overrides):
    """
    Removes fields from record if user has overridden them on airtable.

    Args:
        record (``dictionary``): Record from which fields will be removed
            if overwritten.
        existing_record (``dictionary``): Record to check for overrides.
        overrides (``list``): List of dictionaries. Each dictionary is
            composed of two items:
            1. The override checkbox field name,
            2. The override field name
            {"ref_field": "field name", "override_field": "field name"}

    Return:
        record.
    """  # noqa
    for override in overrides:
        ref_field = override.get("ref_field")
        override_field = override.get("override_field")
        if existing_record["fields"].get(ref_field):
            record["fields"][override_field] = existing_record["fields"][
                override_field
            ]
    return record
6a8a1d68985ed3adb044e3a1d5370d7fddf6c2e6
26,593
def hack(subitems):
    """
    Hack Object
    ---
    properties:
      hack:
        type: string
        description: it's a hack
      subitems:
        type: array
        items:
          $ref: '#/definitions/SubItem'
    """
    return {
        'hack': "string",
        'subitems': [subitem.dump() for subitem in subitems]
    }
3c7186f475110a51bc05c8478736a53bb0aa55f2
26,600
def differentiate_polynomial(coefficients):
    """
    Calculates the derivative of a polynomial and returns
    the corresponding coefficients.
    """
    new_coeffs = []
    for deg, prev_coef in enumerate(coefficients[1:]):
        new_coeffs.append((deg + 1) * prev_coef)
    return new_coeffs
dcc422e9acae53a8162f45ff78ceb7084dedf6f0
26,602
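Example (illustrative, not a dataset row): coefficients are ordered from the constant term upward, so [5, 3, 2] encodes 5 + 3x + 2x^2 and its derivative 3 + 4x:

    >>> differentiate_polynomial([5, 3, 2])
    [3, 4]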
import os
import subprocess


def r_build_found():
    """Return whether or not an R build is accessible.

    Works by trying to find R's home directory (aka R_HOME). First it checks
    for an environment variable R_HOME, and if none is found it tries to get
    it from an R executable in the PATH.

    See: https://github.com/rpy2/rpy2/blob/master/rpy2/situation.py.
    """
    if os.environ.get("R_HOME"):
        return True
    try:
        _ = subprocess.check_output(("R", "RHOME"), universal_newlines=True)
    # pylint:disable-msg=broad-except
    except Exception:  # FileNotFoundError, WindowsError, etc
        return False
    return True
2f855dbd94a324f88c605b48f1630351093e195e
26,603
def to_abs_deg_min_sec(value):
    """
    Return the absolute value of ``value`` as degrees, minutes, seconds.
    """
    abs_value = abs(value)
    degrees = int(abs_value)
    rem = (abs_value - degrees) * 60
    minutes = int(rem)
    seconds = round((rem - minutes) * 60, 5)
    return degrees, minutes, seconds
df4f3fbc45b9092c12e8afab48ee7eb782f96f63
26,604
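Example (illustrative, not a dataset row): -30.5 degrees becomes 30 degrees 30 minutes, with the sign discarded:

    >>> to_abs_deg_min_sec(-30.5)
    (30, 30, 0.0)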
from typing import Counter


def generate_bar_type(x_data, y_data):
    """
    Prepare the data needed to draw a grouped bar chart.

    :param x_data: category data, a list
    :param y_data: label data, a list
    :return: the set of categories; the set of labels (sorted by count);
             and a {category: {label: count}} dictionary
    """
    # First fix the order of the label set
    la_dict = Counter(y_data)
    sort_dict = sorted(la_dict.items(), key=lambda x: x[1], reverse=True)
    new_y = [j[0] for j in sort_dict]
    # Get the set of categories
    type_list = sorted(list(set(x_data)), reverse=True)
    # Build the dictionary
    data_dict = {h: Counter([j for i, j in zip(x_data, y_data) if i == h]) for h in type_list}
    return type_list, new_y, data_dict
5970f86798eca21c4defb6197526be2d52357b2c
26,605
import inspect
import os


def uiPath(cls):
    """
    Return the ui path for the given widget class.

    :type cls: type
    :rtype: str
    """
    name = cls.__name__
    path = inspect.getfile(cls)
    dirname = os.path.dirname(path)
    path = dirname + "/resource/ui/" + name + ".ui"
    return path
6b246566b404790e4d50b3353ff03fa200eb020c
26,606
def get_songs_names(playlist):
    """Get names of songs in playlist to search on YT."""
    songs = []
    for song in playlist:
        song = song['track']
        name = ''
        for artist in song['artists']:
            name += artist['name'] + ', '
        name = name[:-2]
        name += ' - ' + song['name']
        songs.append(name)
    return songs
8181e528db4130c40a3b29f91362228e3030d2fe
26,607
async def hello_world():
    """Hello world endpoint for testing if FastAPI works properly"""
    return {"message": "Hello World, E!"}
2ed1e49952064209a0b5b5cf7184d82bbe44b1b2
26,609
def get_extent(geotransform, cols, rows):
    """
    Return list of corner coordinates from a geotransform

    From Metageta and http://gis.stackexchange.com/a/57837/2910

    @type geotransform: C{tuple/list}
    @param geotransform: geotransform
    @type cols: C{int}
    @param cols: number of columns in the dataset
    @type rows: C{int}
    @param rows: number of rows in the dataset
    @rtype: C{[float,...,float]}
    @return: coordinates of each corner
    """
    ext = []
    xarr = [0, cols]
    yarr = [0, rows]
    for px in xarr:
        for py in yarr:
            x = geotransform[0] + (px * geotransform[1]) + (py * geotransform[2])
            y = geotransform[3] + (px * geotransform[4]) + (py * geotransform[5])
            ext.append([x, y])
        yarr.reverse()
    return ext
56bd7ae9e78f8892e37918c03ce6c7f3976f612d
26,610
def GetQueryFields(referenced_fields, prefix):
    """Returns the comma separated list of field names referenced by the command.

    Args:
        referenced_fields: A list of field names referenced by the format and
            filter expressions.
        prefix: The referenced field name resource prefix.

    Returns:
        The comma separated list of field names referenced by the command.
    """
    if not referenced_fields:
        return None
    return ','.join(['nextPageToken'] +
                    ['.'.join([prefix, field]) for field in referenced_fields])
40d928cb6b07fcc66fe257306e4d6b50753f7d7b
26,612
def qfactor_dielectric(tand):
    """Calculate Q-factor due to dielectric filling.

    Args:
        tand: loss tangent

    Returns:
        Q-factor
    """
    return 1 / tand
c4ef6818deb8b6617657eec38b020d4b73a7d905
26,613
def dot2transcripts(dotpath):
    """Convert a .dot file to a dictionary of transcriptions.

    Parameters
    ----------
    dotpath: str
        Full path to a .dot transcription file.

    Returns
    -------
    transcripts: dict of str
        transcripts[utterance ID] = transcript
    """
    transcripts = {}
    with open(dotpath) as fp:
        for line in fp.readlines():
            line = line.strip().split()
            # Template
            # <transcription> <(utterance id)>
            trans, uid = ' '.join(line[:-1]), line[-1][1:-1]
            transcripts[uid] = trans.upper()
    return transcripts
3905114a67372195e4c4775b3c88529f102802f8
26,614
def flatten(texts):
    """
    Flattens list of lists

    params:
        texts: list of lists
    return: flattened list
    """
    flattened_list = [item for items in texts for item in items]
    return flattened_list
a4427d4389e44d600d1b81fcba3609ee8ea4b14b
26,615
def CountsToFloat(counts, bits=9, vmax=2.5, vmin=-2.5):
    """Convert the integer output of ADC to a floating point number
    by multiplying by dv."""
    dv = (vmax - vmin) / 2**bits
    return dv * counts
5f68bdf525701fe5ef0379a2c12714aa0e0cd013
26,616
import os


def is_developer_mode() -> bool:
    """
    Check if developer mode is activated.

    :return: True if developer mode is active, otherwise False
    """
    return os.getenv("SQLTASK_DEVELOPER_MODE") is not None
0988695842a9d67229e3b5162aed443fcb9ccc4e
26,617
def _make_retry_timeout_kwargs(retry, timeout):
    """Helper for methods taking optional retry / timeout args."""
    kwargs = {}

    if retry is not None:
        kwargs["retry"] = retry

    if timeout is not None:
        kwargs["timeout"] = timeout

    return kwargs
15e106b7da47b4b23d7406cffd573b75805880b9
26,618
from typing import Any
from typing import Optional


def _strat_from_trace_distance_bound_method(val: Any) -> Optional[float]:
    """Attempts to use a specialized method."""
    getter = getattr(val, '_trace_distance_bound_', None)
    result = NotImplemented if getter is None else getter()

    if result is None:
        return None

    if result is not NotImplemented:
        return min(1.0, result)

    return NotImplemented
0c1ebf15584ded42c37ec737dff6e36758e82685
26,619
import math


def exactPrimeFactorCount(n):
    """
    >>> exactPrimeFactorCount(51242183)
    3
    """
    count = 0
    if n % 2 == 0:
        count += 1
        while n % 2 == 0:
            n = int(n / 2)

    # the n input value must be odd so that
    # we can skip one element (ie i += 2)
    i = 3
    while i <= int(math.sqrt(n)):
        if n % i == 0:
            count += 1
            while n % i == 0:
                n = int(n / i)
        i = i + 2

    # this condition checks the prime
    # number n is greater than 2
    if n > 2:
        count += 1
    return count
2fcbbf4168aafb5da9a767f4b301d48199fc89f1
26,620
def legend_identifier(legends):
    """
    >>> legend_identifier([("http://example/?", "foo"), ("http://example/?", "bar")])
    'http://example/?foohttp://example/?bar'

    :param legends: list of legend URL and layer tuples
    """
    parts = []
    for url, layer in legends:
        parts.append(url)
        if layer:
            parts.append(layer)
    return ''.join(parts)
0ad2534d3205d06419af7bea4ba7c08df9c6c75d
26,622
def convert_units(distance, input_units, output_units):
    """Convert units of distance (miles, meters, and kilometers)."""
    conversion_factors = {
        'miles': {
            'miles': 1.0,
            'meters': 1609.34,
            'kilometers': 1.60934
        },
        'kilometers': {
            'kilometers': 1.0,
            'meters': 1000.0,
            'miles': 0.621371,
        },
        'meters': {
            'meters': 1.0,
            'miles': 0.000621371,
            'kilometers': 0.001
        }
    }
    allowed_units = conversion_factors.keys()
    if not all(x in allowed_units for x in (input_units, output_units)):
        raise ValueError('Invalid units provided. Should use "miles", '
                         '"kilometers", or "meters".')
    return distance * conversion_factors[input_units][output_units]
ff46176552bb1ba06694bd9280e0e45b9137d2ed
26,624
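Example (illustrative, not a dataset row): the factor table is indexed first by input units, then by output units:

    >>> convert_units(1, 'miles', 'meters')
    1609.34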
def create_criteria(label, help_txt, crit_type, standalone_impact, disable_processing,
                    section, input_type, rating, universe, mem_funcs, dtypes,
                    init_value=None, max_value=None, min_value=None):
    """
    Create criterion JSON format from input values

    @returns criterion JSON structure
    """
    crit = {"label": label,
            "universe": universe,
            "mem_funcs": mem_funcs,
            "rating": list(rating.keys()),
            "type": crit_type,
            "dtypes": dtypes,
            "rules": {
                "standalone_impact": standalone_impact,
                "disable_processing": disable_processing
            },
            "frontend": {
                "type": input_type,
                "section": section,
                "help": help_txt,
                "rating": rating
            }}
    if input_type not in ["list", "text"]:
        assert init_value is not None, "Initial value for frontend must be given for number/range inputs."
        assert max_value is not None, "Max value for frontend must be given for number/range inputs."
        assert min_value is not None, f"Min value for frontend must be given for number/range inputs. ({min_value})"
        crit["frontend"]["initialValue"] = init_value
        crit["frontend"]["max"] = max_value
        crit["frontend"]["min"] = min_value
        crit["frontend"]["range_min"] = list(rating.values())[0]
        crit["frontend"]["range_max"] = list(rating.values())[-1]
    return crit
0cf23b6929b4dcc5a64736b389eb2a149ff23fac
26,625
import os


def _collect_data(directory, input_ext, target_ext):
    """Traverses directory collecting input and target files."""
    # Directory from string to tuple pair of strings
    # key: the filepath to a datafile including the datafile's basename. Example,
    #   if the datafile was "/path/to/datafile.wav" then the key would be
    #   "/path/to/datafile"
    # value: a pair of strings (input_filepath, target_filepath)
    data_files = dict()
    for root, _, filenames in os.walk(directory):
        input_files = [filename for filename in filenames if input_ext in filename]
        for input_filename in input_files:
            # Note: str.strip(input_ext) would remove *characters*, not the
            # suffix, so slice the extension off instead.
            basename = input_filename[:-len(input_ext)]
            input_file = os.path.join(root, input_filename)
            target_file = os.path.join(root, basename + target_ext)
            key = os.path.join(root, basename)
            assert os.path.exists(target_file)
            assert key not in data_files
            data_files[key] = (input_file, target_file)
    return data_files
ff01ca0c4ba20d57076c0750816097b15816b24b
26,626
def valid_move(moves):
    """
    Valid Move checks if a position on the board is taken.

    Input: all moves
    Returns True if board position not taken.
    False if board position already taken.
    """
    # Check every move before concluding; returning inside the loop's else
    # branch would only ever inspect the last move.
    for move in reversed(moves):
        if moves.count(move) > 1:
            return False
    return True
05c26e2fa92c478d7df8c3e5a1eac7e1c580603e
26,627
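Example (illustrative, not a dataset row; moves are hypothetical board coordinates):

    >>> valid_move([(0, 0), (1, 1)])
    True
    >>> valid_move([(0, 0), (0, 0)])
    False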
def get_num_hshtg_context():
    """ Returns a dictionary of hashtags to their counts in the training data """
    train_file = './Data/train.csv'
    tags2counts = {}
    with open(train_file) as f:
        for line in f:
            l = line.strip('\n').split('\t')
            text, user, hashtags = l[0], l[1], l[2:]
            for hashtag in hashtags:
                if len(hashtag) == 0:
                    continue
                if hashtag not in tags2counts:
                    tags2counts[hashtag] = 0
                tags2counts[hashtag] += 1
    counts2tags = {}
    for tag, count in tags2counts.items():
        if count not in counts2tags:
            counts2tags[count] = set()
        counts2tags[count].add(tag)
    # Uncomment this code to print out the frequency of hashtags per count
    # count_list = []
    # for i in range(50):
    #     if i in counts2tags.keys():
    #         val = len(counts2tags[i])
    #     else:
    #         val = 0
    #     print(i, "\t", val)
    return tags2counts, counts2tags
02ae56c2b2d0b6e8079e9e00d7ca655525ccb620
26,630
import json


def ansyAuthor(data):
    """Analyze the JSON file"""
    ids = []
    with open(data, 'r') as fp:
        for line in fp.readlines():
            item = json.loads(line)
            # Convert once so the membership check and the stored value
            # use the same type.
            author_id = int(item['author_id'])
            if author_id not in ids:
                ids.append(author_id)
    return ids, max(ids)
c9855342663ff900bbe2bdf44d260f0cae10dcca
26,632
def safe_get_from_list(data, attr, default_value):
    """
    Returns data[attr] if attr is in data, else returns default_value
    """
    return data[attr] if attr in data else default_value
509714de5ced48df33539d5c31db633965beff6e
26,634
import math


def check_approx_equals(expected, received):
    """
    Checks received against expected, and returns whether or not they match
    (True if they do, False otherwise).
    If the argument is a float, will do an approximate check.
    If the argument is a data structure will do an approximate check on all
    of its contents.
    """
    try:
        if type(expected) == dict:
            # first check that keys match, then check that the
            # values approximately match
            return expected.keys() == received.keys() and \
                all([check_approx_equals(expected[k], received[k])
                     for k in expected.keys()])
        elif type(expected) == list or type(expected) == set:
            # Checks both lists/sets contain the same values
            return len(expected) == len(received) and \
                all([check_approx_equals(v1, v2)
                     for v1, v2 in zip(expected, received)])
        elif type(expected) == float:
            return math.isclose(expected, received, abs_tol=0.001)
        else:
            return expected == received
    except Exception as e:
        print(f'EXCEPTION: Raised when checking check_approx_equals {e}')
        return False
db1f8586877d350b1ea8571de238ccfc1d15aa10
26,635
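Example (illustrative, not a dataset row): floats within the 0.001 tolerance compare equal, and containers are checked recursively:

    >>> check_approx_equals({'pi': 3.14159}, {'pi': 3.1416})
    True
    >>> check_approx_equals([1.0, 2.0], [1.0, 2.5])
    False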
def compute_helpful_vars(job_dict, dirs):
    """
    Helper function, that adds what (I see as) helpful variables to your job
    dictionary.

    :param job_dict: a dictionary representing a row. for example, if you had
        a csv file with rows [sub,ses,task,run,order_id], and also defined
        globals [conda_env_path, matlab_path], you would get a dict
        {
            sub: NIDA2322,
            ses: 1,
            task: "rest",
            run: 2,
            order_id: 5,
            conda_env_path: "/project2/conda/myenv",
            matlab_path: "/usr/bin/matlab"
        }
    :param dirs: output of ..utils.io:calculate_directories()
    :return: augmented job_dict
    """
    job_dict["job_id"] = "%05d" % job_dict["order_id"]
    # for bids
    if "run" in job_dict.keys():
        job_dict["run_id"] = "%02d" % job_dict["run"]
    return job_dict
523f77b8882dc19ce184a1674ad83ab5836bc556
26,636
import os


def init_trial_path(logdir):
    """Initialize the path for a hyperparameter setting"""
    os.makedirs(logdir, exist_ok=True)
    trial_id = 0
    path_exists = True
    path_to_results = logdir + '/{:d}'.format(trial_id)
    while path_exists:
        trial_id += 1
        path_to_results = logdir + '/{:d}'.format(trial_id)
        path_exists = os.path.exists(path_to_results)
    save_path = path_to_results
    os.makedirs(save_path, exist_ok=True)
    return save_path
23ffdfd9f9202949ce6d9680db05a900e343d550
26,637
def get_arbitrage_opportunity(df):
    """function to create column showing available arbitrage opportunities"""
    # assuming the total fees are 0.55%, if the higher closing price is less
    # than 0.55% higher than the lower closing price...
    if df['pct_higher'] < .55:
        return 0   # no arbitrage
    # if exchange 1 closing price is more than 0.55% higher
    # than the exchange 2 closing price
    elif df['higher_closing_price'] == 1:
        return -1  # arbitrage from exchange 2 to exchange 1
    # if exchange 2 closing price is more than 0.55% higher
    # than the exchange 1 closing price
    elif df['higher_closing_price'] == 2:
        return 1
4840af4e2a9d14dab9fe8b5fa08d058e83b1a969
26,638
def to_tuple(tensor):
    """
    Convert tensor to tuple.

    Args:
        tensor (torch.Tensor): any tensor

    Returns:
        tup (tuple): tuple form
    """
    tup = tuple(tensor.cpu().tolist())
    return tup
07dc7e0e11f86331f31fbca8b9cb43b2a36a3846
26,639
from typing import BinaryIO
import mimetypes
import base64


def bytes_to_datauri(fp: BinaryIO, name):
    """Convert an open binary file into a data URI, guessing the MIME type from name."""
    mime, _ = mimetypes.guess_type(name)
    fp.seek(0)
    data = fp.read()
    data64 = b"".join(base64.encodebytes(data).splitlines())
    return "data:%s;base64,%s" % (mime, data64.decode("utf-8"))
5390313a0b5b6aa401e1ef8ccc3700db9a33adb0
26,640
def well_type_from_position(df):
    """Assign the WellType from the position on the plate.
    Controls are in column 11 and 12"""
    result = df.copy()
    result["WellType"] = "Compound"
    # Use .loc instead of chained indexing, which pandas warns about and
    # which may silently fail to write through on a copy.
    result.loc[(result["plateColumn"] == 11) | (result["plateColumn"] == 12),
               "WellType"] = "Control"
    return result
e89bc4f3e05947ef977c46daf3d0f1ae6bb402a7
26,642
def t_name(key):
    """
    Rename the feature keys so that they don't clash with the raw keys when
    running the Evaluator component.

    Args:
        key: The original feature key

    Returns:
        key with '_xf' appended
    """
    return key + '_xf'
04348282ee3e3139cb6ce9f2d66cd821afbfa9fa
26,645