content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import subprocess
def execute_script_on_host(target, target_path, script_path):
    """Run a Python script on a remote host via ssh.

    The remote script is expected to search ``target_path`` for bare Git
    repositories and print their names to stdout as a comma-separated
    list, e.g. ``'repo1,repo1 - Copy\\n'``.

    Parameters
    ----------
    target : str
        ssh destination (e.g. ``user@host``).
    target_path : str
        Directory on the remote host to ``cd`` into before running.
    script_path : str
        Path of the Python script to execute remotely.

    Returns
    -------
    list of str
        Git repo names (not paths!); empty list when nothing was printed.

    Raises
    ------
    subprocess.CalledProcessError
        If ssh or the remote script exits non-zero (``check=True``).
    """
    completed_process = subprocess.run(
        ["ssh", target, f"cd {target_path}; python3 {script_path}"],
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        encoding="utf-8",
        check=True,
    )
    # Strip the trailing newline from the whole output before splitting;
    # an empty output must yield [] rather than [''].
    output = completed_process.stdout.strip()
    return output.split(",") if output else []
import re
def standardize(s):
"""
字符串标准化
去除所有空格
去掉末尾最后一个 的
小写转大写
中文字符替换: (),【】:“”’‘;
:param s:
:return:
"""
s = re.sub(r'\s+', '', s)
s = re.sub(r'的$', '', s) # 去掉末尾最后一个 的
s = re.sub(r',未特指场所$', '', s)
s = s.upper()
s = re.sub(r'(', '(', s)
s = re.sub(r')', ')', s)
s = re.sub(r',', ',', s)
s = re.sub(r':', ':', s)
s = re.sub(r'【', '[', s)
s = re.sub(r'】', ']', s)
s = re.sub(r'“|”|’|‘', '"', s)
s = re.sub(r'】', ']', s)
s = re.sub(r';', ';', s)
return s | 53beb2c588a9a69d5bff1d389f7e3b627d9577d6 | 38,399 |
def MatchTree(patterns, context):
    """
    Return all nodes, from context on down, that match the patterns.

    Recursively walks child nodes and attributes, accumulating matches
    into the per-pattern lists, and restores the context state before
    returning.
    """
    state = context.copy()
    # Save these before any changes are made to the context
    children = context.node.childNodes
    attributes = context.node.xpathAttributes or None
    matched = patterns.xsltKeyPrep(context, context.node)
    pos = 1
    size = len(children)
    for node in children:
        context.node, context.position, context.size = node, pos, size
        # NOTE: the original used map() for its side effect; under
        # Python 3 map() is lazy and the extends would never execute,
        # so loop explicitly instead.
        for acc, extra in zip(matched, MatchTree(patterns, context)):
            acc.extend(extra)
        pos += 1
    if attributes:
        size = len(attributes)
        pos = 1
        for node in attributes:
            context.node, context.position, context.size = node, pos, size
            for acc, extra in zip(matched, patterns.xsltKeyPrep(context, node)):
                acc.extend(extra)
            pos += 1
    context.set(state)
    return matched
import numpy as np
def cnr(mean_gm, mean_wm, std_bg):
    """Compute the Contrast-to-Noise Ratio (CNR) of an image.

    CNR = |mean GM intensity - mean WM intensity| / std of background.

    :type mean_gm: float
    :param mean_gm: Mean value of the gray-matter voxels.
    :type mean_wm: float
    :param mean_wm: Mean value of the white-matter voxels.
    :type std_bg: float
    :param std_bg: Standard deviation of the intensities of the
        background (outside the head) voxels.
    :rtype: float
    :return: The contrast-to-noise ratio.
    """
    contrast = np.abs(mean_gm - mean_wm)
    return contrast / std_bg
import os
import sys
def app_name():
    """Return the application script name (normalized ``sys.argv[0]``).

    .. versionadded:: 9.1
    """
    script = sys.argv[0]
    return os.path.normpath(script)
def enable_checking(flag=True):
    """Set the module-level ``checking_enabled`` flag and return it.

    Intended for use in an ``assert`` statement so the call is stripped
    when Python runs with ``-O``.
    """
    global checking_enabled
    checking_enabled = flag
    return flag
def test_module(client):
    """
    Return 'ok' when the integration connects successfully; anything
    else raises and fails the test.
    """
    res = client.get_companies_guid()
    if res.status_code != 200:
        error_response = res.json().get('detail')
        raise Exception(f"Failed to execute test_module. Error Code: {res.status_code}.Error "
                        f"Response: {error_response}")
    return 'ok', None, None
def model_class_name_to_constructor(model_class_name):
    """Template for future translation modules.

    Intended to map the 'model_class_name' field to the matching model
    constructor; currently the identity mapping.
    """
    return model_class_name
def startswithlow(x, start, startlow=None):
    """True if *x* starts with *start* or its lowercase version.

    The lowercase version may optionally be supplied as *startlow* to
    avoid recomputing it.
    """
    low = start.lower() if startlow is None else startlow
    return x.startswith(start) or x.lower().startswith(low)
def preprocess_splitted_text(text, sum_model):
    """Tokenize split text into the summarizer model's input format.

    Args:
        text (str): split, preprocessed corpus chunk to tokenize.
        sum_model (SummarizeModel): model providing ``tokenize_corpus``.

    Returns:
        tuple[str, str]: (input token ids, attention mask).
    """
    encoded = sum_model.tokenize_corpus(text)
    return encoded["input_ids"], encoded["attention_mask"]
def apply_status(records, field="traffic_report_status", status="ARCHIVED"):
    """Set ``record[field] = status`` on every record, in place.

    Args:
        records (iterable of dict): records to update (mutated).
        field (str, optional): key to set on each record.
        status (str, optional): value to assign.

    Returns:
        The same ``records`` object, after mutation.
    """
    for rec in records:
        rec[field] = status
    return records
def PermutationSingle(liste):
    """Like Permutation, but if (name, name2) already exists, the
    reversed pair (name2, name) is not added again — i.e. unordered
    pairs of distinct elements.

    Uses a set for the "already seen" check, so the cost is O(n^2)
    instead of O(n^3) with the original list membership test.
    """
    tupelListe = []
    seen = set()
    for name in liste:
        for name2 in liste:
            if name != name2 and (name2, name) not in seen:
                tupelListe.append((name, name2))
                seen.add((name, name2))
    return tupelListe
import os
import subprocess
def create_par(target_dir):
    """Create a par2 recovery archive for every file in ``target_dir``.

    The archive is named ``<basename of target_dir>.par2`` and is
    created with 10% redundancy in a single recovery file.

    :param target_dir: directory whose contents are protected.
    :return: the ``par2`` process exit code (0 on success).
    """
    filename = '{0}.par2'.format(os.path.basename(target_dir))
    # Run par2 with cwd= instead of os.chdir() so the caller's working
    # directory is not silently changed as a side effect.
    return subprocess.call(
        ['par2', 'create', '-r10', '-t+', '-n1', '-q', '-q', filename, '*'],
        cwd=target_dir,
    )
import string
def ignore_whitespace(a):
    """
    Return *a* with every whitespace character removed, so two strings
    can be compared while disregarding whitespace.
    Adapted from https://github.com/dsindex/blog/wiki/%5Bpython%5D-string-compare-disregarding-white-space
    """
    delete_ws = {ord(ch): None for ch in string.whitespace}
    return a.translate(delete_ws)
import subprocess
def git_branch(git_clone_dir: str) -> str:
    """Return the name of the currently checked-out git branch.

    Runs ``git branch --no-color`` in *git_clone_dir* and parses the
    line marked with ``*``.

    :raises: `RuntimeError` if git exits non-zero.
    :raises: `ValueError` if no current-branch line is found.
    """
    proc = subprocess.run(
        ['git', 'branch', '--no-color'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=git_clone_dir,
    )
    if proc.returncode != 0:
        raise RuntimeError(
            'git branch has returned {:d}, err: {:s}'.format(
                proc.returncode, proc.stderr.decode('utf-8')
            )
        )
    for line in proc.stdout.decode('utf-8').splitlines():
        if line.startswith('*'):
            # '* branch-name' -> 'branch-name'
            return line.strip().split(' ', 1)[1]
    raise ValueError('Failed to get branch name.')
import os
def get_test_db_file() -> str:
    """
    Build a random test database filename containing the current PID.
    """
    token = os.urandom(10).hex()
    return "messages_test.{}{}.db".format(token, os.getpid())
import argparse
import pathlib
def _create_parser() -> argparse.ArgumentParser:
"""Create the CLI parser."""
parser = argparse.ArgumentParser(prog="ondiff")
parser.add_argument(
"path", type=pathlib.Path, help="Path to a file or directory to watch."
)
parser.add_argument(
"-c", "--command", type=str, help="Command to run upon change.", required=True,
)
return parser | df8fea713b99c6a2b40952c1148fbd8c79292f8e | 38,420 |
import yaml
def get_table_schema(table_name: str) -> list:
    """Get the column names to filter by for a pandas DataFrame.

    Reads ``./models/source/schema.yml`` and returns the ``name`` of
    each column declared for *table_name*.

    :raises KeyError: if *table_name* is not present in the schema.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open until garbage collection).
    with open('./models/source/schema.yml') as fh:
        table_schemas = yaml.safe_load(fh)
    return [col["name"] for col in table_schemas[table_name]["columns"]]
def word_to_put_req(word_vectors_map, word):
    """
    Build the DynamoDB PutRequest payload for *word* and its vector.
    """
    vector_attr = [{'N': str(component)} for component in word_vectors_map[word]]
    return {
        'PutRequest': {
            'Item': {
                'word': {'S': word},
                'vector': {'L': vector_attr},
            }
        }
    }
import time
def iso_time(value=None):
    """Format a ``time.struct_time`` as an ISO-like timestamp.

    The offset suffix is derived from ``time.timezone``; the sign is
    hard-coded as '-', so the output is only correct for zones west of
    (or at) UTC — preserved from the original behaviour.

    :param value: time to format; defaults to ``time.localtime()``.
    :return: string like '2020-01-02T03:04:05-05:00'.
    """
    if value is None:  # was ``== None``; identity comparison is correct here
        value = time.localtime()
    # int() truncates toward zero, matching the old '%d' formatting of
    # the Python 2 float division.
    tz = int(time.timezone / 3600)
    return time.strftime('%Y-%m-%dT%H:%M:%S-', value) + '%02d:00' % tz
def lt_getid(query_locus_tags, refdf):
    """
    Map each query locus tag to its protein id.

    A tag matches only whole, comma-separated entries of ``locus_tag``
    or ``old_locus_tag`` (so 'Rv0063' does not match 'Rv0063a').
    Matching is case-sensitive; on duplicates the last reference row
    wins.
    """
    rows = list(zip(refdf['protein_id'], refdf['locus_tag'], refdf['old_locus_tag']))
    fq_mapping = {}
    for tag in query_locus_tags:
        for protein_id, locus, old_locus in rows:
            if tag in locus.split(', ') or tag in old_locus.split(', '):
                fq_mapping[tag] = protein_id
    return fq_mapping
import re
def str_strip_text(string):
    """Strip newlines and backspace-overstrike pairs (a \\b plus the
    character that follows it) from *string*."""
    without_newlines = string.replace("\n", "")
    return re.sub("\x08.", "", without_newlines)
def key_prefix(key):
    """Return everything before the first '{' in *key* — the prefix
    used to find the latest key with that prefix."""
    return key.partition('{')[0]
import re
def extract_tag(etree_tag):
    """Strip the '{namespace}' prefix off an etree tag.

    An etree tag comes in the form '{namespace}tag'; returns the bare
    tag, or '' when nothing matches (empty input).
    """
    m = re.match(r'(?:\{.+\})?(.+)', etree_tag)
    return m.group(1) if m else ""
import os
def lipo_info(path):
    """
    Return the architecture info reported by ``lipo -info`` for the
    binary at *path* (macOS only).

    :param path: path to the binary.
    :return: architecture string, e.g. 'x86_64 arm64'.
    """
    with os.popen('lipo -info "{}"'.format(path)) as pipe:
        output = pipe.read()
    arch = output.strip().replace(path + " ", "")
    # Keep only the text after the last ':' ("... are: x86_64 arm64").
    colon = arch.rfind(":")
    if colon >= 0:
        arch = arch[colon + 1:].strip()
    return arch
import os
import json
from collections import namedtuple
def live_key():
    """
    Load the production BPL Solr credentials from the user profile.

    Runs only locally on Windows; the service has IP restrictions so it
    most likely won't work on CI.  Returns a ``Cred(endpoint,
    client_key)`` namedtuple on Windows, ``None`` elsewhere.
    """
    if os.name != "nt":
        return None
    Cred = namedtuple("Cred", ["endpoint", "client_key"])
    cred_path = os.path.join(os.environ["USERPROFILE"], ".bpl-solr/bpl-solr-prod.json")
    with open(cred_path, "r") as fh:
        cred = json.load(fh)
    return Cred(cred["endpoint"], cred["client_key"])
def get_systeminfo(resource, config, interactive=False):
    """Return a canned system-info payload (placeholder implementation)."""
    return {'ohai': 'there!'}
import random
def train_test_split(df, test_size):
    """Randomly split *df* into training and testing DataFrames.

    Usage: ``train_test_split(df, test_size)``

    If ``test_size`` is a float it is treated as a fraction of the rows
    (rounded, since ``random.sample`` needs an integer k); otherwise it
    is the absolute row count of the test set.

    Returns ``(train_df, test_df)``.
    """
    # A float test_size means "percentage of the frame".
    if isinstance(test_size, float):
        test_size = round(test_size * len(df))
    # Randomly chosen test indices; the rest become training rows.
    test_idx = random.sample(population=df.index.tolist(), k=test_size)
    return df.drop(test_idx), df.loc[test_idx]
import os
def get_package_version(overwritten_package_version, gw_dir):
    """Return the genomeworks python package version.

    If the user supplied an override, return it unchanged; otherwise
    read the VERSION file found in *gw_dir* (newlines stripped).
    """
    if overwritten_package_version is not None:
        return overwritten_package_version
    version_file = os.path.join(gw_dir, 'VERSION')
    with open(version_file, 'r') as fh:
        return fh.read().replace('\n', '')
def make_trie(str_set):
    """
    Build a trie (prefix tree) as nested dicts from an iterable of strings.

    https://atcoder.jp/contests/arc087/submissions/5329903
    :param collections.Iterable[str] str_set:
    :rtype: dict of dict
    """
    trie = {}
    for word in str_set:
        node = trie
        for ch in word:
            node = node.setdefault(ch, {})
    return trie
def reduction_ratio(num_comparisons, all_comparisons):
    """Compute the reduction ratio: 1 - num_comparisons / all_comparisons.

    Parameter Description:
      num_comparisons : the number of candidate record pairs
      all_comparisons : the total number of comparisons between all
                        record pairs

    Returns a float; 1.0 when there are no candidate comparisons.
    """
    if num_comparisons == 0:
        return 1.0
    return 1.0 - float(num_comparisons) / all_comparisons
from functools import reduce
def reducer(function, sequence, default=None):
    """
    Like ``functools.reduce`` but without the implicit zero: an empty
    sequence returns *default* instead of raising.

    :Parameters:
    ----------------
    function : callable
        binary function.
    sequence : iterable
        list of inputs to be applied iteratively to reduce.
    default : TYPE, optional
        value returned when the sequence is empty.

    :Example:
    -------
    >>> from operator import add, mul
    >>> assert reducer(add, [1,2,3,4]) == 10
    >>> assert reducer(mul, [1,2,3,4]) == 24
    >>> assert reducer(add, [1]) == 1
    >>> assert reducer(add, []) is None
    """
    items = list(sequence)
    if not items:
        return default
    if len(items) == 1:
        return items[0]
    return reduce(function, items)
import torch
def merge_second_and_third_dim(batch: torch.Tensor) -> torch.Tensor:
    """Merge dims 1 and 2 of *batch* into one: (d0, d1, d2, ...) ->
    (d0, d1*d2, ...).

    Used when flattening messages from the primitives to the master."""
    d0, d1, d2, *rest = batch.shape
    return batch.view(d0, d1 * d2, *rest)
import re
import argparse
def validate_guid_uuid(value):
    """Validate that *value* is a GUID or UUID; return it unchanged or
    raise ``argparse.ArgumentTypeError``."""
    guid_re = re.compile(
        '^[0-9A-F]{8}[-]?([0-9A-F]{4}[-]?){3}[0-9A-F]{12}$',
        re.IGNORECASE)
    if guid_re.match(value) is None:
        raise argparse.ArgumentTypeError('value %s is not a GUID.\n' % value)
    return value
def _compare_nested_sequences(seq1, seq2):
"""Compare two sequences of arrays."""
return all([(l == m).all() for l, m in zip(seq1, seq2)]) | 6fad22812883b3c7c207c9ba955ef42cbd1391b3 | 38,443 |
def is_type(valid_type):
    """Return a validator function accepting only instances of *valid_type*.

    :param type valid_type: the type that should be considered valid
    :returns: a function ``validate(value, should_raise=True)`` that
        returns True for instances of valid_type; otherwise it raises
        TypeError (or returns False when should_raise is False)
    :rtype: callable
    """
    def validate(value, should_raise=True):
        if isinstance(value, valid_type):
            return True
        if should_raise:
            raise TypeError(
                "Expected value of type {expected}, actual value was of type {actual}.".format(
                    expected=valid_type, actual=type(value)
                )
            )
        return False
    return validate
def get_config_option(options, option_name, optional=False):
    """Return ``options[option_name]`` from appconfig.

    Raises ValueError when a mandatory option is missing (or falsy).
    """
    value = options.get(option_name)
    if value or optional:
        return value
    raise ValueError('"{0}" is mandatory and is not set in the app.config file'.format(option_name))
def get_sentences(pos_tags, search_terms):
    """Consume (word, POS) pairs, building one sentence per search term.

    For each (position, term) pair, words are popped off the front of
    ``pos_tags`` and accumulated (space-separated, with a leading space)
    until the accumulated text contains the term.  POS tags themselves
    are not used.

    Args:
        pos_tags (list): List of tuples ( <word> , <POS> ); consumed in place.
        search_terms (list): List of tuples ( <position_int>, <search_term> ).

    Returns:
        list: List of meaningful sentence strings.
    """
    sentences = []
    for _pos, term in search_terms:
        words = []
        while True:
            words.append(pos_tags.pop(0)[0])
            candidate = ' ' + ' '.join(words)
            if term in candidate:
                sentences.append(candidate)
                break
    return sentences
def parse_usb_id(id):
    """Parse a hexadecimal VID/PID argument string into an int."""
    return int(id, base=16)
import argparse
def create_arg_parser():
    """Build the argument parser for the GMF baseline.

    Data arguments select the target/source markets and run files;
    model arguments control the training hyper-parameters.
    """
    p = argparse.ArgumentParser('GMFbaseline')
    # --- data selection ---
    p.add_argument('--data_dir', type=str, default='DATA/', help='dataset directory')
    p.add_argument('--tgt_market', type=str, default='t1', help='specify a target market name')
    p.add_argument('--src_markets', type=str, default='s1-s2',
                   help='specify none ("none") or a few source markets ("-" seperated) to augment the data for training')
    p.add_argument('--tgt_market_valid', type=str, default='DATA/t1/valid_run.tsv',
                   help='specify validation run file for target market')
    p.add_argument('--tgt_market_test', type=str, default='DATA/t1/test_run.tsv',
                   help='specify test run file for target market')
    p.add_argument('--exp_name', type=str, default='baseline_toy', help='name the experiment')
    # 'train.tsv' for the original data loading
    p.add_argument('--train_data_file', type=str, default='train_5core.tsv',
                   help='the file name of the train data')
    # --- model hyper-parameters ---
    p.add_argument('--num_epoch', type=int, default=25, help='number of epoches')
    p.add_argument('--batch_size', type=int, default=1024, help='batch size')
    p.add_argument('--lr', type=float, default=0.005, help='learning rate')
    p.add_argument('--l2_reg', type=float, default=1e-07, help='learning rate')
    p.add_argument('--latent_dim', type=int, default=8, help='latent dimensions')
    p.add_argument('--num_negative', type=int, default=4, help='num of negative samples during training')
    p.add_argument('--cuda', action='store_true', help='use of cuda')
    p.add_argument('--seed', type=int, default=42, help='manual seed init')
    return p
def kernel_do_filtro(nome_filtro):
    """
    Input: a string naming a filter.
    Output: the convolution kernel (matrix) corresponding to that
    filter name.

    Raises ValueError (via list.index) for an unknown name.
    """
    kernel_por_nome = [
        ("bordas", [[-1, -1, -1],
                    [-1, 8, -1],
                    [-1, -1, -1]]),
        ("sharpen", [[0, -1, 0],
                     [-1, 5, -1],
                     [0, -1, 0]]),
        ("left_sobel", [[1, 0, -1],
                        [2, 0, -2],
                        [1, 0, -1]]),
        ("emboss", [[-2, -1, 0],
                    [-1, 1, 1],
                    [0, 1, 2]]),
        # 5x5 box blur: every coefficient is 1/25.
        ("blur", [[0.04] * 5 for _ in range(5)]),
        ("unsharp", [
            [-0.00390625, -0.015625, -0.0234375, -0.015625, -0.00390625],
            [-0.015625, -0.0625, -0.09375, -0.0625, -0.015625],
            [-0.0234375, -0.09375, 1.859375, -0.09375, -0.0234375],
            [-0.015625, -0.0625, -0.09375, -0.0625, -0.015625],
            [-0.00390625, -0.015625, -0.0234375, -0.015625, -0.00390625],
        ]),
    ]
    nomes = [nome for nome, _ in kernel_por_nome]
    # .index() raises ValueError on an unknown name, as before.
    return kernel_por_nome[nomes.index(nome_filtro)][1]
def split_obs(obs):
    """Split a dict observation into its 'state' and 'img' components."""
    return obs['state'], obs['img']
def engineering_string(number):
    """Format *number* with the proper engineering (SI) prefix."""
    absnr = abs(number)
    if absnr == 0:
        return '%g ' % (number / 1e-12)
    # (exclusive upper bound, divisor, prefix) — bounds kept exactly as
    # the original thresholds, including the odd 0.999999991e9.
    scales = (
        (0.99999999e-9, 1e-12, 'p'),
        (0.99999999e-6, 1e-9, 'n'),
        (0.99999999e-3, 1e-6, 'µ'),
        (0.99999999, 1e-3, 'm'),
        (0.99999999e3, 1.0, ''),
        (0.99999999e6, 1e3, 'k'),
        (0.999999991e9, 1e6, 'M'),
    )
    for bound, divisor, prefix in scales:
        if absnr < bound:
            return '%g %s' % (number / divisor, prefix)
    return '%g G' % (number / 1e9)
def GetDataForExistingElements(list, ref_dict):
    """
    Return the subset of ``ref_dict`` (type <Atom_Data>) restricted to
    the elements of *list* that have a truthy entry.
    """
    return {element: ref_dict[element] for element in list if ref_dict.get(element)}
from typing import List
def _get_difference_by(fields1: List[str], fields2: List[str]) -> List[str]:
"""Get list with common fields used to decimate and difference given Datasets
Args:
fields1: Fields of 1st Dataset
fields2: Fields of 2nd Dataset
Returns:
List with common fields to decimate and difference given Datasets
"""
difference_by = []
common_fields = set(fields1) & set(fields2)
for field in ["time", "satellite"]:
if field in common_fields:
difference_by.append(field)
return difference_by | f80b36788c895269e41f6b2a67fe8961c58fb73c | 38,453 |
def intersect(p, q):
    """Return the intersection point of two lines p and q.

    Arguments:
        p: 2-tuple (a, b) representing the line y = ax + b.
        q: 2-tuple (c, d) representing the line y = cx + d.

    Returns:
        The intersection point as a 2-tuple (x, y).

    Raises:
        ValueError: if the lines are parallel (equal slopes).
    """
    (a, b), (c, d) = p, q
    if a == c:
        raise ValueError("parallel lines")
    x = (d - b) / (a - c)
    return (x, a * x + b)
def _infer_join_clauses(tables, join_keys, t0):
"""For backward compatibility with single equivalence class joins."""
for keys in join_keys.values():
assert len(keys) == 1, join_keys
assert t0 in tables, tables
t0_idx = tables.index(t0)
k0 = join_keys[t0][0]
return [
"{}.{}={}.{}".format(t0, k0, t, join_keys[t][0])
for i, t in enumerate(tables)
if i != t0_idx
] | 44cd44eeffef5e5461d5a2f76e220299b354039f | 38,455 |
def toTuple2D(self):
    """
    Extension for cadquery.Vector that provides a 2D (x, y) tuple
    rather than the 3D tuple produced by Vector.toTuple().
    """
    x, y, _z = self.toTuple()
    return (x, y)
def to_second(dt):
    """
    Return *dt* truncated to whole seconds (microsecond zeroed).
    """
    return dt.replace(microsecond=0)
import torch
def soft_round_inverse(
    y: torch.Tensor, alpha: float, eps: float = 1e-3
) -> torch.Tensor:
    """Inverse of ``soft_round``.

    This operation is described in Sec. 4.1. in the paper:
    > "Universally Quantized Neural Compression"<br />
    > Eirikur Agustsson & Lucas Theis<br />
    > https://arxiv.org/abs/2006.09952

    Args:
        y:
        alpha: smoothness of the approximation
        eps: threshold below which ``soft_round`` is the identity

    Returns:
        The inverse of ``soft_round``.
    """
    a = torch.tensor(max(alpha, eps))
    midpoint = torch.floor(y) + 0.5
    scaled = (y - midpoint) * (torch.tanh(a / 2.0) * 2.0)
    offset = torch.clamp(torch.atanh(scaled) / a, -0.5, 0.5)
    inverted = midpoint + offset
    # Below the eps threshold soft_round is the identity, so return y.
    return torch.where(torch.tensor(alpha < eps), y, inverted)
def connected_components(leaf_to_root):
    """Return the number of roots in the ontology.

    Params:
        leaf_to_root, Dict[str, str] leaf_string -> root_string
        (a value of None marks a root)
    Outputs:
        num_roots, int
    """
    return sum(1 for root in leaf_to_root.values() if root is None)
import time
def transform_date(value):
    """
    Transform a legacy-system date (currently "%m/%d/%Y") into the
    required "%Y-%m-%d %H:%M:%S" (UTC) format.

    Returns None for an empty value; raises ValueError when no known
    input format matches.
    """
    if not value:
        return None
    for fmt in ("%m/%d/%Y",):
        try:
            parsed = time.strptime(value.upper(), fmt)
        except ValueError:
            continue
        return time.strftime("%Y-%m-%d %H:%M:%S", parsed)
    raise ValueError("Could not parse date [{}]".format(value))
import requests
def get_metadata_from_url(url, session=None):
    """Fetch *url* via a requests-compatible API and return the body text.

    Falls back to the module-level ``requests`` when no session is given.
    """
    http = session or requests
    return http.get(url).text
def _DisposeCircularBitInfo(bitInfo, minRadius=1, maxFragment=True):
"""Dispose the bitinfo retrived from GetFoldedCircularFragment() or GetUnfoldedCircularFragment()
*internal only*
"""
allFragments = list(bitInfo.keys())
station = {}
# maxFragments = []
for idx, pairs in bitInfo.items():
for (atom, radius) in pairs:
if radius >= minRadius and (not maxFragment or \
atom not in station or \
station[atom]['radius'] < radius):
station[atom] = {'idx':idx, 'radius':radius}
maxFragments = [vals['idx'] for vals in station.values()]
return (allFragments, maxFragments) | 7f3b32f09cc56b3f80afdb95cd3fda23c566ff47 | 38,465 |
def share_zeros(x):
    """Calculate the share of 0s in a pandas.Series."""
    zero_count = (x == 0).sum()
    return zero_count / len(x)
import torch
def get_float_value(x):
    """Convert *x* (tensor scalar, float, or other number) to a plain
    Python value; tensors are unwrapped with .item()."""
    if isinstance(x, torch.Tensor):
        return x.item()
    if isinstance(x, float):
        return x
    return float(x)
def linscale(d, lim):
    """Linearly rescale array *d* onto the interval [lim[0], lim[1]].

    The minimum of *d* maps to lim[0] and the maximum to lim[1].

    Note: the original omitted the division by the data range, so its
    result was not actually confined to ``lim`` as documented.
    """
    span = d.max() - d.min()
    return (d - d.min()) / span * (lim[1] - lim[0]) + lim[0]
import math as Math
def computeTicks(x, step=5):
    """
    Compute a tick domain with the given step encompassing series x.

    @ params
        x - Required - A list-like object of integers or floats
        step - Optional - Tick frequency
    """
    hi, lo = Math.ceil(max(x)), Math.floor(min(x))
    upper = hi + abs((hi % step) - step)
    if hi % step != 0:
        upper += step
    lower = lo - abs(lo % step)
    return range(lower, upper, step)
def _default_wrapper(recons_func, **kwargs):
""" Default wrapper to parallelize the image reconstruction.
"""
return recons_func(**kwargs) | 4dcbb8666e8fc4cef5df220651416a259871c2cd | 38,472 |
def sample_pts(pts, perc):
    """
    Non-random deterministic sampler: picks points at fractional
    positions 0, perc, 2*perc, ... of the list (float accumulation,
    so small rounding quirks are possible).
    """
    sampled = []
    frac = 0
    last = len(pts) - 1
    while frac <= 1.0 and int(frac * last) < len(pts):
        sampled.append(pts[int(frac * last)])
        frac += perc
    return sampled
def transform_point(point, direction, value):
    """
    Move *point* along *direction* by *value*.

    :param point: Point for movement
    :type point: DB.XYZ
    :param direction: Direction of movement. Vector
    :type direction: DB.XYZ
    :param value: The amount of movement
    :type value: int or float
    :return: New moved point
    :rtype: DB.XYZ
    """
    offset = direction * value
    return point + offset
def save_graph_PTA(graph, df, ear):
    """
    Save an interactive PTA graph to the subject's results folder.

    INPUTS
    -graph: interactive plotly.graph_objects figure
    -df: dataframe with the informations that were used to generate the graph
    -ear: ear side linked with the graph
    OUTPUTS
    -writes the graph to a .html file and returns True
    """
    test = "PTA"
    first = df.index[0]
    sub_long = df["Participant_ID"][first]
    sub_short = sub_long.lstrip("Sub")
    prefix = "../results/" + sub_long + "/" + "Sub-" + sub_short + "_" + test + "_"
    if ear == "All_runs":
        path = prefix + ear + ".html"
    else:
        session = df["DATE"][first]
        name = df["Protocol name"][first]
        condition = df["Protocol condition"][first]
        path = (prefix + session + "_" + name + ": " + condition
                + " (" + ear + ")" + ".html")
    graph.write_html(path)
    return True
from typing import Union
def _dedup(data: Union[list, tuple]) -> list:
"""Remove duplicates from a list/tuple, then return a new clean list"""
ndata = []
for d in data:
if d in ndata: continue
ndata.append(d)
return ndata | 0d152445a8c44b14d42476f31e34727e0373a1d1 | 38,477 |
def parse_admins(customer_admins_json):
    """Collect Symplexity/ENSI admin logins that lack 2FA.

    Walks each customer's admin list; admins with a @symplexity.com or
    @ensi.com address and twoFactorAuthEnabled == False are collected.
    Customers whose 'admins' payload is an API-access error dict are
    reported with that error instead (the TypeError path below).
    """
    print("Checking for MFA and API access")
    symp_2fa_disabled = []
    for customer in customer_admins_json:
        for admin in customer['admins']:
            try:
                if len(admin['email']) > 15:
                    domain = admin['email'][admin['email'].find('@'):]
                    internal = domain in ('@symplexity.com', '@ensi.com')
                    if internal and admin['twoFactorAuthEnabled'] is False:
                        symp_2fa_disabled.append({
                            'organization': customer['name'],
                            'user': admin['name'],
                            'email': admin['email'],
                            'twoFactorAuthEnabled': admin['twoFactorAuthEnabled'],
                        })
            except TypeError:
                # 'admins' was an error dict, not a list of admin dicts.
                error = 'To make requests you must first enable API access'
                if customer['admins']['errors'][0][0:49] == error:
                    symp_2fa_disabled.append({'organization': customer['name'],
                                              'error': error})
    return symp_2fa_disabled
from typing import Dict
def sparse_dot(sp_vector1: Dict, sp_vector2: Dict) -> int:
    """Calculate the dot product of two sparse vectors.

    Iterates the smaller dict and looks each key up in the larger one.

    >>> a = {0: 1, 1: 2, 2: 3}
    >>> b = {0: 4, 1: 5, 2: 6}
    >>> sparse_dot(a, b)
    32
    >>> c = {0: 6, 1: 5, 2: 4}
    >>> d = {0: 4, 1: 5, 2: 7, 3: 15}
    >>> sparse_dot(c, d)
    77
    """
    if len(sp_vector1) <= len(sp_vector2):
        small, big = sp_vector1, sp_vector2
    else:
        small, big = sp_vector2, sp_vector1
    return sum(val * big[pos] for pos, val in small.items() if pos in big)
def create_list(colour):
    """Build a list of (x, y) float tuples from measurement rows.

    Args:
        colour (numpy.ndarray): column-wise list containing measurements.

    Returns:
        list: list of (float, float) measurement tuples.
    """
    return [(float(row[0]), float(row[1])) for row in colour]
def explained_variance(preds, targets):
    """
    Calculate the explained variance between predictions and targets.

    Args:
        preds(Tensor): predictions
        targets(Tensor): ground truth

    Returns:
        Var: explained variance (converted via ``.asnumpy()``)
    """
    residual_var = (targets - preds).var()
    ratio = residual_var / targets.var()
    return (1 - ratio).asnumpy()
import argparse
def CommandLine():
    """Set up the argparser and parse the command line for the
    square-fitting tool."""
    cl = argparse.ArgumentParser(description="Fit Squares in large Rectangle or Box) -- find fewest needed. 2018 by Paul H Alfille")
    cl.add_argument("N", type=int, nargs='?', default=13,
                    help="Width of large Rectangle/Box (default 13)")
    cl.add_argument("-m", "--maximum", type=int, nargs='?', default=None,
                    help="Maximum size of tiling square allowed")
    return cl.parse_args()
import requests
def retrieve_url(url):
    """
    Given a URL (string), retrieve its html and return it as a string.

    Raises ValueError when the response status is not OK.
    """
    response = requests.get(url)
    if not response.ok:
        raise ValueError("{} could not be retrieved.".format(url))
    return response.text
def uid_to_device_id(uid):
    """
    Extract the device ID from a UID: the bits above the low 72.
    """
    return uid >> 72
def print_func(msg: str) -> bool:
    """
    print_func()
    Print *msg* framed by a hash border and return True.
    """
    width = len(msg) + 4
    full_line = "#" * width
    sparse_line = "#" + " " * (width - 2) + "#"
    print("\n".join([
        full_line,
        full_line,
        sparse_line,
        "# " + msg + " #",
        sparse_line,
        full_line,
        full_line,
    ]))
    return True
def validator_for_thank_you(name_input):  # pragma no cover
    """Check that input is non-empty and made only of letters and spaces."""
    if not name_input:
        return False
    # Enforce the documented contract — the original only checked for a
    # non-empty value and accepted digits/punctuation.
    return all(ch.isalpha() or ch == " " for ch in name_input)
def GetNextLine(File):
    """Return the next significant line of *File*.

    Lines beginning with '*', a space, or a newline (blank lines) are
    skipped. Returns None at end of file, otherwise the line text
    including its trailing newline.
    """
    while True:
        text = File.readline()
        if not text:
            return None
        if text[0] not in ('*', '\n', ' '):
            return text
def _log_msg(name: str, size: int) -> str: # pragma: no cover
"""Return log message for creation of file.
Args:
name (str): Name of the created file.
size (int): Size of the created file.
Returns:
str: Log message with information about the created file.
"""
i = 0
units = ['B', 'KB', 'MB', 'GB', 'TB']
while size >= 1000:
size = round(size / 1000)
i += 1
size_str = '%d%s' % (size, units[i])
return "%s | %s" % (name, size_str) | 9b4b6fb40aed78e10e11e7ddbd8905189917a0a9 | 38,489 |
def collapsed_form(form):
    """Build the template context for rendering a collapsed form."""
    return dict(form=form)
def get_beads_2_M(sigma, SI=False):
    """
    Conversion factor from beads/sigma^3 to mol/L (M).

    Args:
        sigma: bead diameter — in meters when SI is True, otherwise in
            Angstroms.
        SI: whether sigma is given in SI units (meters).

    Returns:
        Multiplicative factor converting beads/sigma^3 to mol/L.
    """
    AVOGADRO = 6.022E23          # molecules per mole
    LITERS_PER_M3 = 1E3          # liters in one cubic meter
    METERS_PER_ANGSTROM = 1E-10
    sigma_m = sigma if SI else sigma * METERS_PER_ANGSTROM
    return (AVOGADRO * sigma_m**3 * LITERS_PER_M3)**(-1)
def packHeader(address, more_bit=False):
    """
    Encode *address* plus an optional more_bit flag as a header.

    Addresses below 2**14 are packed into 2 bytes; addresses below 2**30
    into 4 bytes with the top bit of the first byte set. The more_bit
    flag occupies bit 6 of the first byte.

    Returns:
        2 or 4 bytes.

    Raises:
        ValueError: if address >= 1073741824 (2**30).
    """
    flag = 0x40 if more_bit else 0x00
    if address < 16384:
        # 2-byte form: 14 address bits
        return bytes([((address >> 8) & 0x3F) | flag, address & 0xFF])
    if address < 1073741824:
        # 4-byte form: top bit marks the long encoding, 30 address bits
        return bytes([
            ((address >> 24) & 0x3F) | flag | 0x80,
            (address >> 16) & 0xFF,
            (address >> 8) & 0xFF,
            address & 0xFF,
        ])
    raise ValueError('address must be less than 1073741824')
import random
import string
def random_id(length=12) -> str:
    """
    Generate a random string drawn from the url-safe base64 alphabet
    (ascii letters, digits, hyphen, underscore), using random.SystemRandom
    so each character carries ~6 bits of entropy.

    :param length: the length of the random string, defaults to 12
    :return: a random string of the specified length
    """
    alphabet = string.ascii_letters + string.digits + '-_'
    sysrand = random.SystemRandom()
    return ''.join(sysrand.choices(alphabet, k=length))
import json
def read_cn5_surface_text_from_json(input_file):
    """
    Read a ConceptNet JSON-lines file and return items that contain only
    the cleaned surfaceText.

    Fix: the file is now opened with a context manager so the handle is
    closed deterministically (the original leaked it); the unused
    enumerate index was also dropped.

    :param input_file: path to the conceptnet json-lines file
    :return: list of items with a "text" key.
    """
    def clean_surface_text(surface_text):
        # surfaceText marks concepts with [[...]]; strip the brackets
        return surface_text.replace("[[", "").replace("]]", "")

    items = []
    with open(input_file, mode="r") as fp:
        for line in fp:
            item = json.loads(line.strip())
            items.append({"text": clean_surface_text(item["surfaceText"])})
    return items
from typing import Callable
import random
def HitorMiss(f: Callable[[float], float], n: int) -> float:
    """Hit-or-miss Monte-Carlo estimate of the area under f: [0, 1] -> [0, 1].

    Draws n uniform points (x, y) in the unit square and returns the
    fraction that land on or below the curve y = f(x).
    """
    hits = 0
    for _ in range(n):
        sample_x = random.uniform(0, 1)
        sample_y = random.uniform(0, 1)
        hits += sample_y <= f(sample_x)  # True counts as 1
    return hits / n
def validate_password(password):
    """Validates a password

    A password is accepted when it:

    * is at least 8 characters long
    * contains no spaces and no backslashes
    * consists only of ASCII characters (letters, digits, and special
      characters such as !@#$%^&*()-_=+`~[]{}|;:'",<.>/?)

    Parameters
    ----------
    password: str
        Password to validate

    Returns
    -------
    bool
        Whether or not the password is valid

    References
    -----
    http://stackoverflow.com/q/196345
    """
    if len(password) < 8:
        return False
    if any(forbidden in password for forbidden in ('\\', ' ')):
        return False
    try:
        password.encode('ascii')
    except UnicodeError:
        return False
    return True
def concatenate_coders(signal, coder_funs, coder_ind, coder_offsets):
    """
    Run each coder on its slice of *signal* and concatenate the outputs.

    For coder i, the samples signal[j] for j in coder_ind[i] are passed
    to coder_funs[i]; each index the coder returns is shifted by
    coder_offsets[i] before being appended to the combined output.
    """
    output = []
    for fun, indices, offset in zip(coder_funs, coder_ind, coder_offsets):
        sub_signal = [signal[j] for j in indices]
        output.extend(idx + offset for idx in fun(sub_signal))
    return output
import os
def bowtie_align(ad, db, AD_ref, DB_ref, output, sh_dir):
    """Write a SLURM shell script that aligns AD (R1) and DB (R2) reads
    with bowtie2 and post-processes each SAM into a 5-column CSV.

    Nothing is executed here — the function only writes the job script
    and returns the paths that script will produce when submitted.

    Parameters
    ----------
    ad : str
        Path to the AD-side fastq.gz (R1); its basename names the
        job, logs, and output files.
    db : str
        Path to the DB-side fastq.gz (R2).
    AD_ref : str
        bowtie2 index prefix for the AD reference.
    DB_ref : str
        bowtie2 index prefix for the DB reference.
    output : str
        Directory that receives the SAM/CSV files.
    sh_dir : str
        Directory that receives the .sh script and the bowtie/error logs.

    Returns
    -------
    (str, str, str)
        AD csv path, DB csv path, and the written shell-script path.
    """
    basename = os.path.basename(ad)
    error_log = os.path.join(sh_dir, f"{basename.replace('.fastq.gz', '')}")
    bowtie_log = os.path.join(sh_dir, f"{basename.replace('.fastq.gz', '_bowtie.log')}")
    # write header to sh_dir (SLURM directives: 24h limit, 16 cpus, 10G)
    header = f"#!/bin/bash\n#SBATCH --time=24:00:00\n#SBATCH --job-name={basename}\n#SBATCH " \
             f"--cpus-per-task=16\n#SBATCH --error={error_log}-%j.log\n#SBATCH --mem=10G\n#SBATCH " \
             f"--output={error_log}-%j.log\n"
    # command for AD (--norc: per bowtie2 docs, skip reverse-complement alignments)
    params_r1 = "-q --norc --local --very-sensitive-local -t -p 16 "
    sam_file_r1 = os.path.join(output, basename.replace('.fastq.gz','_AD_BC.sam'))
    input_f_r1 = f"-x {AD_ref} -U {ad} -S {sam_file_r1}"
    commandr1 = f"bowtie2 {params_r1} {input_f_r1} 2> {bowtie_log}"
    # command for DB (--nofw: skip forward-strand alignments; '2>>' appends
    # to the same bowtie log that the AD command created)
    params_r2 = "-q --nofw --local --very-sensitive-local -t -p 16 "
    sam_file_r2 = os.path.join(output, basename.replace("_R1", "_R2").replace('.fastq.gz','_DB_BC.sam'))
    input_f_r2 = f"-x {DB_ref} -U {db} -S {sam_file_r2}"
    commandr2 = f"bowtie2 {params_r2} {input_f_r2} 2>> {bowtie_log}"
    # sort r1_sam by read name (-n), then delete the unsorted SAM
    sorted_r1 = sam_file_r1.replace(".sam", "_sorted.sam")
    sort_r1 = f"samtools sort -n -o {sorted_r1} {sam_file_r1} \n rm {sam_file_r1}"
    # sort r2_sam the same way
    sorted_r2 = sam_file_r2.replace(".sam", "_sorted.sam")
    sort_r2 = f"samtools sort -n -o {sorted_r2} {sam_file_r2} \n rm {sam_file_r2}"
    # remove headers: drop SAM header lines (those starting with '@')
    r1 = sam_file_r1.replace(".sam", "_noh.sam")
    r2 = sam_file_r2.replace(".sam", "_noh.sam")
    rm_headers_r1 = f"grep -v \"^@\" {sorted_r1} > {r1}"
    rm_headers_r2 = f"grep -v \"^@\" {sorted_r2} > {r2}"
    # keep only SAM columns 1-5 as the "csv" output (tab-separated)
    r1_csv = r1.replace(".sam", ".csv")
    r2_csv = r2.replace(".sam", ".csv")
    cut_csv_r1 = f"cut -f 1-5 {r1} > {r1_csv}"
    cut_csv_r2 = f"cut -f 1-5 {r2} > {r2_csv}"
    # write all commands to file for submitting jobs
    with open(os.path.join(sh_dir, f"{basename.replace('.fastq.gz', '.sh')}"), "w") as f:
        f.write(header+"\n")
        f.write(commandr1+"\n")
        f.write(sort_r1+"\n")
        f.write(rm_headers_r1+"\n")
        f.write(cut_csv_r1+"\n")
        f.write(commandr2+"\n")
        f.write(sort_r2+"\n")
        f.write(rm_headers_r2+"\n")
        f.write(cut_csv_r2+"\n")
        # remove no header sam file (intermediates no longer needed)
        f.write(f"rm {r1}\n")
        f.write(f"rm {r2}\n")
    return r1_csv, r2_csv, os.path.join(sh_dir, f"{basename.replace('.fastq.gz', '.sh')}")
import subprocess
def bam2bed(bampath, bedpath):
    """Convert BAM to BED7
    BED name field (column 4) contains read id (so that together with map id (col 7) multi-mapper can be identified)
    BED tst field (column 7) contains map id (so that split reads can be identified)
    BED sc1 field (column 5) is from bedtools bamtobed and contains mapping quality

    Parameters
    ----------
    bampath : str
        Input BAM file.
    bedpath : str
        Output BED7 file path (overwritten).
    """
    # bedtools emits one BED12 record per alignment; -split keeps the
    # per-block sizes/starts in columns 11/12
    cmd1 = ['bedtools','bamtobed','-i', bampath, '-split','-bed12']
    # awk expands each BED12 record into one BED7 line per block:
    # chrom, block start, block end, read name, col5 (score), strand, NR (map id)
    awkscript = 'BEGIN{OFS="\t";c=1;}{ n=split($11,a,","); n=split($12,b,","); for(i=1;i<=n;i++){st=$2+b[i]; print $1,st,st+a[i],$4,$5,$6,NR}}'
    # above keep the original name so that you can go back to fastq
    # awkscript = 'BEGIN{OFS="\t";c=1;}{if(d[$4]){$4=d[$4];}else{d[$4]=c;$4=c;c++;} n=split($11,a,","); n=split($12,b,","); for(i=1;i<=n;i++){st=$2+b[i]; print $1,st,st+a[i],$4,$5,$6,NR}}'
    cmd2 = ['awk',awkscript]
    with open(bedpath, 'wb') as fp:
        # pipeline: bedtools | awk > bedpath
        p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
        p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=fp)
        # NOTE(review): stderr is not captured (no stderr=PIPE), so
        # communicate()[1] is always None here — confirm callers do not
        # rely on a meaningful error value being returned.
        err = p2.communicate()[1]
    return err
def format_phone(phone):
    """
    Format a 10-digit string as "(XXX) XXX-XXXX".

    Strings of any other length are returned unchanged.
    """
    if len(phone) != 10:
        return phone
    return f"({phone[:3]}) {phone[3:6]}-{phone[6:]}"
import random
def fresh_deck():
    """
    Build and return a freshly shuffled 52-card deck.

    Each card is a dict with "suit" and "rank" keys.

    :return: deck
    """
    suits = {"Spade", "Heart", "Diamond", "Club"}
    ranks = {"A", 2, 3, 4, 5, 6, 7, 8, 9, 10, "J", "Q", "K"}
    # build all 52 suit/rank combinations
    deck = [{"suit": suit, "rank": rank} for suit in suits for rank in ranks]
    # randomize the order
    random.shuffle(deck)
    return deck
def get_variables(args):
    """
    Return a dictionary of variables specified at CLI.

    Each entry of ``args.variables`` is expected to look like
    "NAME=VALUE". Only the first '=' separates name from value, so
    values may themselves contain '=' (fix: the original split on every
    '=' and silently dropped the remainder of such values).

    :param args: Command Line Arguments namespace
    :return: dict mapping variable names to their string values
    """
    variables_dict = {}
    if args.variables:
        for var in args.variables:
            name, _, value = var.partition('=')
            variables_dict[name] = value
    return variables_dict
def update_status_message(name, status):
    """Create an update status message

    Args:
        name (str): name of data provider
        status (bool): flag indicating whether or not new files have been downloaded

    Returns:
        str: update_status_message
    """
    suffix = ": Update available" if status else ": No updates"
    return name + suffix
def merge_maps(*maps):
    """
    Merge the given sequence of :class:`~collections.Mapping` instances.

    Later mappings take precedence for duplicate keys.

    :param maps: Sequence of mapping instances to merge together.
    :return: A :class:`dict` containing all merged maps.
    """
    return {key: value for mapping in maps for key, value in mapping.items()}
def get_mutual_friends(person1_friends, person2_friends):
    """
    Return the friends common to both people (set intersection), as a
    list in unspecified order.

    :param person1_friends: iterable of person 1's friends
    :param person2_friends: iterable of person 2's friends
    :return: list of mutual friends
    """
    mutual = set(person1_friends).intersection(person2_friends)
    return list(mutual)
def round_down(value, base):
    """
    Round `value` down to the nearest multiple of `base`.
    Expects `value` and `base` to be non-negative.
    """
    remainder = value % base
    return int(value - remainder)
def compare_locations(loc1, loc2):
    """Checks whether locations are within 1kb of each other.

    A location is a (field0, position, field2) triple; 'close' requires
    fields 0 and 2 to match and positions to differ by under 1000.
    """
    same_first = loc1[0] == loc2[0]
    same_third = loc1[2] == loc2[2]
    nearby = abs(int(loc1[1]) - int(loc2[1])) < 1000
    return 'close' if same_first and same_third and nearby else 'far'
def calc_absent(marks):
    """Return the count of absent students.

    Args:
        marks: iterable of student marks; a mark of 0 means absent.

    Returns:
        int: number of zero marks.
    """
    # Idiom fix: sum over a generator instead of a manual counter loop.
    return sum(1 for mark in marks if mark == 0)
def min_sub1(A):
    """
    Find all minimum-sum contiguous subarrays of A by divide and conquer.

    :param A: List[int]
    :return: List[(int,int,int)] -- one (sum, start, end) tuple per
        subarray attaining the global minimum sum (indices inclusive).

    NOTE(review): prints intermediate candidates at every merge step —
    debug output left in by the original author.
    """
    def sub_min(start, end):
        """
        Minimum-sum subarrays of A[start..end] (inclusive).

        :param start: int
        :param end: int
        :return: List[(int,int,int)]
        """
        if start == end:
            # base case: the single element is the only subarray
            return [(A[start], start, end)]
        else:
            mid = (start + end) // 2
            # minima entirely inside the left / right halves
            res_left = sub_min(start, mid)
            res_right = sub_min(mid + 1, end)
            # best subarray crossing mid = best left tail + best right head
            l_min = A[mid]
            l_min_start = mid
            r_min = A[mid + 1]
            r_min_end = mid + 1
            l_ls = 0
            r_ls = 0
            for i in range(mid, start - 1, -1):
                l_ls += A[i]
                if l_ls < l_min:
                    l_min, l_min_start = l_ls, i
            for j in range(mid + 1, end + 1):
                r_ls += A[j]
                if r_ls < r_min:
                    r_min, r_min_end = r_ls, j
            res_over = (l_min + r_min, l_min_start, r_min_end)
            print("res = ", res_left, res_right, res_over)  # debug output
            # keep every candidate that attains the smallest sum
            smallest = min(res_left[0][0], res_right[0][0], res_over[0])
            res = []
            if res_left[0][0] == smallest:
                res.extend(res_left)
            if res_right[0][0] == smallest:
                res.extend(res_right)
            if res_over[0] == smallest:
                res.append(res_over)
            return res
    return sub_min(0, len(A) - 1)
def create_context(review_config, repo_path,
                   head_repository, pull_request):
    """Build the context dict consumed when running fixers.

    Combines workflow settings read from *review_config* with the
    checkout location and the pull-request / repository handles.
    """
    context = dict(
        strategy=review_config.fixer_workflow(),
        enabled=review_config.fixers_enabled(),
        author_name=review_config['GITHUB_AUTHOR_NAME'],
        author_email=review_config['GITHUB_AUTHOR_EMAIL'],
        repo_path=repo_path,
        pull_request=pull_request,
        repository=head_repository,
    )
    return context
import sys
def findClass(className, modulePath):
    """
    Returns the class associated with the given string.
    To improve the results be sure to provide a class name complete with module path.

    Returns None when className is empty or cannot be resolved.

    :type className: str
    :type modulePath: str
    :rtype: class
    """
    # Check if string is valid
    #
    if len(className) == 0:
        return None
    # Split string using delimiter
    #
    if len(modulePath) == 0:
        # no module path: only this module's global namespace is searched
        return globals().get(className, None)
    else:
        # prefer an already-imported module from sys.modules; otherwise
        # import it now
        module = sys.modules.get(modulePath, None)
        root = modulePath.split('.', 1)[0]
        if module is None:
            # a non-empty fromlist makes __import__ return the leaf
            # module rather than the top-level package
            module = __import__(modulePath, locals=locals(), globals=globals(), fromlist=[root], level=0)
        return module.__dict__.get(className, None)
def select_eigenvector(w, v, p, tol):
    """
    Select the polarization eigenvector whose eigenvalue matches the
    density *p* to within *tol*.

    input: eigenvalues (w), eigenvectors (v, as columns), density (p)
    which is also the matching eigenvalue, tolerance (tol)
    output: the matching eigenvector, sign-flipped when its first
    component is negative (implicitly returns None when nothing matches)
    """
    for i in range(3):
        if p - tol < w[i] < p + tol:
            candidate = v.T[i]
            if candidate[0] < 0:
                return -1. * candidate
            return candidate
def parse_authors(authorstr: str):
    """
    Create an array of authors from an author list (in the name <email>
    format) separated by commas.

    Fix: each entry is stripped of surrounding whitespace (the original
    kept the padding after commas), and blank entries are dropped.

    :param authorstr: comma-separated author list
    :return: list of author strings
    """
    return [author.strip() for author in authorstr.split(",") if author.strip()]
import numpy
def table_K1(Flv, l_spacing):
    """Figure 11-27
    Regression by Dazzles

    Empirical fit for K1 as a function of the flow parameter Flv and the
    spacing l_spacing, evaluated as 10 to a polynomial/log expression.

    NOTE(review): the coefficients below are a black-box numerical
    regression reproduced verbatim; they have not been re-derived here,
    and the valid input ranges are not documented — confirm against the
    source figure before extrapolating.
    """
    # fitted regression coefficients (order: a..g as used below)
    a, b, c, d, e, f, g = -0.4141022057164735, -0.15097503976930754, -0.03955953480149698, -0.6833440064263923, 1.2631972777123914, -0.10683783034172412, -1.8165949194259345
    return 10**(a*numpy.log10(Flv) + b*(numpy.log10(Flv))**2 + c*numpy.exp(Flv) + d*l_spacing**2 + e*l_spacing + f*numpy.log10(Flv)*l_spacing + g)
def set_equal(one, two):
    """Return True when *one* and *two* contain the same distinct elements."""
    first, second = set(one), set(two)
    return first == second
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.