content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def add_training_args(parser):
    """Register the training-related CLI arguments on *parser*.

    Adds a 'train' argument group covering optimizer, schedule, and
    bookkeeping options, and returns the same parser for chaining.
    """
    train_group = parser.add_argument_group('train', 'training configurations')
    train_group.add_argument('--train_batchsize', type=int, default=32, help='Training batchsize.')
    train_group.add_argument('--seed', type=int, default=0, help='Random seed for IL training experiment.')
    train_group.add_argument('--opt', default='adam', type=str, help='Type of optimizer to use.')
    train_group.add_argument('--lr', type=float, default=1e-5, help='Learning rate.')
    train_group.add_argument('--momentum', default=0.9, type=float, help='Momentum optimization parameter.')
    train_group.add_argument('--weight_decay', default=1e-5, type=float, help='Weight decay optimization parameter.')
    train_group.add_argument('--num_epochs', type=int, default=40, help='Number of training epochs.')
    train_group.add_argument('--top_k', type=int, nargs='+', default=[2, 5],
                             help='In addition to top-1 generalization accuracy, we track top-k.'
                             )
    train_group.add_argument('--use_gpu', default=False, action='store_true', help='Use gpu or not.')
    train_group.add_argument('--out_dir', type=str, default='./output', help='Directory to save the experimental results.')
    train_group.add_argument('--hidden_size', type=int, help='Hidden size of the branching policy network.')
    train_group.add_argument('--dropout', type=float, default=0.0, help='Dropout parameter for the branching policy network.')
    # Options specific to the TreeGate model.
    train_group.add_argument('--lr_decay_schedule', type=int, nargs='+', default=[20, 30],
                             help='Learning rate decay schedule.')
    train_group.add_argument('--lr_decay_factor', type=float, default=0.1, help='LR decay factor.')
    # Options specific to the T-BranT model.
    train_group.add_argument('--noam', default=False, action='store_true', help='Use the Noam Scheduler.')
    train_group.add_argument('--warm_epochs', type=int, default=5, help='Warm epochs for Noam Scheduler.')
    return parser
import requests
def get_user_list(base_url, admin_token):
    """As an admin, get a list of all users on the Saturn Cloud account.

    NOTE(review): assumes base_url ends with '/' (URL is built by plain
    concatenation) — confirm against callers.
    """
    auth_header = {"Authorization": f"token {admin_token}"}
    response = requests.get(f"{base_url}api/users", headers=auth_header)
    return response.json()["users"]
def pathnode(path):
    """Split a complete path into its group and leaf components.

    Trailing slashes are ignored; returns (group, leaf).
    """
    trimmed = path.rstrip('/')
    parts = trimmed.split('/')
    leaf = parts.pop()
    return '/'.join(parts), leaf
def model_information(tileSource, format):
    """
    Return the model name or best information we have related to it.
    :param tileSource: a large_image tile source.
    :param format: the vendor or None if unknown.
    :returns: a string of model information or None.
    """
    metadata = tileSource.getInternalMetadata()
    openslide_meta = metadata.get('openslide', {})
    for key in ('aperio.ScanScope ID', 'hamamatsu.Product'):
        if openslide_meta.get(key):
            return openslide_meta[key]
    xml_meta = metadata.get('xml', {})
    for key in ('DICOM_MANUFACTURERS_MODEL_NAME', 'DICOM_DEVICE_SERIAL_NUMBER'):
        if xml_meta.get(key):
            return xml_meta[key]
    return None
def bulkvel2E(cl, ct, rho):
    """
    Converts bulk wave velocities to Young's modulus and Poisson's ratio.
    Parameters:
    cl : longitudinal wave velocity
    ct : shear wave velocity
    rho : density
    Returns:
    E : Young's modulus
    nu : Poisson's ratio
    """
    # Second Lame parameter (shear modulus) mu = rho * ct^2.
    shear_modulus = ct**2 * rho
    velocity_ratio_sq = cl**2 / ct**2
    nu = (2 - velocity_ratio_sq) / (2 * (1 - velocity_ratio_sq))
    # E = 2 * mu * (1 + nu)
    young = shear_modulus * 2 * (1 + nu)
    return young, nu
def space_row(left, right, filler=' ', total_width=-1):
    """Space the data in a row with optional filling.

    Arguments
    ---------
    left : str, to be aligned left
    right : str, to be aligned right
    filler : str, default ' '. Only its first character is used.
    total_width : int, width of line. A negative value means that many
        filler characters are placed between left and right.

    Returns
    -------
    str
    """
    left_text = str(left)
    right_text = str(right)
    pad_char = str(filler)[:1]
    if total_width < 0:
        gap = -total_width
    else:
        gap = total_width - len(left_text) - len(right_text)
    return left_text + pad_char * gap + right_text
def round_float_str(s, precision=6):
    """
    Given a string containing floats delimited by whitespace, round each
    float and return the rebuilt string (tokens joined by single spaces,
    with a trailing space — preserved original behavior).

    Parameters
    ----------
    s : str
        String containing floats
    precision : int
        Number of decimals to round each float to

    Fix: the original used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit; only ValueError (non-numeric token) is
    caught now.
    """
    round_str = ''
    for token in s.split():
        try:
            token = str(round(float(token), precision))
        except ValueError:
            # Non-numeric token: pass through unchanged.
            pass
        round_str += token + ' '
    return round_str
def parse_line_count(line):
    """Parse one line of ``wc -l``-style output into (count, filename).

    Fix: split on the first run of whitespace instead of a single space —
    the original ``line.split(" ")`` raised on column-aligned output such
    as "  42   ./file" and broke filenames containing spaces.
    """
    line = line.strip()
    line_count, filename = line.split(None, 1)
    # remove the uninformative leading "./" prefix
    if filename.startswith("./"):
        filename = filename[len("./"):]
    return int(line_count), filename
import configparser
def get_config_ini(ini_file, section, key):
    """
    Get config.ini section->key->value
    [section]
    key = value
    :param ini_file: path to the ini file
    :param section: section name
    :param key: key within the section
    :return: value
    :raises configparser.Error: if the section or key is missing

    Fix: the original wrapped everything in ``except Exception as e:
    raise Exception(e)``, which destroyed the exception type and
    traceback; configparser's own errors now propagate unchanged
    (they are Exception subclasses, so existing handlers still work).
    """
    config = configparser.ConfigParser()
    config.read(ini_file)
    return config.get(section, key)
def word_list_to_idx_list(word_list, w2v_vocab):
    """Convert each word in every sequence of *word_list* to its
    Word2Vec vocabulary index plus one.

    The +1 offset reserves index 0 for padding used during training.
    """
    return [
        [w2v_vocab.get(word).index + 1 for word in word_seq]
        for word_seq in word_list
    ]
def sec2time(seconds):
    """
    Converts seconds to 'h:mm:ss' time format.
    :param float|int seconds:
    :return str: 'h:m:s'
    """
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
def find_index(items, predicate):
    """Return the index of the first element matching predicate, or -1."""
    for position, element in enumerate(items):
        if predicate(element):
            return position
    return -1
from typing import List
from typing import Optional
import random
def extract_sample(word_list: list, sample_size: Optional[int] = None) -> list:
    """Return a random sample from the word list or a shuffled copy of it.

    :param word_list: the list of words to extract the random sample from
    :param sample_size: if falsy or >= len(word_list), a shuffled copy of
        the whole list is returned instead.

    Fix: when word_list contains duplicates, the candidate pool could
    shrink below sample_size between iterations, making
    ``random.sample(pool, k=sample_size)`` raise ValueError; the draw size
    is now capped at the remaining pool size.
    """
    if not sample_size or len(word_list) <= sample_size:
        return random.sample(word_list, k=len(word_list))
    sample: List[str] = []
    remaining = word_list
    while len(sample) < sample_size and len(remaining) > 0:
        draw = min(sample_size, len(remaining))  # never ask for more than available
        sample += [word for word in random.sample(remaining, k=draw) if word not in sample]
        remaining = [word for word in remaining if word not in sample]
    if sample_size < len(sample):
        return random.sample(sample, k=sample_size)
    return sample
def flatten(x, out=None, prefix='', sep='.'):
    """
    Flatten a nested dict/list/tuple into a flat dict keyed by joined paths.

    :param x: structure to flatten
    :param out: optional dict to accumulate into (created if None)
    :param prefix: key prefix accumulated during recursion
    :param sep: separator between path components

    Fix: for list/tuple elements the original recursed on the enumerate
    index ``k`` instead of the value ``v``, so list contents were replaced
    by their indices; the element value is now recursed on.
    """
    out = out if out is not None else {}
    key_prefix = f"{prefix}{sep}" if prefix else ''
    if isinstance(x, dict):
        for k, v in x.items():
            flatten(v, out=out, prefix=f"{key_prefix}{k}", sep=sep)
    elif isinstance(x, (list, tuple)):
        for i, v in enumerate(x):
            flatten(v, out=out, prefix=f"{key_prefix}{i}", sep=sep)
    else:
        out[prefix] = x
    return out
def fromarrays(arrays):
    """Creates a multi-frame iterator from given list of arrays.

    Parameters
    ----------
    arrays : tuple of array-like
        A tuple of array-like objects that represent single-camera videos.

    Returns
    -------
    video : iterator
        A multi-frame iterator yielding one tuple of frames per step.
    """
    def _frames():
        yield from zip(*arrays)
    return _frames()
import math
def entity_count_penalty(source_entities_count, candidate_entities_count):
    """Calculates the entity count penalty (ECP) introduced in the paper.

    ECP is inspired by BLEU's brevity penalty; it penalizes systems that
    produce more than twice as many entities as the source.

    Args:
        source_entities_count: Count of source entities.
        candidate_entities_count: Count of candidate entities.
    Returns:
        ECP value (1.0 when candidate < 2 * source).
    """
    source = float(source_entities_count)
    candidate = float(candidate_entities_count)
    if candidate < 2 * source:
        return 1.0
    return math.exp(1 - candidate / (2 * source))
def calc_number_of_spikes(in_array, threshold=0.0):
    """
    Count upward threshold crossings ("spikes") in a sequence.

    :param in_array: array of values
    :param threshold: value that, when crossed upward, counts as a spike
    :return: num_spikes: integer number of spikes

    Fix: the original iterated over ``in_array[0:-2]``, which skipped the
    final adjacent pair and missed a spike at the very end of the array;
    all consecutive pairs are now checked.
    """
    num_spikes = 0
    for i in range(len(in_array) - 1):
        if in_array[i] < threshold < in_array[i + 1]:
            num_spikes += 1
    return num_spikes
def _calc_tc(eff, n_gre, tr_gre, tr_seq, ti1, ti2, a1, a2):
"""Calculate TC for MP2RAGE sequence."""
return tr_seq - ti2 - n_gre * tr_gre / 2.0 | 0c376433a6aac792d62480ddd4c03a2a7df63f3f | 121,199 |
def dot(a, b):
    """Return the element-wise products of two vectors as a list.

    NOTE(review): despite the name, this returns the list of pairwise
    products, not their sum; ``sum(dot(a, b))`` gives the true dot
    product. The original docstring incorrectly claimed "the dot product";
    it is corrected here to match the implemented (and caller-visible)
    behavior rather than changing the return type under existing callers.
    """
    return [ax * by for ax, by in zip(a, b)]
def euler16(num: int) -> int:
    """Calculates the sum of the decimal digits of 2**num."""
    power = str(2 ** num)
    return sum(int(ch) for ch in power)
def collect_blocks(d_blocks, ayear, n):
    """
    Collect a block of patents for a window of n years around a focus year.

    If n is positive the patents come from the future; if n is negative
    they come from the past. (Fix: the original docstring said "possitive"
    for both directions — the second case is the negative one.)

    Parameters
    ----------
    d_blocks : dict mapping year -> list of patent indexes for that year.
    ayear : the focus year
    n : the window size (non-zero)

    Returns
    -------
    block : list of all patents in the window of n years (past or future)

    NOTE(review): n == 0 raises ZeroDivisionError, as in the original —
    presumably callers never pass 0; confirm.
    """
    inc = int(n / abs(n))
    window = range(ayear + inc, ayear + n + inc, inc)
    block = []
    for year in window:
        block.extend(d_blocks.get(year, []))
    return block
def _skip_string(self, string: str) -> bool:
"""
Our version of the skip_string method from
discord.ext.commands.view; used to find
the prefix in a message, but allowing prefix
to ignore case sensitivity
"""
strlen = len(string)
if self.buffer.lower()[self.index:self.index + strlen] == string:
self.previous = self.index
self.index += strlen
return True
return False | 72833f2d79fb82a3e121314de3b18e15b3b0df51 | 121,206 |
def node_to_dict(node):
    """Convert a node to a dictionary representation.

    Children are not embedded; they are represented by a cached list of
    their labels (computed on first call and stored on the node).
    """
    if not hasattr(node, 'child_labels'):
        node.child_labels = [child.label_id() for child in node.children]
    excluded = ('children', 'source_xml')
    return {key: value for key, value in node.__dict__.items()
            if key not in excluded}
import random
def generate_random_string(size):
    """
    Generates a random ASCII-letter string of the given size.
    :param size: integer value
    :return: a string of given size

    Fix: the original used ``range(65, 90)`` and ``range(97, 122)``;
    since range() excludes the upper bound, 'Z' (90) and 'z' (122) could
    never be produced. The bounds are now 91 and 123 so all 52 letters
    can occur.
    """
    codepoints = list(range(65, 91)) + list(range(97, 123))
    return ''.join(chr(random.choice(codepoints)) for _ in range(size))
import string
import secrets
def password_generator(password_type=None, length=None):
    """A random password generator.

    password_type(str): decides the charset ('mysql', 'ssh', 'username',
        'plaintext'; default 'plaintext')
    length(int): password length, default 20 chars
    return(str): a random password
    raise: ValueError on negative length or unknown type
    """
    if length is None:
        size = 20
    elif length < 0:
        raise ValueError('Length cannot be negative!')
    else:
        size = int(length)
    kind = 'plaintext' if password_type is None else password_type
    # Available charsets per password type.
    charsets = {
        'mysql': string.ascii_letters + string.digits + r'!@#$%^&*',
        'ssh': string.ascii_letters + string.digits + r'~!@#$%^&*)(}{?+/=][,.><;:`',
        'username': string.ascii_letters + string.digits + '_',
        'plaintext': string.ascii_letters + string.digits
    }
    if kind not in charsets:
        raise ValueError('Wrong type!')
    alphabet = charsets[kind]
    password = ''.join(secrets.choice(alphabet) for _ in range(size))
    # A username must start with a letter.
    if kind == 'username':
        password = secrets.choice(string.ascii_letters) + password[1:]
    return password
def one_hot_vector(val, lst):
    """Convert a value to a one-hot boolean list based on options in lst.

    Unknown values map onto the final option (lst[-1] acts as catch-all).

    Fix: returns a concrete list instead of a lazy single-use ``map``
    iterator (a Python-2 leftover that broke len(), indexing and repeated
    iteration); lists remain iterable, so existing callers still work.
    """
    if val not in lst:
        val = lst[-1]
    return [option == val for option in lst]
def in_circle(x, y, radius=1):
    """Return True if point (x, y) lies strictly inside the circle of the
    given radius centred at the origin, False otherwise."""
    squared_distance = x * x + y * y
    return squared_distance < radius * radius
import hashlib
def seed_hash(*args):
    """Derive a deterministic 31-bit integer hash from all args, for use
    as a random seed. (MD5 is fine here: not security-sensitive.)"""
    digest = hashlib.md5(str(args).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 31)
def copy(grille):
    """
    Copy a grid: a new outer list whose rows are shallow copies of the
    original rows.
    """
    return [row[:] for row in grille]
def ft_to_m(ft) -> float:
    """Converts feet to meters (using 1 m = 3.28084 ft)."""
    feet_per_meter = 3.28084
    return ft / feet_per_meter
def round_down(address, align):
    """round_down(address, align) -> int

    Round ``address`` down to the nearest multiple of ``align``
    (``align`` must be a power of two — a bitmask is used).
    """
    mask = ~(align - 1)
    return address & mask
def get_fraction_of_tweets_in_language(tweets):
    """Returns the fraction of each language in a tweet dataframe.

    Args:
        tweets (pandas.DataFrame): Tweet DataFrame as returned by
            `get_latest_tweets` (must have a 'lang' column)
    Returns:
        language_fractions (dict): {languagecode (str): fraction (float)}
    """
    return tweets['lang'].value_counts(normalize=True).to_dict()
import tarfile
def is_tarfile(filename):
    """Tell whether the file is a tar ball (delegates to the tarfile module)."""
    return tarfile.is_tarfile(filename)
def get_heading_text(tag):
    """
    Extract the text of a heading, discarding the "[edit]" suffix.
    May need to be modified for more complex headings.
    :param tag: a Tag object; should be one of the <h?> tags.
    :return: the actual/clean text in the tag
    """
    full_text = tag.get_text()
    clean_text, _, _ = full_text.partition('[')
    return clean_text
def from_vector(x):
    """Convert the given numpy array to a flat python list.

    Args:
        x (np.ndarray): The value to convert
    Returns:
        List: The converted value
    """
    flat = x.reshape(-1)
    return flat.tolist()
from typing import Any
def is_unicode_string(val: Any) -> bool:
    """Return True if `val` is str-like (has an .encode method), False for
    a bytes-like object or anything else."""
    has_encode = hasattr(val, 'encode')
    return has_encode
def get_user_attributes_from_ldap(ldap_connection, ldap_base_dn, login, attribute):
    """Return the requested attribute for a user, or None.

    :param ldap3.Connection ldap_connection: the ldap client connection
    :param str ldap_base_dn: the domain name in LDAP format
    :param str login: the login (matched against sAMAccountName)
    :param str attribute: the attribute to query
    :return: the attribute value(s) from the first entry, or None when the
        connection is missing or the search found nothing
    """
    search_ok = False
    if ldap_connection:
        search_ok = ldap_connection.search(
            ldap_base_dn,
            '(sAMAccountName=%s)' % login,
            attributes=attribute,
        )
    if search_ok:
        entries = ldap_connection.response
        return entries[0]['attributes'][attribute]
    return None
def compute_cn_matching_score(matching_cn):
    """
    Computes the constant-features matching score.

    Args:
        matching_cn: Set of constant features that exist in both the query
            and the related formula.
    Returns:
        The constant matching score (the number of shared constants).
    """
    score = len(matching_cn)
    return score
def clean_data(df, new_name=None):
    """
    Perform basic cleaning on a WhatsApp chat DataFrame (in place).

    :param df: DataFrame extracted from the given WhatsApp chats
    :param new_name: optional list of replacement sender names
    :return: the cleaned DataFrame (indexed by 'time', rows without text
        dropped, sender names optionally replaced)
    """
    if new_name:
        current_names = df["from"].unique().tolist()
        df.replace(current_names, new_name, inplace=True)
    df.dropna(subset=['text'], inplace=True)
    df.set_index('time', inplace=True, drop=False)
    return df
import torch
def collate_test(batch):
    """
    Collate function preparing the testing batch for DCL testing during
    training.

    :param batch: list of __get_item__() outputs; length equals the batch
        size, each item being (image, label).
    :return: (stacked image tensor, list of labels)
    """
    images = [sample[0] for sample in batch]
    labels = [sample[1] for sample in batch]
    return torch.stack(images, 0), labels
def get_path(urn):
    """Return the API path for the repo identified by *urn*."""
    return f"/repos/{urn}"
def get_current_week_days(today):
    """
    Returns the datetimes for all days in the current work week as strings.

    Walks backwards from *today* until the most recently appended day is a
    Monday (day_of_week == 0), including that Monday's predecessor step.

    :param today: today as a pendulum-style datetime (needs day_of_week,
        subtract, to_date_string)
    :return: string-formatted datetimes (max 8 entries)
    """
    days = [today]
    for offset in range(1, 8):
        reached_week_start = days[-1].day_of_week == 0
        days.append(today.subtract(days=offset))
        if reached_week_start:
            break
    return [d.to_date_string() for d in days]
import uuid
def rpc_wrapper(command, rpc_id=None):
    """Wrap a command in an `rpc_request` envelope labelled with `rpc_id`
    (a fresh UUID string when rpc_id is falsy)."""
    label = rpc_id or str(uuid.uuid4())
    return {
        'kind': 'rpc_request',
        'args': {'label': label},
        'body': [command],
    }
def _compare_logical_disks(ld1, ld2):
"""Compares the two logical disks provided based on size."""
return ld1['size_gb'] - ld2['size_gb'] | 29a51ee7241239a2a02739db203e1c481c462568 | 121,255 |
def _find_duplicate_variables(strings):
"""Find all string variables that appear more than once."""
seen = set()
duplicates = []
for string in strings:
var = string.variable
if var in seen:
duplicates.append(var)
else:
seen.add(var)
return duplicates | 4e52b3e9bf8dba829d9719e0dd222ab9d635db08 | 121,260 |
def write_aws_credentials(aws_credentials, credentials_file):
    """
    Write the AWS credentials config out to *credentials_file* as an ini
    file; prints a confirmation and returns True.
    """
    with open(credentials_file, 'w') as handle:
        aws_credentials.write(handle)
    print('Written credentials out to {}'.format(credentials_file))
    return True
from typing import Dict
from typing import Any
def flatten(data: dict, prefix: str = "") -> Dict[str, Any]:
    """
    Recursively flatten a dict into the bracketed representation used in
    Moodle/PHP form encoding.

    >>> flatten({"courseids": [1, 2, 3]})
    {'courseids[0]': 1, 'courseids[1]': 2, 'courseids[2]': 3}
    >>> flatten({"grades": [{"userid": 1, "grade": 1}]})
    {'grades[0][userid]': 1, 'grades[0][grade]': 1}
    >>> flatten({})
    {}
    """
    result: Dict[str, Any] = {}
    for key, value in data.items():
        full_key = f"{prefix}[{key}]" if prefix else key
        if isinstance(value, dict):
            result.update(flatten(value, prefix=full_key))
        elif isinstance(value, list):
            # Lists are treated as dicts keyed by their indices.
            result.update(flatten(dict(enumerate(value)), prefix=full_key))
        else:
            result[full_key] = value
    return result
def param_defaults(param, **defaults):
    """Add keys with default values if absent.

    Parameters
    ----------
    param : dict
        The dictionary of parameters.
    **defaults : keyword arguments
        The default values of the missing keys.

    Returns
    -------
    out : dict
        A shallow copy of the parameters with the specified (shallow)
        defaults filled in; values in *param* take precedence.
    """
    merged = dict(defaults)
    merged.update(param)
    return merged
def _versionTuple(versionStr):
"""Returns a tuple of int's (1, 81, 3) from a string version '1.81.03'
Tuples allow safe version comparisons (unlike strings).
"""
try:
v = (versionStr.strip('.') + '.0.0.0').split('.')[:3]
except (AttributeError, ValueError):
raise ValueError('Bad version string: `{}`'.format(versionStr))
return int(v[0]), int(v[1]), int(v[2]) | 3bfab906261a67c739dcb4d2f2c83fd14af1fa9c | 121,282 |
def get_epochs(logs_dict: dict, metric: str):
    """Return the sorted epoch numbers at which *metric* was evaluated
    (test_results present and the metric value not None)."""
    epoch_results = logs_dict['epoch_results']
    evaluated = [
        int(epoch)
        for epoch, entry in epoch_results.items()
        if entry['test_results'] and entry['test_results'][metric] is not None
    ]
    return sorted(evaluated)
from pathlib import Path
def getFiles(path, suffix=None, prefix=None, parent=None, recursive=False, hiddenFiles=False):
    """
    Collect files under *path*, optionally filtered and recursive.

    Args:
        path (:class:`pathlib.Path` or :class:`str`): directory to scan
        suffix (str): keep only names ending with this suffix (default: None)
        prefix (str): keep only names starting with this prefix (default: None)
        parent (str): keep only entries whose parent directory has this name
        recursive (bool): descend into child directories?
        hiddenFiles (bool): include hidden (dot-prefixed) files? When True
            and a prefix is set, names starting with '.' + prefix also match.

    Returns:
        :class:`list` of :class:`pathlib.Path`
    """
    # Accept plain strings by converting them to Path objects.
    if not isinstance(path, Path):
        path = Path(path)

    def _matches(entry):
        # Mirrors the original filter chain: suffix, then parent, then prefix
        # (with the hidden-file special case), then hidden-file exclusion.
        name = entry.parts[-1]
        if suffix is not None and not entry.name.endswith(suffix):
            return False
        if parent is not None and entry.parts[-2] != parent:
            return False
        if prefix is not None and not name.startswith(prefix):
            # Hidden files may match via '.' + prefix.
            return hiddenFiles and name.startswith('.' + prefix)
        if not hiddenFiles and name.startswith('.'):
            return False
        return True

    collected = []
    for entry in path.iterdir():
        if entry.is_dir() and recursive:
            collected += getFiles(entry, suffix=suffix, prefix=prefix, parent=parent,
                                  recursive=recursive, hiddenFiles=hiddenFiles)
        elif _matches(entry):
            # NOTE(review): when recursive=False, directories are filtered like
            # files (original behavior) — confirm that is intended.
            collected.append(entry)
    return collected
def _getPlatesAtAPO(plates):
"""Returns lists of plates at APO and outside it."""
atAPO = []
notAtAPO = []
for plate in plates:
if plate.getLocation() == 'APO':
atAPO.append(plate)
else:
notAtAPO.append(plate)
return atAPO, notAtAPO | c7bc91606b3417416aedb28185e621b6bbb8d62e | 121,301 |
def equal(seq):
    """Determine whether a sequence holds identical elements
    (an empty sequence counts as equal)."""
    distinct = set(seq)
    return len(distinct) <= 1
def config_bootstrap_bash(name, commands):
    """
    Generate an EMR bootstrap block that executes a bash script built from
    a set of commands chained with '&&'.
    :param name: name of the bootstrap action
    :param commands: commands to chain together
    :return: bootstrap-action dict
    """
    chained_script = ' && '.join(commands)
    return {
        'Name': name,
        'ScriptBootstrapAction': {
            'Path': 'file:///bin/bash',
            'Args': ['-c', chained_script],
        },
    }
def intbreak(n):
    """Return the list of decimal digits of a given (non-negative) integer."""
    return list(map(int, str(n)))
def update_partition(b, ni, r, s, M, M_r_row, M_s_row, M_r_col, M_s_col, d_out_new, d_in_new, d_new):
    """Move node *ni* to the proposed block and splice the precomputed
    rows/columns into the edge-count matrix.

    Parameters
    ----------
    b : ndarray (int)
        current block assignment for each node (updated in place)
    ni : int
        index of the node being moved
    r : int
        current block of the node
    s : int
        proposed block of the node
    M : ndarray or sparse matrix (int), shape = (#blocks, #blocks)
        edge-count matrix between all blocks (updated in place)
    M_r_row, M_s_row : ndarray or sparse matrix (int)
        new rows r and s of the edge-count matrix under the proposal
    M_r_col, M_s_col : ndarray or sparse matrix (int)
        new columns r and s of the edge-count matrix under the proposal
    d_out_new, d_in_new, d_new : ndarray (int)
        new out/in/total degree of each block under the proposal
        (passed through unchanged)

    Returns
    -------
    (b, M, d_out_new, d_in_new, d_new) after the move.
    """
    b[ni] = s
    # Splice in the affected rows, then the affected columns; the column
    # proposals may arrive flat, so reshape them to the column-slice shape.
    M[r, :] = M_r_row
    M[s, :] = M_s_row
    M[:, r] = M_r_col.reshape(M[:, r].shape)
    M[:, s] = M_s_col.reshape(M[:, s].shape)
    return b, M, d_out_new, d_in_new, d_new
def cache_filename(product, day):
    """Helper to build the cache file name for (product, day).

    :param product: product of the data to cache
    :type product: str
    :param day: day of the data to cache
    :type day: datetime.datetime
    :return: 'cache-<product>-<ISO timestamp>'
    :rtype: str
    """
    return f"cache-{product}-{day.isoformat()}"
def is_number(obj):
    """Check whether obj is an int or float instance.

    NOTE(review): bool is a subclass of int, so True/False also pass;
    complex and Decimal do not — confirm that is intended.
    """
    return isinstance(obj, (int, float))
def qsub_sanitize_job_name(testjobname):
    """Ensure a qsub job name begins with a letter by prefixing 'J' when
    it does not (numbers and punctuation are not allowed first).

    >>> qsub_sanitize_job_name('01')
    'J01'
    >>> qsub_sanitize_job_name('a01')
    'a01'
    """
    return testjobname if testjobname[0].isalpha() else 'J' + testjobname
def _get_include_paths(properties):
"""
Get a list of paths that need to be included for ComputedStyleBase.
"""
include_paths = set()
for property_ in properties:
include_paths.update(property_['include_paths'])
return list(sorted(include_paths)) | 67991f8c5a2b0ff64e5d8510995afdec7ffc72f5 | 121,321 |
def positive_number_to_smt(number):
    """
    Print a floating-point number in plain decimal notation (i.e., prevent
    scientific notation). Adapted from https://stackoverflow.com/a/45604186.
    :param number: floating-point number
    :return: string in decimal notation
    """
    mantissa_exp = str(number).split('e')
    if len(mantissa_exp) == 1:
        # repr already has no exponent — return it verbatim.
        return mantissa_exp[0]
    base = float(mantissa_exp[0])
    exponent = int(mantissa_exp[1])
    digits = str(base).replace('.', '')
    if exponent > 0:
        # Pad with zeros beyond the mantissa's fractional digits.
        frac_len = len(str(base).split('.')[1])
        return digits + '0' * abs(exponent - frac_len)
    if exponent < 0:
        # NOTE: the trailing '0' from the mantissa repr (e.g. '1.0' -> '10')
        # is preserved, matching the original behavior.
        return '0.' + '0' * (abs(exponent) - 1) + digits
    return ''
from typing import List
import re
import string
def split_into_words(text) -> List[str]:
    """
    Convert text into a word list, collapsing any run of whitespace to a
    single half-width space.
    :param str text: Text to be converted into words.
    :rtype: List[str]
    :return: Converted word list ([] for empty/whitespace-only input).
    """
    stripped = text.strip()
    collapsed = re.sub(re.compile(r'[{0}]+'.format(string.whitespace)), ' ', stripped)
    if not collapsed:
        return []
    return collapsed.split(' ')
def _get_param_value(param, request):
"""Gets the value for the parameter.
Args:
param: a PipelineParameter instance describing the parameter
request: the PipelineBuildRequest instance
Returns:
val: the parameter value
"""
if param.param_str in request.parameters:
# User-set parameter
val = request.parameters[param.param_str]
elif param.has_set_value:
# Pipeline-set parameter
val = param.set_value
else:
# Module-default value
val = param.default_value
return val | b97a25a2d63fb42c24ce09dc5419c8b07da28d3b | 121,327 |
def merge(dicts):
    """Merge multiple dictionaries into a new one, left to right
    (later keys win)."""
    return {key: value
            for mapping in dicts
            for key, value in mapping.items()}
from typing import Tuple
def compute_ever(length: float, net_ele: float) -> Tuple[float, float]:
    """
    Compute the laps and total length required for an everesting
    (8848 m of cumulative climbing).
    :param length: Length of one uphill in km.
    :type length: float
    :param net_ele: Net elevation gain of one uphill.
    :type net_ele: float
    :return: (number of laps, total length of all laps — up and down).
    :rtype: tuple[float, float]
    """
    laps: float = 8848 / net_ele
    total_length: float = 2 * length * laps
    return laps, total_length
def constructor_dict(device_superclass):
    """
    Map the names of *device_superclass*'s direct subclasses to the class
    objects themselves. Useful for constructing device class instances
    from a name.
    """
    return {subclass.__name__: subclass
            for subclass in device_superclass.__subclasses__()}
def with_init(attrs, defaults=None):
    """
    A class decorator that wraps a class's __init__ so that *attrs* are
    set first from keyword arguments (falling back to *defaults*), then
    the original __init__ runs with the remaining arguments.

    :param attrs: Attribute names to handle.
    :type attrs: Iterable of native strings.
    :param defaults: Default values if attributes are omitted on
        instantiation.
    :type defaults: `dict` or `None`
    :raises ValueError: at instantiation time, when an attr has neither a
        keyword argument nor a default.
    """
    fallback = {} if defaults is None else defaults

    def _wrapped_init(self, *args, **kw):
        for name in attrs:
            if name in kw:
                value = kw.pop(name)
            elif name in fallback:
                value = fallback[name]
            else:
                raise ValueError("Missing value for '{0}'.".format(name))
            setattr(self, name, value)
        # Delegate the remaining args to the class's original __init__.
        self.__original_init__(*args, **kw)

    def _decorate(cl):
        cl.__original_init__ = cl.__init__
        cl.__init__ = _wrapped_init
        return cl

    return _decorate
def spacy_tag_to_dict(tag):
    """
    Create a dict from an extended spaCy pos tag.
    :param tag: tag like "Definite=Ind|Gender=Masc|Number=Sing"
    :return: e.g. {'Definite': 'Ind', 'Gender': 'Masc', 'Number': 'Sing'}
        ({} for empty/None tags or tags without '=')
    :rtype: dict
    """
    if not tag or '=' not in tag:
        return {}
    return dict(part.split('=') for part in tag.split('|'))
import math
def bearing(p0, p1):
    """
    Compass bearing in degrees [0, 360) from point *p0* to point *p1*,
    where (dx, dy) = p1 - p0, north (+y) is 0 and east (+x) is 90.

    Bug fix: the previous asin + manual-quadrant version returned plain
    ints for quadrants 1/4 (e.g. ``90 - 45 == 45``), so every doctest
    below (which expects floats such as ``45.0``) failed, and the
    ``int()`` truncation could be off by one for near-integer angles.
    ``atan2`` handles all four quadrants directly and also avoids the
    ZeroDivisionError the old code raised for identical points.

    >>> bearing((0, 0), (0, 1))
    0.0
    >>> bearing((0, 0), (1, 1))
    45.0
    >>> bearing((0, 0), (1, 0))
    90.0
    >>> bearing((0, 0), (1, -1))
    135.0
    >>> bearing((0, 0), (0, -1))
    180.0
    >>> bearing((0, 0), (-1, -1))
    225.0
    >>> bearing((0, 0), (-1, 0))
    270.0
    >>> bearing((0, 0), (-1, 1))
    315.0
    """
    dx = float(p1[0]) - float(p0[0])
    dy = float(p1[1]) - float(p0[1])
    # atan2(dx, dy) measures the angle clockwise from the +y (north) axis.
    return math.degrees(math.atan2(dx, dy)) % 360.0
def tuples(x: list, y: list) -> list:
    """Merge two same-length lists into a list of pairs — used in
    @pytest.mark.parametrize.

    Args:
        x (list): first list (same shape as *y*)
        y (list): second list (same shape as *x*)
    Returns:
        list of tuples ``[(x[0], y[0]), (x[1], y[1]), ...]``
    """
    # zip() is the idiomatic pairwise merge; for equal-length inputs it is
    # identical to the old index-based loop.
    return list(zip(x, y))
def secret_sort(secret_index):
    """
    Build a sorter keyed on a fixed element index.

    :param secret_index: secret index to sort by (integer)
    :return: callable that takes a list of lists of numbers and returns it
        sorted by ``sublist[secret_index]``
    """
    def _sorter(rows):
        return sorted(rows, key=lambda row: row[secret_index])

    return _sorter
import socket
def get_ip_address_and_hostname(hostname=None):
    """Resolve a hostname to its IPv4 address.

    :param hostname: host to resolve; ``None`` means the local machine's
        hostname
    :returns: ip address, hostname
    :rtype: str, str
    """
    if hostname is None:
        hostname = socket.gethostname()
    # gethostbyname performs a DNS/hosts lookup and returns an IPv4 string.
    return socket.gethostbyname(hostname), hostname
def createInpCfg(inp):
    """
    Create input layer configuration.

    Parameters
    ----------
    inp : Input layer (expects ``.name`` like ``"input_1:0"`` and a
        ``.shape`` exposing ``as_list()``, as Keras/TF tensors do).

    Returns
    -------
    cfg : Dictionary representing the configuration of the layer.
    """
    # Strip the tensor output index (":0") from the name.
    layer_name = inp.name.split(':')[0]
    return {
        'name': layer_name,
        'class_name': 'InputLayer',
        'config': {
            'batch_input_shape': tuple(inp.shape.as_list()),
            'dtype': 'float32',
            'sparse': False,
            'name': layer_name,
        },
        'inbound_nodes': [],
    }
def cauchy(x0, gx, x):
    """Unnormalized 1-D Cauchy (Lorentzian) profile, peaking at 1 when
    ``x == x0``.

    NOTE: the 1/pi normalization of the true Cauchy PDF is deliberately
    omitted here. See http://en.wikipedia.org/wiki/Cauchy_distribution
    """
    delta = x - x0
    width_sq = gx * gx
    return width_sq / (delta * delta + width_sq)
def _hello_file(tmpdir):
"""Return a temp file to hash containing "hello"."""
file = tmpdir / 'hashable'
file.write_text('hello')
return file | 93f95bf23243aa1cb8d273b16cec20bde92c181e | 121,356 |
def flatten_dict(x, title=''):
    """
    Flatten a nested dict into a single level, joining nested keys with '_'.

    :param x: dict to flatten (values may themselves be dicts)
    :param title: prefix accumulated from enclosing keys (used by recursion)
    :return: flat dict whose keys are the '_'-joined key paths
    """
    flat = {}
    for key, value in x.items():
        compound_key = key if title == '' else f'{title}_{key}'
        if isinstance(value, dict):
            # Recurse, carrying the compound key as the new prefix.
            flat.update(flatten_dict(value, compound_key))
        else:
            flat[compound_key] = value
    return flat
import re
def parse_relaxation(relaxation):
    """
    Verify that *relaxation* is formatted as HH:MM:SS and split it.

    :param relaxation: string representing the grace period, e.g. "01:30:00"
    :return: tuple consisting of integers representing the hour, minute and second
    :raises AssertionError: if the string is not exactly HH:MM:SS
    """
    # fullmatch (not match) so trailing garbage such as "01:02:03x" is
    # rejected; re.match only anchors at the start of the string, which
    # defeated the "verify the format" intent.
    assert re.fullmatch(r"[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", relaxation), \
        'Relaxation is expected in a form HH:MM:SS, ' \
        'but {} was provided'.format(relaxation)
    hours, minutes, seconds = relaxation.split(':')
    return int(hours), int(minutes), int(seconds)
def noop(arg):
    """Identity function: return *arg* unchanged, with no side effects."""
    return arg
def exhaust(fn, x):
    """Repeatedly apply *fn* to *x* until the value reaches a fixed point
    (i.e. a further application no longer changes it)."""
    current = x
    while True:
        candidate = fn(current)
        if candidate == current:
            # Like the original, return the latest fn() output.
            return candidate
        current = candidate
from typing import Literal
import hashlib
def hash(
    filepath: str, method: Literal["sha1", "md5"] = "sha1", buffer_size: int = 65536
) -> str:
    """
    Calculate a hash of a local file, reading it in chunks.

    Parameters
    ----------
    filepath : str
    method : {'sha1', 'md5'}
    buffer_size : int, optional (default: 65536 byte = 64 KiB)
        in byte

    Returns
    -------
    hash : str
        hex digest of the file's contents
    """
    if method == "sha1":
        digest = hashlib.sha1()
    elif method == "md5":
        digest = hashlib.md5()
    else:
        raise NotImplementedError(
            f"Only md5 and sha1 hashes are known, but '{method}' was specified."
        )
    with open(filepath, "rb") as stream:
        # iter() with a b"" sentinel yields fixed-size chunks until EOF.
        for chunk in iter(lambda: stream.read(buffer_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
import pathlib
def temp_projdir(tmp_path) -> pathlib.Path:
    """Create a project directory under *tmp_path* and return a Path() to it."""
    proj = tmp_path / "dummy_projname"
    proj.mkdir()
    return proj
def sixframe_to_genome(start, end, frame, length):
    """
    Converts 6-frame amino acid sequence coordinates to genomic coordinates. All coordinates are 0-based.
    @param start: amino acid start coordinate
    @param end: amino acid end coordinate (non-inclusive)
    @param frame: reading frame (one of +1, +2, +3, -1, -2, -3)
    @param length: reference genome nucleotide sequence length - this parameter is required for negative frame calculations only
    @return: (start, end) tuple of corresponding genome coordinates
    """
    # Forward frames: each amino acid spans 3 nucleotides; (frame-1) is the
    # 0-based offset of frame +1/+2/+3 from the start of the sequence.
    # NOTE(review): the guard is `frame >= 0` although the docstring says
    # frames are +-1..3; frame 0 would silently take the forward branch.
    if frame >= 0:
        start = 3*start+(frame-1)
        end = 3*end+(frame-1)
    else:
        # Reverse frames: coordinates are mirrored from the 3' end, so the
        # returned start is numerically larger than the returned end.
        # NOTE(review): with frame negative, -(frame-1) ADDS abs(frame)+1
        # (e.g. +2 for frame=-1) rather than subtracting an offset as in the
        # forward case — presumably intentional for the mirrored coordinate
        # system, but worth confirming against a known reverse-frame example.
        start = length-(3*start)-(frame-1)
        end = length-(3*end)-(frame-1)
    return (start, end)
def _extract_errors(log_interpretation):
"""Extract all errors from *log_interpretation*, in no particular order."""
errors = []
for log_type in ('step', 'history', 'task'):
errors.extend(
log_interpretation.get(log_type, {}).get('errors') or ())
return errors | 2eca78f89f0a12d7ad9c79a661f6fc0ca3933271 | 121,386 |
import string
import secrets
def __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols):
"""
Randomly decides where to add padding and which characters to use
:param word_count: Number of words in passphrase
:param pad_length: Number of characters to use for padding
:param digits: True to use digits in padding
:param punctuation: True to use punctuation in padding
:param ignored_symbols: str containing all characters to ignore during padding generation
:return: A tuple containing the padding placements and the size of the character pool used to pad
"""
char_pool = ''
if digits:
char_pool += string.digits
if punctuation:
char_pool += string.punctuation
char_list = [char for char in char_pool if char not in ignored_symbols]
indexes = [index for index in range(word_count + 1)]
placements = {}
for _ in range(pad_length):
idx = secrets.choice(indexes)
if idx not in placements:
placements.update({idx: [secrets.choice(char_list)]})
else:
placements[idx].append(secrets.choice(char_list))
return placements, len(char_list) | 46948940916263d069f8a43adf6a62a842f1d5a1 | 121,387 |
def count_bad_base(seq):
    """
    Return the number of bases in *seq* that are not A/T/C/G
    (case-insensitive), excluding 'N's.

    :param seq: iterable of single-character base symbols (e.g. a string)
    :return: count of symbols outside {A, T, C, G, N}
    """
    # Set membership is O(1); the old commented-out pdb/print debug loop
    # has been removed.
    valid = {'A', 'T', 'C', 'G', 'N'}
    return sum(base.upper() not in valid for base in seq)
def clean_rgb(rgb_df):
    """Clean an RGB dataframe by removing the unneeded 'label' column.

    The drop is performed in place: the caller's dataframe is mutated and
    the same object is returned for convenience.

    Parameters
    ----------
    rgb_df
        RGB dataframe (8X8 or 28X28) to clean

    Returns
    -------
    pandas.core.frame.DataFrame
        Cleaned rgb dataframe (same object as *rgb_df*)
    """
    rgb_df.drop(columns='label', inplace=True)
    return rgb_df
def to_molar_ratio(massfrac_numerator, massfrac_denominator, numerator_mass, denominator_mass):
    """
    Convert per-mass concentrations to molar elemental ratios.
    Be careful with units.

    Parameters
    ----------
    numerator_mass, denominator_mass : float or array-like
        The atomic mass of the numerator and denominator.
    massfrac_numerator, massfrac_denominator : float or array-like
        The per-mass fraction of the numerator and denominator.

    Returns
    -------
    float or array-like : The molar ratio of elements in the material
    """
    # mass fraction / atomic mass = moles (per unit mass of material)
    moles_numerator = massfrac_numerator / numerator_mass
    moles_denominator = massfrac_denominator / denominator_mass
    return moles_numerator / moles_denominator
def in_region(pos, b, e):
    """Return True if ``b <= pos <= e`` (inclusive on both ends)."""
    # Chained comparison replaces `b <= pos and pos <= e`; the old docstring
    # also contained an invalid '\i' escape sequence.
    return b <= pos <= e
def hasTagValue(fluiddb, objectId, path):
    """
    Test if a FluidDB object has a given tag by issuing a HEAD request
    on the object's tag path.
    """
    obj = fluiddb.objects[objectId]
    tag = obj[path]
    return tag.head()
def inc_avg(li):
    """
    Calculate the average of *li* incrementally (running mean).

    Input: a non-empty list.
    Output: average of the list.
    See http://ubuntuincident.wordpress.com/2012/04/25/calculating-the-average-incrementally/ .

    >>> inc_avg([2, 3, 4])
    3.0
    """
    avg = li[0]
    # `count` is how many elements have been folded in after this step.
    for count, value in enumerate(li[1:], start=2):
        avg += (value - avg) / float(count)
    return avg
def isOutOfBounds(row: int, col: int, rowBound: int, colBound: int) -> bool:
    """
    Check if (row, col) falls outside a rowBound x colBound double array
    (valid rows are 0..rowBound-1, valid cols are 0..colBound-1).
    """
    # Return the boolean expression directly instead of if/return True/False.
    return not (0 <= row < rowBound and 0 <= col < colBound)
def set_to_bounds(i_s, i_e, j_s, j_e, low=0, high=1):
    """
    Clamp the given index values to [low, high]. This works only for 2
    dimensional square matrices where low and high are the same in both
    dimensions.

    Arguments:
        i_s : Starting value for row index
        i_e : Ending value for row index
        j_s : Starting value for column index
        j_e : Ending value for column index
        low : Minimum value for index possible
        high : Maximum value for index possible

    Returns:
        (i_s, i_e, j_s, j_e) with start indices clamped up to *low* and
        end indices clamped down to *high*
    """
    # max/min express the clamping directly, replacing four if-statements.
    i_s = max(i_s, low)
    j_s = max(j_s, low)
    i_e = min(i_e, high)
    j_e = min(j_e, high)
    return i_s, i_e, j_s, j_e
import time
def isodate_to_localtime(datestr):
    """Parse the leading 'YYYY-MM-DD HH:MM:SS' portion of *datestr* into a
    struct_time.

    Bug fix: the previous slice ``[:16]`` cut the string off after the
    minutes ('YYYY-MM-DD HH:MM' is 16 chars), so strptime with a ``%S``
    field always raised ValueError; 19 characters are needed to include
    the seconds.

    NOTE(review): despite the name, no timezone conversion is performed —
    the string is parsed as-is.
    """
    return time.strptime(datestr[:19], "%Y-%m-%d %H:%M:%S")
import re
def uppercase_school_categories(name):
    """Adjust school names by certain conventions.

    Changes Hs -> HS and Es -> ES in school name (only when they appear as a
    standalone word suffix) and handles the CICS/YCCS prefixes.

    Parameters
    ----------
    name : str
        School name which should be harmonized.

    Returns
    -------
    str
        Modified school name
    """
    # Raw strings: '\s' in a plain string literal is an invalid escape
    # sequence (SyntaxWarning/DeprecationWarning on modern Python).
    name = re.sub(r'\sHs(?![A-Za-z0-9])', ' HS', name)
    name = re.sub(r'\sEs(?![A-Za-z0-9])', ' ES', name)
    name = re.sub(r'^Cics\s', 'CICS ', name)
    name = re.sub(r'^Yccs\s', 'YCCS ', name)
    return name
import six
def make_hashable(thing):
    """
    Create a representation of some input data with immutable collections,
    so the output is hashable (lists -> tuples, sets/dicts -> frozensets).

    :param thing: The data to create a hashable representation of.
    """
    if isinstance(thing, list):
        return tuple(make_hashable(elem) for elem in thing)
    elif isinstance(thing, set):
        return frozenset(make_hashable(elem) for elem in thing)
    elif isinstance(thing, dict):
        # dict.items() works on both Python 2 and 3, so the six.iteritems
        # compatibility shim is unnecessary here.
        return frozenset((make_hashable(k), make_hashable(v))
                         for k, v in thing.items())
    else:
        # Anything else (incl. tuples) is returned as-is, matching the
        # original behavior.
        return thing
def project(d, ks):
    """Return the subdict of `d` containing only the keys in `ks`.

    :raises KeyError: if any key in *ks* is missing from *d*.
    """
    # dict comprehension instead of dict() over a generator (flake8-C402).
    return {k: d[k] for k in ks}
def get_meta_data(c, form_key):
    """
    :param c: the database cursor
    :param form_key: the ui_table$ui_table_row key from the client message
    :return: feature_bool, subscenario_table, subscenario_id_column:
        feature_bool is a True/False indicating whether the form_key is for a
        feature column; subscenario_table is the relevant subscenario table
        with the IDs and names for the form_key; subscenario_id_column is the
        name of the column containing the subscenario_id

    Get the metadata for each form key (table-rows in the scenario-new view)
    from the 'ui_scenario_detail_table_row_metadata' table. The form key is
    created by concatenating ui_table, $, and the ui_table_row, so that's
    the rule we use here to separate back into ui_table and ui_table_row.
    """
    # Split on the first '$' (raises ValueError on malformed keys, as before).
    sep = form_key.index("$")
    ui_table = form_key[:sep]
    ui_table_row = form_key[sep + 1 :]
    # SECURITY: form_key originates from the client message, so interpolating
    # it into the SQL with str.format was an SQL-injection vector; use
    # parameterized placeholders instead.
    (subscenario_table, subscenario_id_column) = c.execute(
        """SELECT ui_row_db_subscenario_table,
        ui_row_db_subscenario_table_id_column
        FROM ui_scenario_detail_table_row_metadata
        WHERE ui_table = ?
        AND ui_table_row = ?;""",
        (ui_table, ui_table_row),
    ).fetchone()
    feature_bool = ui_table == "features"
    return feature_bool, subscenario_table, subscenario_id_column
def variant_record(variant_id, vcf):
    """Fetch the single VCF record matching *variant_id* ('chrom:pos',
    1-based position)."""
    chrom_part, pos_part = variant_id.split(":")
    # NOTE(review): this strips every 'chr' substring, not just a leading
    # prefix — presumably IDs only ever carry it as a prefix; confirm.
    contig = chrom_part.replace("chr", "")
    position = int(pos_part)
    records = list(vcf.fetch(contig, position - 1, position, reopen=True))
    assert len(records) == 1, f"Genotype retrieval error: {variant_id}"
    return records[0]
def check_param_validation(parsed_param, valid_values):
    """Check whether *parsed_param* contains any valid param value listed in
    *valid_values*.

    If yes, return the first valid value (in *valid_values* order) appearing
    in *parsed_param*; if no, return False. An empty/falsy *valid_values*
    means no restriction, so *parsed_param* is returned unchanged.
    """
    if not valid_values:
        return parsed_param
    return next((value for value in valid_values if value in parsed_param), False)
def n_bins(self):  # noqa
    """Number of bins of the x axis (delegates to the histogram's GetNbinsX).

    :returns: number of bins for x axis
    :rtype: int
    """
    bin_count = self.GetNbinsX()
    return bin_count
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.