content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_photon_energy(wavelengths):
    """Return the energy of a photon of the given wavelength.

    :param wavelengths: wavelength(s) [m]
    :return: photon energy [J] (J = W*s)
    """
    h = 6.62606957 * 10**-34  # Planck constant [J*s]
    c = 299792458  # speed of light [m/s]
    # E = h * nu, with nu = c / lambda
    return h * (c / wavelengths)
def ALL_ELEMENTS_TRUE(*expressions):
    """Build the MongoDB ``$allElementsTrue`` aggregation operator.

    Evaluates an array as a set: true iff no element is false; an empty
    array yields true.  See the MongoDB ``$allElementsTrue`` reference
    for details.

    :param expressions: the array expressions
    :return: aggregation operator dict
    """
    return {'$allElementsTrue': [*expressions]}
def t_regression(b, beta, sb):
    """Return the t statistic for a regression coefficient.

    :param b: estimated coefficient
    :param beta: hypothesized coefficient value under H0
    :param sb: standard error of the coefficient
    :return: t statistic ``(b - beta) / sb``
    """
    return (b - beta) / sb
def compute_auxiliary_reward(past_reward, past_act, n_episode, max_episode_steps):
    """ returns auxiliary rewards for drawing history

    Currently a pass-through that returns ``past_reward`` unchanged; the
    remaining arguments are kept for interface compatibility with callers.
    """
    return past_reward
def sort_two_lists(list1, list2):
    """Sort both lists in parallel, ordered by the values of ``list1``.

    Ties in ``list1`` are broken by the corresponding ``list2`` values.

    :param list1: list whose values define the sort order
    :param list2: list reordered to match ``list1``'s sorting
    :return: tuple ``(sorted_list1, reordered_list2)``
    """
    if not list1:
        # zip(*[]) would make the unpacking below raise; keep empty input valid
        return list(list1), list(list2)
    list1, list2 = (list(t) for t in zip(*sorted(zip(list1, list2))))
    return list1, list2
import os
def find_venv():
    """Get the path of the current virtual environment.

    Returns
    -------
    path : str
        Path to the venv, or `None` if we are not in a virtual environment.
    """
    try:
        return os.environ["VIRTUAL_ENV"]
    except KeyError:
        return None
def cmpTup2(tupA, tupB):
    """Comparator ordering two tuples by their second element.

    Returns -1, 0, or 1 as ``tupA[1]`` is less than, equal to, or
    greater than ``tupB[1]``.
    """
    a, b = tupA[1], tupB[1]
    # classic cmp() idiom: True/False arithmetic yields -1/0/1
    return (a > b) - (a < b)
def _use_cache(outputs, use_cache):
    """During generation, decide whether to pass the `past` variable to the next forward pass."""
    # past state is only available when the model returned more than one output,
    # and an explicit use_cache=False always disables it
    return len(outputs) > 1 and use_cache is not False
def sample_trpo_params(trial):
    """
    Sampler for TRPO hyperparams.

    :param trial: (optuna.trial)
    :return: (dict) keyword arguments for the TRPO constructor
    """
    gamma = trial.suggest_categorical('gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    timesteps_per_batch = trial.suggest_categorical('timesteps_per_batch', [16, 32, 64, 128, 256, 512, 1024, 2048])
    max_kl = trial.suggest_loguniform('max_kl', 0.00000001, 1)
    ent_coef = trial.suggest_loguniform('ent_coef', 0.00000001, 0.1)
    # the study parameter name was misspelled 'lamdba'; fixed to 'lambda'
    lam = trial.suggest_categorical('lambda', [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])
    cg_damping = trial.suggest_loguniform('cg_damping', 1e-5, 1)
    cg_iters = trial.suggest_categorical('cg_iters', [1, 10, 20, 30, 50])
    vf_stepsize = trial.suggest_loguniform('vf_stepsize', 1e-5, 1)
    vf_iters = trial.suggest_categorical('vf_iters', [1, 3, 5, 10, 20])
    return {
        'gamma': gamma,
        'timesteps_per_batch': timesteps_per_batch,
        'max_kl': max_kl,
        'entcoeff': ent_coef,
        'lam': lam,
        'cg_damping': cg_damping,
        'cg_iters': cg_iters,
        'vf_stepsize': vf_stepsize,
        'vf_iters': vf_iters
    }
def check_convergence(new_measure, old_measure, direction, threshold):
    """Check whether the relative performance change falls below a threshold.

    Args:
        new_measure (float): New performance
        old_measure (float): Old performance (must be non-zero)
        direction (str): 'higher' if larger measures are better, otherwise lower is better
        threshold (float): Relative-improvement threshold

    Returns:
        True if the signed relative improvement is below the threshold,
        False otherwise
    """
    sign = 1.0 if direction == 'higher' else -1.0
    relative_change = sign * (new_measure - old_measure) / old_measure
    return relative_change < threshold
def museum_session_key(monkeypatch):
    """
    Mock MuseumPlus session key to prevent having to mock the session
    initialization

    Pytest fixture body: patches the async session-key getter in
    ``passari.museumplus.connection`` to return a fixed fake key.
    """
    # replacement coroutine returning a constant 32-char fake key
    async def mock_get_museum_session_key():
        return "fakefakefakefakefakefakefakefake"
    monkeypatch.setattr(
        "passari.museumplus.connection.get_museum_session_key",
        mock_get_museum_session_key
    )
def get_total_project_memberships(project):
    """Return the total of memberships of a project (members and unaccepted invitations).

    :param project: A project object.
    :return: a number.
    """
    # counts all membership rows, including pending invitations
    return project.memberships.count()
import re
def _parse_human_filesize(m):
    """Parse a human readable file size such as "1240", "200k", "30m".

    Suffixes k, m, g, t, p (case-insensitive) denote successive factors
    of 1024.

    :param m: size string
    :return: size in bytes as int
    :raises ValueError: if the value cannot be parsed
    """
    try:
        return int(m)
    except ValueError as e:
        # accept upper- or lowercase suffixes ("200K" == "200k")
        match = re.match(r"^(\d+)([kmgtp])$", str(m).lower())
        if match:
            digits, suffix = match.groups()
            # 1-based position of the suffix in "kmgtp" is the 1024 exponent
            exponent = "kmgtp".index(suffix) + 1
            return int(digits) * 1024 ** exponent
        raise e
import torch
def log_prob(p, c=1e-5):
    """
    Numerically stable log-probability.

    Shrinks ``p`` toward 0.5 by mixing in a small uniform component ``c``
    before taking the log, so probabilities of exactly 0 or 1 stay finite.
    """
    mixed = (1.0 - c) * p + c / 2
    return torch.log(mixed)
def make_unicode_alphabet():
    """Generate a string of common Unicode characters, excluding combining
    code points.

    :return: concatenation of every character in the included ranges
    """
    include_ranges = [
        (0x0021, 0x0021),
        (0x0023, 0x0026),
        (0x0028, 0x007E),
        (0x00A1, 0x00AC),
        (0x00AE, 0x00FF),
        (0x0100, 0x017F),
        (0x0180, 0x024F),
        (0x2C60, 0x2C7F),
        (0x16A0, 0x16F0),
        (0x0370, 0x0377),
        (0x037A, 0x037E),
        (0x0384, 0x038A),
        (0x038C, 0x038C),
    ]
    # builtin chr() replaces six.unichr: this runs on Python 3 only,
    # which removes the third-party six dependency
    return ''.join(
        chr(code_point)
        for lo, hi in include_ranges
        for code_point in range(lo, hi + 1)
    )
def dict_ajout(D, k, v):
    """Return a copy of dict ``D`` with ``k`` mapped to ``v``.

    dict[alpha:beta] * alpha * beta -> dict[alpha:beta]

    The input dict is left unmodified.
    """
    # D[:] raises TypeError on dicts (slicing is a list operation);
    # use a proper shallow copy instead.
    R = dict(D)
    R[k] = v
    return R
def initialiser_dictionnaire(nom_fichier):
    """Build a dict of word sets from a reference file (words expected
    unaccented and lowercase): key ``k`` maps to the set of all words of
    length ``k``."""
    dico_mots = {}
    with open(nom_fichier, "r") as fichier:
        for ligne in fichier:
            mot = ligne.strip()
            # create the length bucket on first use, then add the word
            dico_mots.setdefault(len(mot), set()).add(mot)
    return dico_mots
def bubble_sort(arr):
    """Sort the list ``arr`` in place using bubble sort and return it."""
    end = len(arr) - 1
    swapped = True
    while swapped:
        swapped = False
        for idx in range(end):
            if arr[idx] > arr[idx + 1]:
                arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]
                swapped = True
        # the largest remaining element has bubbled to position `end`
        end -= 1
    return arr
import sys
def add_stats(parties, template, pretty=False):
    """
    Compute and append additional data from the results.

    Mutates each party dict in-place by adding an 'effective' percentage
    (the party's share of votes among parties at/above the 5% threshold),
    extends the format template with a matching field, and writes vote
    totals to stderr.
    """
    total_vote = 0
    total_effective_vote = 0
    for party in parties:
        total_vote += party["vote"]
        if party["result"] >= 5:
            # only parties clearing the 5% threshold count as "effective"
            total_effective_vote += party["vote"]
    # print total vote stats
    effective_percentage = 100/float(total_vote)*total_effective_vote
    effective_template = "votes: {0:d}\neffective votes: {1:d} ({2:5.2f}%)\n"
    sys.stderr.write(effective_template.format(
        total_vote,
        total_effective_vote,
        effective_percentage))
    # compute percentage with respect to effective votes
    for party in parties:
        if party["result"] >= 5:
            percentage_effective = party["vote"]*100/float(total_effective_vote)
        else:
            percentage_effective = 0
        party["effective"] = percentage_effective
    if pretty:
        template += " : {effective:5.2f}"
    else:
        template += ":{effective:f}"
    return parties, template
import os
def get_model_dirs(model_ws):
    """Creates a new model workspace directory, if it does not exists yet.
    Within the model workspace directory a few subdirectories are created (if
    they don't exist yet):
    - figure
    - cache
    Parameters
    ----------
    model_ws : str
        model workspace.
    Returns
    -------
    figdir : str
        figure directory inside model workspace.
    cachedir : str
        cache directory inside model workspace.
    """
    figdir = os.path.join(model_ws, 'figure')
    cachedir = os.path.join(model_ws, 'cache')
    # makedirs(exist_ok=True) creates model_ws and the subdirectory in one
    # call and avoids the race between an existence check and mkdir.
    os.makedirs(figdir, exist_ok=True)
    os.makedirs(cachedir, exist_ok=True)
    return figdir, cachedir
def get_study(assc, size=5):
    """
    Return the ``size`` most annotated genes from an association dict.
    """
    ranked = sorted(assc, key=lambda gene: len(assc[gene]), reverse=True)
    study = frozenset(ranked[:size])
    print(f"### Using the {size} most annotated genes as study: {','.join(study)} ")
    return study
def _getFileType(path):
    """
    Return a type label derived from the file extension in ``path``,
    e.g. 'MP3', 'WAVE' or 'JAMS'; unrecognized extensions are returned
    unchanged.
    """
    ext = path.rsplit("/", 1)[-1].rsplit(".", 1)[-1]
    labels = {"mp3": "MP3", "wav": "WAVE", "jams": "JAMS"}
    return labels.get(ext, ext)
import os
def pg_environment() -> dict:
    """
    Returns a dictionary of environment variables starting with ``PG``.
    Masks the value of `PGPASSWORD`, if present.
    """
    env = {}
    for key, value in os.environ.items():
        if key.startswith("PG"):
            env[key] = value
    # never expose the actual password value
    if "PGPASSWORD" in env:
        env["PGPASSWORD"] = "***MASKED***"
    return env
import math
def zoom_from_altitude(altitude):
    """Convert an altitude to a map zoom level (really simplistic)."""
    ratio = 35200000 / abs(altitude)
    return round(math.log(ratio) / math.log(2))
import numpy
def get_ndpointer_type(a):
    """
    Build a ctypes-compatible pointer type matching array ``a``.

    Args:
        a(ndarray): array whose dtype, ndim, shape and flags define the
            pointer type.

    Returns:
        (PyCSimpleType): the ``numpy.ctypeslib.ndpointer`` type for ``a``.
    """
    pointer_type = numpy.ctypeslib.ndpointer(
        dtype=a.dtype,
        ndim=a.ndim,
        shape=a.shape,
        flags=a.flags,
    )
    return pointer_type
def _dict_to_dotenv(env_dict):
    """
    Converts a ``dict`` to a .env formatted str.

    Parameters
    ----------
    env_dict : dict
        Key value dictionary representing env variables.

    Returns
    -------
    str
        ``KEY=value`` lines joined by newlines, matching the .env file
        format.
    """
    lines = []
    for key, value in env_dict.items():
        lines.append(key + '=' + str(value))
    return '\n'.join(lines)
import string
def to_valid_filename(s):
    """Converts arbitrary string (for us domain name)
    into a valid file name for caching.

    Characters outside letters, digits and ``-_.() `` are dropped.
    """
    # frozenset gives O(1) membership tests instead of scanning a string
    valid_chars = frozenset("-_.() " + string.ascii_letters + string.digits)
    return ''.join(c for c in s if c in valid_chars)
import torch
def binary_accuracy(y_pred, y_true):
    """Calculates binary accuracy per batch

    ``y_pred`` is reshaped to (N, 2) class scores and ``y_true`` to (N,)
    labels in {0, 1}.  Returns (overall, positive-class, negative-class)
    accuracy; the 1e-5 terms guard against division by zero when a class
    is absent from the batch.
    """
    y_pred = y_pred.view(-1, 2)
    y_true = y_true.view(-1)
    # predicted class = argmax over the two per-sample scores
    y_pred_max = torch.argmax(y_pred, dim=-1)
    correct_pred = (y_pred_max == y_true).float()
    acc = (correct_pred.sum() + 1e-5) / (len(correct_pred) + 1e-5)
    pos_acc = (correct_pred[y_true == 1].sum() + 1e-5) / (len(correct_pred[y_true == 1]) + 1e-5)
    neg_acc = (correct_pred[y_true == 0].sum() + 1e-5) / (len(correct_pred[y_true == 0]) + 1e-5)
    return acc, pos_acc, neg_acc
import numpy
def flatten_numpy(trials_array, flattened_space):
    """Flatten dimensions

    Transforms each row's parameter columns (all but the last column)
    through ``flattened_space.transform`` and re-attaches the last column
    unchanged (presumably the objective value — TODO confirm with callers).
    """
    flattened_points = numpy.array(
        [flattened_space.transform(point[:-1]) for point in trials_array]
    )
    return numpy.concatenate((flattened_points, trials_array[:, -1:]), axis=1)
def count_violations_lipinski(molecular_weight, slogp, num_hbd, num_hba):
    """Count Lipinski rule-of-five violations.

    Lipinski, J Pharmacol Toxicol Methods. 2000 Jul-Aug;44(1):235-49.
    """
    checks = [
        molecular_weight < 150 or molecular_weight > 500,
        slogp > 5,
        num_hbd > 5,
        num_hba > 10,
    ]
    # each failed rule counts as one violation
    return sum(checks)
def gera_codigo_graphviz(arestas, visual=False) -> str:
    """Return Graphviz source for an undirected graph with the given edges.

    To render, run:
        dot -Tpng -O path.dot

    :param arestas: iterable of (A, B) edge pairs
    :param visual: if True, also print the generated source
    """
    txt = '''graph grafo {
    node [shape=box];'''
    for A,B in arestas:
        tmp = f'    {A} -- {B};\n'
        txt += tmp
    txt += '}'
    if visual: print(txt)
    return txt
def get_symbol_idx(symbol_vector, name):
    """
    Find the index of a casadi symbol by its name in a vertcat (vertical concatenation) of symbols.
    If the symbol is a vector instead of a scalar, this method returns the index range of the symbol.
    Returns None if no symbol with that name is found.
    """
    v_len = symbol_vector.size()[0]
    slice_start = 0
    for i in range(v_len):
        # Grow the candidate span until it aligns with a whole symbol;
        # a "slice" entry in info() means the span cuts a symbol in half.
        info = symbol_vector[slice_start:i + 1].info()
        if "slice" not in info:
            if symbol_vector[slice_start:i + 1].name() == name:
                # scalars yield a single index, vectors a (start, end) range
                return (slice_start, i + 1) if i != slice_start else i
            else:
                slice_start = i + 1
    return None
import io
def update_file(path, line_filters, only_if_all_matched=False):
    """Modify a file by matching and replacing individual lines.
    `path` is a string identifying the path of the file to modify.
    `line_filters` is an iterable container of LineFilter objects.
    For each line in the file, each filter is applied in order.
    `only_if_all_matched` is a boolean controlling when the file is rewritten.
    When `only_if_all_matched` is False, the file is always rewritten, regardless whether any line matched any filter.
    When it is True, the file is only rewritten if each filter matched at least one line in the file.
    The function returns whether the file was rewritten or not.
    """
    # buffer the (possibly modified) content; the file is rewritten at the end
    sio_obj = io.StringIO()
    # track, per filter, whether it matched at least one line
    updated = {flt: False for flt in line_filters}
    newlines = None
    with open(path, encoding='utf8') as file_obj:
        for line_no, line in enumerate(file_obj, 1):
            if newlines is None:
                # Capture the newline convention so the rewrite preserves it.
                # NOTE(review): file_obj.newlines can be a tuple for
                # mixed-newline files and None for empty files, which would
                # trip the assert below — confirm expected inputs.
                newlines = file_obj.newlines
            for line_filter in line_filters:
                # filters receive themselves as the first argument
                # (explicit self-style LineFilter protocol)
                if line_filter.matches(line_filter, line, line_no, path):
                    line = line_filter.replace(line_filter, line, line_no, path)
                    updated[line_filter] = True
            sio_obj.write(line)
    for line_filter, is_updated in updated.items():
        if not is_updated:
            line_filter.handle_no_matches(line_filter, path)
    if not only_if_all_matched or all(updated.values()):
        assert isinstance(newlines, str)
        with open(path, 'w', encoding='utf8', newline=newlines) as file_obj:
            file_obj.write(sio_obj.getvalue())
        return True
    return False
def meets_criteria(actual, requested):
    """For each requested resource class, check that ``actual`` provides
    at least the requested amount; unrequested resource classes are
    ignored."""
    return all(
        specifier in actual and actual[specifier] >= requested[specifier]
        for specifier in requested
    )
def sanitize_version(version):
    """
    Take parse_version() output and standardize output from older
    setuptools' parse_version() to match current setuptools.

    Returns a 'major.minor.micro' string, zero-padding missing components.
    """
    if hasattr(version, 'base_version'):
        # modern setuptools: a packaging.version.Version object
        if version.base_version:
            parts = version.base_version.split('.')
        else:
            parts = []
    else:
        # legacy setuptools: an iterable of string components where
        # release markers start with '*'
        parts = []
        for part in version:
            if part.startswith('*'):
                break
            parts.append(part)
    parts = [int(p) for p in parts]
    if len(parts) < 3:
        # pad to at least (major, minor, micro)
        parts += [0] * (3 - len(parts))
    major, minor, micro = parts[:3]
    cleaned_version = '{}.{}.{}'.format(major, minor, micro)
    return cleaned_version
def check_multiple_close(a, b, tol=1e-8):
    """check if a = b*i +- tol where i = 1,2,3,4,...

    :param a: value to test
    :param b: base multiple (presumably positive — see note below)
    :param tol: absolute tolerance
    :return: True if ``a`` is within ``tol`` of an integer multiple of ``b``
    """
    remainder = a % b
    if remainder < tol:
        # a is at or just above a multiple of b
        return True
    else:
        # NOTE(review): for positive b, a % b is always < b, so this assert
        # should be unreachable — confirm b > 0 is guaranteed by callers.
        assert b > remainder, "something wrong."
        if (b - remainder) < tol:
            # a is just below a multiple of b
            return True
    return False
def most_digits(arr):
    """
    Returns the maximum length of any of the numbers in the list
    (0 for an empty list).
    """
    return max((len(str(num)) for num in arr), default=0)
def euclidean(p1, p2):
    """
    Calculate the Euclidean distance between two points (unitless).
    For points in the format (longitude, latitude), this is rarely the correct choice.

    Delegates to ``p1.distance(p2)`` — presumably shapely-style geometry
    objects (TODO confirm against callers).
    """
    return p1.distance(p2)
def _makeMsgRandomFunc(choices):
    """Function factory given `choices`

    Returns an async function that sends a random message from ``choices``
    via the client's ``msgrandom``.  NOTE(review): ``cli.msgrandom`` is
    called with ``cli`` passed explicitly as its first argument — confirm
    this matches the client API.
    """
    async def func(cli, dest, user=None):
        cli.msgrandom(cli, choices, dest, user)
    return func
import ctypes
def _malloc_int32_array(n):
    """
    Return a pointer to allocated C int32 array of length `n'.

    NOTE(review): despite the name and docstring, the element type is
    ``ctypes.c_uint32`` (unsigned) — confirm whether signed int32 was
    intended before changing either side.
    """
    t = ctypes.c_uint32 * n
    return t()
import glob
def get_files(directory, extension):
    """
    Return the files inside ``directory`` whose names end with
    ``.extension``.
    """
    pattern = f'{directory}/*.{extension}'
    return glob.glob(pattern)
def readonly_call_allowed(func):
    """
    Marks a method as allowing for invocation when the registry is in a read only state.
    Only necessary on non-GET methods.
    """
    # flag attribute read by the registry's dispatch logic
    func.__readonly_call_allowed = True
    return func
def check_uniqueness(digits: list) -> bool:
    """
    Checks if elements in the lists are different.
    >>> check_uniqueness([1, 2, 3])
    True
    >>> check_uniqueness([2, 4, 4])
    False
    """
    # a set drops duplicates, so equal sizes mean all elements are unique
    return len(set(digits)) == len(digits)
def mapped_actors(data):
    """
    Creates a symmetric mapping of actors to whom they have acted with.
    Returns a dictionary of form {actor_id_1: {actor_id_2}}
    """
    pairs = {}
    for row in data:
        # record the relation in both directions
        pairs.setdefault(row[0], set()).add(row[1])
        pairs.setdefault(row[1], set()).add(row[0])
    return pairs
def process_input(cast_to_type, variables):
    """ Scales the inputs specified between 0,1.
    Expects numpy arrays to be in the keyword arguments.
    Only for use on public functions.

    :param cast_to_type: numpy dtype the named arrays are cast to
    :param variables: names of keyword arguments to cast and scale by 1/255
    """
    from functools import wraps

    def decorator(f):
        @wraps(f)  # preserve the wrapped function's name/docstring
        def wrapper_function(*args, **kwargs):
            for var_name in variables:
                try:
                    arr = kwargs[var_name]
                except KeyError:
                    raise Exception('{var} must be called via a keyword argument.'.format(var=var_name))
                if arr.dtype != cast_to_type:
                    arr = arr.astype(cast_to_type)
                kwargs[var_name] = arr / 255.  # replace with scaled version
            return f(*args, **kwargs)
        return wrapper_function
    return decorator
import os
import sys
import subprocess
import shlex
def check_import(module_name):
    """
    This function is responsible for checking for errors in python code
    It does not check imports

    ``module_name`` is a dotted module name resolved relative to the
    current working directory (e.g. ``pkg.mod`` -> ``<cwd>/pkg/mod.py``).
    Returns:
        Empty string if nothing was found. Otherwise the error
    """
    # map the dotted name to a file path under the current working directory
    module_name = module_name.split('.')
    module_name[-1] = module_name[-1] + '.py'
    module_name = os.path.join(os.getcwd(), *module_name)
    # byte-compile the file with the running interpreter to surface syntax errors
    cmd = f'"{sys.executable}" -m compileall "{module_name}"'
    p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
    out, _ = p.communicate()
    out = out.decode('utf-8')
    if out.lower().startswith('compiling'):
        # drop the leading "Compiling ..." status line; the rest is the error
        out = '\n'.join(out.split('\n')[1:]).strip('* ')
    return out
def make_roi_header(**param):
    """
    Format header data to be written when saving ROI data.

    Args:
        param (dict): integration parameters; the integration method name
            is derived from the first key containing "pos"
            (``<method>_pos...``).

    Returns:
        header (string): '#'-prefixed header lines joined by newlines.
    """
    hdr_list = ['== Integration ROI ==']
    # NOTE(review): raises IndexError if no key contains "pos" — confirm
    # callers always pass such a parameter.
    method = [i for i in param.keys() if "pos" in i][0].split('_pos')[0]
    hdr_list.append('Integration method: {}'.format(method))
    for k, v in param.items():
        hdr_list.append('{}: {}'.format(k, v))
    header = "\n".join(['# ' + i for i in hdr_list])
    return header
def tag_check(
    tags,
    rating,
    score,
    score_range=(-1e15, 1e15),
    included_ratings=frozenset(),
    banned_tags=frozenset(),
    required_tags=frozenset(),
    atleast_tags=frozenset(),
    atleast_num=0,
    ):
    """
    Checks if tags, rating and score match the given requirements.

    :param tags: set of tags on the item
    :param rating: the item's rating; must be in ``included_ratings``
    :param score: the item's score; must fall inside ``score_range``
    :param score_range: (low, high) inclusive score bounds
    :param included_ratings: ratings that are accepted
    :param banned_tags: any overlap with ``tags`` rejects the item
    :param required_tags: all of these must be present in ``tags``
    :param atleast_tags: at least ``atleast_num`` of these must be present
    :param atleast_num: minimum overlap with ``atleast_tags``
    :return: True if every requirement is satisfied

    Note: defaults are immutable (tuple/frozenset) to avoid the shared
    mutable-default-argument pitfall.
    """
    if banned_tags & tags:
        return False
    if required_tags & tags != required_tags:
        return False
    if len(atleast_tags & tags) < atleast_num:
        return False
    if rating not in included_ratings:
        return False
    if int(score) < score_range[0]:
        return False
    if int(score) > score_range[1]:
        return False
    return True
import argparse
def parseargs():
    """
    Parse the command line arguments
    :return: An args map with the parsed arguments
    """
    parser = argparse.ArgumentParser(
        description="Find sources in the data for a day and produce spectra for each suitable source.")
    parser.add_argument("filename", help="The name of the file to be analysed.")
    parser.add_argument("-o", "--output_folder", help="The parent folder to write out the spectra.", default='spectra')
    # NOTE(review): with action='store_true' and default=True this flag can
    # never be False, so passing --extract_only has no effect; if it is
    # meant to be opt-in, the default should be False — confirm intent.
    parser.add_argument("--extract_only", help="Use the previous source finding results to extract spectra", default=True,
                        action='store_true')
    args = parser.parse_args()
    return args
def visualgenome_attributes():
    """VG 200 attributes, collected by TDE (Kaihua Tang et.al., CVPR2020)

    :return: fixed list of Visual Genome attribute name strings.
    """
    return [
        'white', 'black', 'blue', 'green', 'red', 'brown', 'yellow', 'small', 'large', 'wooden',
        'silver', 'orange', 'grey', 'tall', 'long', 'dark', 'pink', 'standing', 'round', 'tan', 'glass', 'here',
        'wood', 'open', 'purple', 'short', 'plastic', 'parked', 'sitting', 'walking', 'striped', 'brick', 'young',
        'gold', 'old', 'hanging', 'empty', 'on', 'bright', 'concrete', 'cloudy', 'colorful', 'one', 'beige', 'bare',
        'wet', 'light', 'square', 'closed', 'stone', 'shiny', 'thin', 'dirty', 'flying', 'smiling', 'painted',
        'thick', 'part', 'sliced', 'playing', 'tennis', 'calm', 'leather', 'distant', 'rectangular', 'looking',
        'grassy', 'dry', 'cement', 'leafy', 'wearing', 'tiled', "man's", 'baseball', 'cooked', 'pictured', 'curved',
        'decorative', 'dead', 'eating', 'paper', 'paved', 'fluffy', 'lit', 'back', 'framed', 'plaid', 'dirt',
        'watching', 'colored', 'stuffed', 'clean', 'in the picture', 'steel', 'stacked', 'covered', 'full', 'three',
        'street', 'flat', 'baby', 'black and white', 'beautiful', 'ceramic', 'present', 'grazing', 'sandy',
        'golden', 'blurry', 'side', 'chocolate', 'wide', 'growing', 'chrome', 'cut', 'bent', 'train', 'holding',
        'water', 'up', 'arched', 'metallic', 'spotted', 'folded', 'electrical', 'pointy', 'running', 'leafless',
        'electric', 'in background', 'rusty', 'furry', 'traffic', 'ripe', 'behind', 'laying', 'rocky', 'tiny',
        'down', 'fresh', 'floral', 'stainless steel', 'high', 'surfing', 'close', 'off', 'leaning', 'moving',
        'multicolored', "woman's", 'pair', 'huge', 'some', 'background', 'chain link', 'checkered', 'top', 'tree',
        'broken', 'maroon', 'iron', 'worn', 'patterned', 'ski', 'overcast', 'waiting', 'rubber', 'riding', 'skinny',
        'grass', 'porcelain', 'adult', 'wire', 'cloudless', 'curly', 'cardboard', 'jumping', 'tile', 'pointed',
        'blond', 'cream', 'four', 'male', 'smooth', 'hazy', 'computer', 'older', 'pine', 'raised', 'many', 'bald',
        'snow covered', 'skateboarding', 'narrow', 'reflective', 'rear', 'khaki', 'extended', 'roman', 'american'
    ]
import os
def get_active_version(home, app):
    """
    Determine the currently active version of an application.

    The app is considered active when ``<home>/bin/<app>`` is a symlink
    pointing inside ``<home>/programs/<app>-<version>/...``; the version
    suffix is returned, or None if any of those conditions fail.
    NOTE(review): os.readlink may return a relative target, which would
    never share a common path with ``home`` — confirm links are absolute.
    """
    bin_path = os.path.join(home, "bin", app)
    if not os.path.islink(bin_path):
        return None
    target_path = os.readlink(bin_path)
    common_path = os.path.commonpath([home, target_path])
    if common_path != home:
        return None
    # path components of the target relative to home, e.g. ['programs', 'app-1.2']
    parts = target_path[1 + len(common_path):].split(os.sep)
    if len(parts) < 2 or parts[0] != "programs":
        return None
    if not parts[1].startswith(app + "-"):
        return None
    # strip '<app>-' to leave just the version string
    return parts[1][len(app) + 1:]
def remove_es_keys(hit):
    """
    Removes ES bookkeeping keys from a hit object in-place

    Args:
        hit (dict): Elasticsearch hit object
    Returns:
        dict: modified Elasticsearch hit object
    """
    hit.pop('_id')  # mandatory key: KeyError if absent, same as before
    hit.pop('_type', None)  # optional key: remove only if present
    return hit
import ast
def is_w_mode_open_call(node):
    """Return True if node represents `rasterio.open(path, "w", ...)`

    :param node: an ``ast.Call`` node
    """
    func = node.func
    if not (isinstance(func, ast.Attribute) and func.attr == "open"):
        return False
    if not (isinstance(func.value, ast.Name) and func.value.id == "rasterio"):
        return False
    if len(node.args) < 2:
        return False
    mode = node.args[1]
    # ast.Str was deprecated in Python 3.8 and removed in 3.12;
    # string literals are ast.Constant nodes with a str value.
    return isinstance(mode, ast.Constant) and mode.value == "w"
def deep_access(x, path):
    """
    Elegant deep access to nested dicts/lists; ``path`` is a dot-separated
    key path such as ``giveme5w.annotated.how.text`` (numeric components
    index into lists).

    :param x: nested structure of dicts and lists
    :param path: dot-separated key path
    :return: the value at the path, or None when traversal hits a
        missing or falsy intermediate node
    """
    current = x
    for part in path.split('.'):
        if not current:
            return None
        if isinstance(current, list):
            current = current[int(part)]
        else:
            current = current.get(part)
    return current
import numpy
def _evaluate_bases(bases, x):
    """Evaluate basis ``bases`` at point ``x`` via ``bases.value(x)``.

    The original function had unreachable fallback code after the return
    statement; it has been removed.
    """
    return bases.value(x)
import os
def find_file(file, directories):
    """
    Search ``directories`` for ``file``.

    Returns ``(full_path, directory)`` for the first match, or
    ``(False, False)`` when the file is not found in any directory.
    """
    for candidate_dir in directories:
        candidate = os.path.join(candidate_dir, file)
        if os.path.isfile(candidate):
            return candidate, candidate_dir
    return False, False
def secured_simple_ping():
    """A valid access token and an appropriate scope are required to access this route.
    Used in tests of token authentication.

    :return: fixed success message string
    """
    return "All good. You only get this message if you're authenticated"
def make_edge(v1, v2):
    """
    Return a canonical edge key: the two vertices as a sorted tuple, so
    the edge from v1 to v2 and from v2 to v1 compare equal.
    """
    lo, hi = sorted((v1, v2))
    return (lo, hi)
def tuple_from_expandable(expandable, field_descriptions):
    """Returns a tuple corresponding to the database order of fields:
    the id first, then each described field's attribute value."""
    values = [expandable.id_]
    for name, _ in field_descriptions:
        values.append(expandable.__dict__[name])
    return tuple(values)
def text_to_bytes(text, encoding='UTF-8', size=None):
    """
    Encode some text or string to a byte array
    :param text: text to encode to bytes
    :param encoding: optional encoding of the passed string. default to utf-8.
    :param size: optional, if given the result is left-padded with 0x00
        bytes up to this size
    :return: a bytes object
    """
    encoded = str(text).encode(encoding)
    return encoded.rjust(size, b'\x00') if size else encoded
def _uint32(x):
    """Truncate ``x`` to an unsigned 32-bit value."""
    return x & 0xFFFF_FFFF
def form_gains(epsilon):
    """
    Turn reflection coefficients into gains.
    Reflection gains are formed via g = 1 + eps
    where eps is the reflection coefficient
    eps = A exp(2j * pi * tau * freqs + 1j * phs)
    Args:
        epsilon : dictionary, ant-pol keys and ndarray values
    Returns:
        gains : dictionary, ant-pol keys and ndarray values
    """
    return {key: 1 + eps for key, eps in epsilon.items()}
def get_audio_config(features_list):
    """
    Converts a list of features into a dictionary understandable by
    `data_extractor.AudioExtractor` class

    :raises TypeError: for unrecognized feature names
    """
    audio_config = dict.fromkeys(('mfcc', 'chroma', 'mel', 'contrast', 'tonnetz'), False)
    for feature in features_list:
        if feature not in audio_config:
            raise TypeError(f"Feature passed: {feature} is not recognized.")
        audio_config[feature] = True
    return audio_config
def compatibility(i, j, i_n, j_n):
    """
    Compatibility function: 1 when the two points are distinct
    (Euclidean distance > 0), 0 when they coincide.
    """
    squared = (i - i_n)**2.0 + (j - j_n)**2.0
    return 1 if squared**0.5 > 0 else 0
import sys
def sub_decorator(func):
    """
    Decorator to run command lines, catch exceptions.

    On any exception the arguments and error are printed and the process
    exits with status -1; otherwise the wrapped function's result is
    returned.

    Args:
        func (undefined):
            Decorator for subprocess run (try catch errors)
    """
    from functools import wraps

    @wraps(func)  # keep the wrapped function's metadata
    def wrapper_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # the old "{} ...{}".format(*args, e) broke whenever
            # len(args) != 1; format the tuple explicitly instead
            print("{} Exception Detected!\n{}".format(args, e))
            sys.exit(-1)
    return wrapper_func
def get_padding(north, south, west, east, padding=10):
    """
    Calculate a reasonable amount of padding for the map.

    :param north: northern edge of the bounding box
    :param south: southern edge of the bounding box
    :param west: western edge of the bounding box
    :param east: eastern edge of the bounding box
    :param padding: percentage of the box extent to pad
    :return: (lat_padding, lon_padding) as rounded ints
    :rtype: tuple
    """
    fraction = padding / 100
    lat_extent = abs(north - south)
    lon_extent = abs(east - west)
    return round(lat_extent * fraction), round(lon_extent * fraction)
def grep_PAW_order(fpot):
    """
    Helper to extract the element order from a PAW potential file:
    the second token of every line whose first token is 'PAW_PBE'.
    """
    order = []
    with open(fpot, 'r') as pfile:
        for line in pfile:
            tokens = line.split()
            if tokens and tokens[0] == 'PAW_PBE':
                order.append(tokens[1])
    return order
def primes(n):
    """ Returns the prime decomposition of n as a list, with a leading 1
    (as existing callers expect).
    This only remains as a useful function, but VkFFT
    allows any prime decomposition, even if performance
    can be better for prime numbers <=13.
    """
    factors = [1]
    assert (n > 0)
    candidate = 2
    while candidate * candidate <= n:
        # divide out each factor completely before moving on
        while n % candidate == 0:
            factors.append(candidate)
            n //= candidate
        candidate += 1
    if n > 1:
        # whatever remains is itself prime
        factors.append(n)
    return factors
import pickle
def load_model(file_path):
    """
    load data from .pkl file from disk
    assuming that data is a list containing [x_train, x_validation, y_train, y_validation, actual_labels,
    predicted_labels]
    :param file_path: file to be loaded with its full path
    :return: model data

    Security note: pickle.load can execute arbitrary code from the file —
    only load trusted files.
    """
    with open(file_path, "br") as load:
        data = pickle.load(load)
    return data
def setlsb(component: int, bit: str) -> int:
    """Set Least Significant Bit of a colour component to ``bit``
    ('0' or '1')."""
    cleared = component & ~1  # drop the existing LSB
    return cleared | int(bit)
import ast
from typing import Dict
def get_sym_to_ast_def_map(gen_ast: ast.FunctionDef) -> Dict[str, ast.AST]:
    """
    Establishes a map between symbols and the AST nodes corresponding to their definitions.
    Since generators in our case only contain top-level assignments and function-defs, the map
    is just a map from the symbol on the LHS / function name, to the AST object corresponding
    to the RHS/function-def respectively.
    We do not worry about global imports / functions here. It is guaranteed that they will be available
    in the compiled version of the generator

    :raises NotImplementedError: for multi-target or non-name assignments,
        or any other top-level statement kind (except ``pass``).
    """
    result: Dict[str, ast.AST] = {}
    for stmt in gen_ast.body:
        if isinstance(stmt, ast.Assign):
            if len(stmt.targets) != 1:
                raise NotImplementedError("Top-Level Assignments should only have a single LHS in generator defs")
            if not isinstance(stmt.targets[0], ast.Name):
                raise NotImplementedError("Only simple names allowed as LHS in top-level assignments in generator defs")
            lhs = stmt.targets[0].id
            # map the assigned name to the whole Assign node (incl. its RHS)
            result[lhs] = stmt
        elif isinstance(stmt, ast.FunctionDef):
            result[stmt.name] = stmt
        elif isinstance(stmt, ast.Pass):
            pass
        else:
            raise NotImplementedError("Top-Level {} not supported in generator defs".format(type(stmt)))
    return result
def xor(a, b):
    """XOR two buffers byte-wise; the longer buffer is truncated."""
    out = bytearray()
    for x, y in zip(a, b):
        out.append(x ^ y)
    return out
def count_short_deals(df):
    """
    6. Count the number of deals taken in a SHORT position.
    :param df: dataframe with a '<DEAL_DIRECTION>' column
    :return: total number of SHORT deals
    """
    # Boolean mask per row; True counts as 1 when summed.
    short_mask = df['<DEAL_DIRECTION>'] == 'SHORT'
    return short_mask.sum()
def PNT2Tidal_Pv12(XA):
    """TaylorT2 1PN quadrupolar tidal coefficient for the v^12 phasing term.

    XA = mass fraction of the object.
    """
    # Powers built by repeated multiplication; the term grouping below is
    # kept exactly as published so float rounding is unchanged.
    XA2 = XA*XA
    XA3 = XA2*XA
    return (15895)/(56)-(4595*XA)/(56) - (5715*XA2)/(28)+(325*XA3)/(14)
def from_cps(codepoints: str) -> str:
    """Build a string from space-separated hexadecimal code points.

    The unicode datafiles often contain entries in this format, which is
    super useful for copy-pasting reference test cases.
    """
    chars = [chr(int(cp, 16)) for cp in codepoints.split()]
    return ''.join(chars)
def longest_repeated_substring(word):
    """Return the longest substring of *word* that occurs again later,
    without overlapping its first occurrence.

    Ties among longest substrings are broken in favour of the one whose
    first occurrence appears earliest in *word*.  Returns "" when no
    character repeats at all.
    """
    # A non-overlapping repeat can be at most half the word, so search
    # lengths downward from there and return on the first hit.
    for size in range(len(word) // 2, 0, -1):
        for start in range(len(word) - size):
            candidate = word[start:start + size]
            if candidate in word[start + size:]:
                return candidate
    return ""
def is_volume_attached(volume):
    """Return True when *volume* is attached (or attaching), else False.

    A volume counts as attached when any entry in its 'Attachments' list
    has a 'State' of 'attached' or 'attaching'.
    """
    attachments = volume.get('Attachments')
    if not attachments:
        return False
    return any(
        att.get('State') in {'attached', 'attaching'}
        for att in attachments
    )
def _get_normalized_tde_config(tde_config):
"""Normalize the TDE configuration of a SQL database.
Arguments:
tde_config (dict): Raw TDE configuration of a SQL database
Returns:
dict: Normalized TDE configuration
"""
tde_info = {}
tde_enabled = False
tde_status = tde_config.get('status')
if tde_status == 'Enabled':
tde_enabled = True
tde_info['tde_enabled'] = tde_enabled
return tde_info | cdd425ed32b7a16ccf7443f41351a3303614811d | 43,772 |
def is_all_o(values):
    """Return True when every element of *values* is the string "O"."""
    return all(val == "O" for val in values)
def get_markdown_title_id(section_title):
    """Derive the HTML anchor id for a markdown section title.

    Arguments:
    section_title -- Section title
    """
    lowered = section_title.lower()
    return lowered.replace(" ", "_")
def ntimes(string, char):
    """Return how many times character 'char' occurs in string."""
    occurrences = string.count(char)
    return occurrences
def encode_unicode_bytes(my_string):
    """Coerce *my_string* to raw bytes (UTF-8 encoded for text).

    Shim function intended for python 3 compatibility mode, and b/c
    PyCurl only takes raw bytes.  Bytes pass through untouched, strings
    are UTF-8 encoded, and any other value is converted via repr() first.

    :param my_string: value to coerce
    :return: ``bytes`` representation of the input
    """
    # Bug fix: check for bytes *before* the repr() fallback.  Previously
    # a bytes input matched `not isinstance(..., str)`, was turned into
    # the string "b'...'" and re-encoded, so the bytes branch was dead.
    if isinstance(my_string, bytes):
        return my_string
    if not isinstance(my_string, str):
        my_string = repr(my_string)
    return my_string.encode('utf-8')
def create_episode(conn, episode):
    """Insert a new row into the ``episode`` table.

    :param conn: open database connection
    :param episode: tuple of (date, id_show, id_corpus, partition, path)
    :return: id (rowid) of the newly inserted episode
    """
    sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)
              VALUES(?,?,?,?,?)'''
    cursor = conn.cursor()
    cursor.execute(sql, episode)
    return cursor.lastrowid
import re
import argparse
def parse_chunksize(size):
    """Parse a chunk-size string such as '10', '4KB' or '2GB' into bytes.

    :param size: digits optionally followed by one of KB/MB/GB/TB
    :raises argparse.ArgumentTypeError: when *size* does not match
    :return: the size in bytes as an int
    """
    match = re.fullmatch(r"(\d+)([KMGT]B)?", size)
    if match is None:
        raise argparse.ArgumentTypeError("invalid size value: '{}'".format(size))
    num, suffix = match.groups("")
    multipliers = {
        "": 1,
        "KB": 1024 ** 1,
        "MB": 1024 ** 2,
        "GB": 1024 ** 3,
        "TB": 1024 ** 4,
    }
    return int(num) * multipliers[suffix]
def ignore_retention_enabled(configurations):
    """Given a bunch of configs, check for the special 'ignore retention' flag.

    :param configurations: iterable of config dicts
    :return: True when any config sets a truthy ``ignore_retention`` key,
        False otherwise (including for an empty iterable)
    """
    # Bug fix: the original `return` sat inside the loop, so only the
    # *first* config was ever inspected; the flag must be honoured
    # wherever it appears in the collection.
    return any(bool(config.get('ignore_retention', False))
               for config in configurations)
def build_unigram_noise(freq):
    """Build the unigram noise distribution from occurrence counts.

    Parameters:
        freq: a tensor of #occurrences of the corresponding index
    Return:
        a tensor of size ntokens whose elements form the probability
        distribution proportional to the counts
    """
    noise = freq / freq.sum()
    # Sanity check: the result must (approximately) sum to one.
    assert abs(noise.sum() - 1) < 0.001
    return noise
import os
def get_file_name_from_path(path):
    """Extract the bare file name (no directory, no extension) from a path.

    If path is `/path/to/file/name.ext` then this function returns `name`.

    Args
    ----
    path (str) : path to a file of interest
    Returns
    -------
    (str) The file name
    """
    base = os.path.basename(path)
    name, _ext = os.path.splitext(base)
    return name
def Attachment_Extension_Check(attach_list_type, attachments, allow_extension_types):
    """ Purpose is to allow file extension to be checked and only allow extensions that are allowed

    :param attach_list_type: 'multi' for a list of attachments, anything
        else is treated as a single attachment
    :param attachments: object(s) exposing a ``name`` attribute
    :param allow_extension_types: allowed extensions (e.g. ['.pdf']);
        defaults to a standard document/image set when None
    :return: True when every checked attachment has an allowed extension
    """
    if allow_extension_types is None:
        allow_extension_types = ['.pdf', '.xls', '.doc', '.jpg', '.png', '.xlsx', '.docx']

    def _extension_allowed(file_name):
        # Take the text after the *last* dot.  The original used
        # split('.')[1], which mis-read multi-dot names such as
        # 'report.v2.pdf' (seeing '.v2') and raised IndexError on
        # names without any dot.
        if '.' not in file_name:
            return False
        att_ext = '.' + file_name.rsplit('.', 1)[1].lower()
        return att_ext in allow_extension_types

    if attach_list_type == 'multi':
        # Every attachment in the list must pass the extension check.
        return all(_extension_allowed(fi.name) for fi in attachments)
    # By default assume a single attachment.
    return _extension_allowed(attachments.name)
import argparse
import os
def parse_args():
    """Parse command line arguments for the GridComp tree generator.

    Returns the parsed options as a dict; raises Exception when the
    requested top-level directory does not exist.
    """
    parser = argparse.ArgumentParser(epilog= "Program to create a hierarchical "
                "tree of GridComps. This is done either by reading "
                "the _GridComp directory names or parsing the GridComp "
                "source code for children names. Output is either an "
                "ascii table or an xml type file readable by Freemind.")
    # top level directory, output type
    # --------------------------------
    parser.add_argument('-c','--color', help='color nodes (edit MAPL_Tree::get_color)', action='store_true')
    parser.add_argument('-d', '--dir', help='top level GridComp directory', default='.')
    parser.add_argument('-f','--format', help='output format: ascii (txt) or mindmap (mm)',
                        choices=['txt','mm'], default='txt')
    parser.add_argument('-F','--full', help='display full tree', action='store_true')
    parser.add_argument('-l','--link', help='add external link to nodes (edit MAPL_Tree::get_link)', action='store_true')
    parser.add_argument('-t','--trim', help='skip non GridComps, shorten names, use bult-in aliases',action='store_true')
    parser.add_argument('-r','--repo', help='shows only the repository hierarchy',action='store_true')
    opts = vars(parser.parse_args())  # vars converts the Namespace to a dict
    # sanity check on the requested root directory
    # --------------------------------------------
    if not os.path.isdir(opts['dir']):
        raise Exception('root directory [%s] does not exist' % opts['dir'])
    return opts
def get_dict_key(dic, n=0):
    """
    Return the first (or nth) key name from a dict.
    """
    keys = list(dic)
    return keys[n]
from typing import Dict
import math
def calculate_pmi(
    compound_dict: dict, word_dic: dict, num_compound: int, num_words: int, normalize: bool = False
) -> Dict[str, float]:
    """Calculate Pointwise Mutual Information between the two words of every word pair in nn_dict.
    Args:
        compound_dict: Dictionary of compounds ("word1 word2") and their count.
            NOTE: this dict is mutated in place — scores overwrite counts.
        word_dic: Dictionary of words and their count.
        num_compound: Number of compounds (currently unused in the computation).
        num_words: Number of words.
        normalize: Whether or not normalize the pmi score. Normalized pmi is referred to as npmi.
    Returns:
        sorted_compound_dict: Dictionary of compounds and their pmi/npmi values, sorted wrt their pmi/npmi.
    """
    # No copy is made here — the updates below mutate the caller's dict.
    tmp_compound_dict = compound_dict
    for compound, count in tmp_compound_dict.items():
        w1w2 = compound.split(" ")
        # To filter out compounds that are rare/unique because of strange/misspelled component words.
        if float(word_dic[w1w2[0]]) > 10 and float(word_dic[w1w2[1]]) > 10:
            # Joint probability of the compound and marginal probabilities of
            # head/modifier, all relative to num_words.
            p_of_c = float(count) / float(num_words)
            p_of_h = float(word_dic[w1w2[0]]) / float(num_words)
            p_of_m = float(word_dic[w1w2[1]]) / float(num_words)
            pmi = math.log(p_of_c / (p_of_h * p_of_m))
            if not normalize:
                tmp_compound_dict[compound] = round(pmi, 2)
            else:
                # npmi = pmi / -log(p(compound)); bounded in [-1, 1].
                npmi = pmi / float(-math.log(p_of_c))
                tmp_compound_dict[compound] = round(npmi, 2)
        else:
            # A component word is too rare — force the score to 0.0.
            tmp_compound_dict[compound] = 0.0
    # Sort compounds by score, highest first.
    sorted_compound_dict = dict(sorted(tmp_compound_dict.items(), key=lambda e: e[1], reverse=True))
    return sorted_compound_dict
import json
def load_json(path, encoding="utf-8"):
    """Read a JSON file from disk and return the parsed object.

    The stored JSON is expected to be in a ``[{}]`` style layout,
    e.g. ``[{'大漠帝国': 132}]``.

    :param path: str, path of the file to read
    :param encoding: text encoding used to open the file
    :return: the deserialized JSON object
    """
    with open(path, 'r', encoding=encoding) as handle:
        return json.load(handle)
import re
def escape_path(key):
    """
    Convert a key to a filename by collapsing each run of spaces,
    backslashes and forward slashes into a single underscore.
    """
    pattern = re.compile(r"[ \\/]+")
    return pattern.sub("_", key)
def assert_lrp_epsilon_param(epsilon, caller):
    """Validate the epsilon parameter passed to constructors inheriting
    from EpsilonRule and LRPEpsilon.

    :param epsilon: the epsilon parameter; must be strictly positive
    :param caller: the class instance calling this assertion function
    :return: epsilon, unchanged, when it is valid
    :raises ValueError: when epsilon <= 0
    """
    if epsilon > 0:
        return epsilon
    err_head = "Constructor call to {} : ".format(caller.__class__.__name__)
    raise ValueError(err_head + "Parameter epsilon must be > 0 but was {}".format(epsilon))
import torch
def _independent(distribution):
""" Make predictive distribution for test set independent.
Parameters
----------
distribution : `torch.distribution.Distribution`
Input distribution.
Returns
-------
distribution : `torch.distribution.Distribution`
Output distribution.
"""
return torch.distributions.normal.Normal(
loc=distribution.mean.flatten(),
scale=distribution.variance.pow(0.5).flatten(),
) | 6443904e46f6598dd8765708c37252e0cc6997b0 | 43,794 |
def caseInsensitiveStringMatch(string1, string2):
    """assumes string1 and string2 are strings
    returns a boolean, True if the strings match irrespective of case,
    else False
    """
    lowered1 = string1.lower()
    lowered2 = string2.lower()
    return lowered1 == lowered2
import os
def delete_file(path):
    """Delete the file at *path*.

    Parameters:
        path (string): path of file to be deleted
    Returns:
        "Success!" on success, otherwise the raised exception object
    """
    try:
        os.remove(path)
    except Exception as err:
        # Mirror the original contract: hand the exception back to the
        # caller instead of raising.
        return err
    return "Success!"
def dmask(d, ks):
    """Return a shallow copy of dictionary ``d`` with every key in ``ks`` removed."""
    return {key: val for key, val in d.items() if key not in ks}
def is_weekend(row):
    """
    Return 1 when the date row falls on a weekend (``dayofweek`` of 5 for
    Saturday or 6 for Sunday), else 0.
    """
    return 1 if row['dayofweek'] in (5, 6) else 0
def roald5(fname):
    """Find three numbers (one int per line in *fname*) that sum to 2020
    and return their product.

    Very similar to tom2, but a generator expression keeps the triple
    search lazy, which makes it a bit faster than a list comprehension.
    Returns None when no triple sums to 2020.
    """
    with open(fname) as handle:
        numbers = [int(line) for line in handle]
    triples = ((x, y, z) for x in numbers for y in numbers for z in numbers)
    for a, b, c in triples:
        if a + b + c == 2020:
            return a * b * c
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.