content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def xml_dict_to_string(item):
    """Flatten a nested XML dictionary into a newline-separated string.

    The empty-string key holds plain text content; every other key is an
    opening tag whose value is a nested dictionary.  Keys are processed in
    sorted order and a matching closing tag is emitted after each subtree.
    """
    parts = []
    for key in sorted(item):
        if key == '':
            # Plain text content — emitted as-is.
            parts.append(item[key])
        else:
            # Opening tag, then the rendered subtree.
            parts.append(key)
            parts.append('\n')
            parts.append(xml_dict_to_string(item[key]))
            parts.append('\n')
            # Derive the tag name from the opening tag, e.g. '<a b="c">' -> 'a'.
            section = key[1:].split(' ')[0]
            if section[-1] == '>':
                section = section[:-1]
            parts.append('</{}>\n'.format(section))
    return ''.join(parts).strip()
from numpy import floor
from numpy.random import rand
def bootstrap_resample(X, size=None):
    """Bootstrap resample an array_like data set (sampling with replacement).

    Input parameters:
    ----------------
    X: array_like
        input data to resample
    size: int, optional
        length of resampled array, equal to len(X) if size is None
    Returns:
    --------
    X_resample: resampled data X
    Raises:
    -------
    ValueError: if X is empty.
    """
    n = X.shape[0]
    if n <= 0:
        raise ValueError("data must contain atleast one measurement.")
    # Fix: compare to None with `is`, not `==` (PEP 8; `==` can be hijacked
    # by array-like operands).
    if size is None:
        size = n
    # Draw `size` uniform indices in [0, n).
    resample_idx = floor(rand(size) * n).astype(int)
    return X[resample_idx]
import numpy
def dtype_limits(dtype):
    """Attempt to determine the min and max values of a datatype.
    :arg dtype: A numpy datatype.
    :returns: a 2-tuple of min, max
    :raises ValueError: If numeric limits could not be determined.
    """
    try:
        # Try floating-point types first.
        limits = numpy.finfo(dtype)
    except ValueError:
        try:
            # Fall back to integer types.
            limits = numpy.iinfo(dtype)
        except ValueError as err:
            raise ValueError(
                "Unable to determine numeric limits from %s" % dtype) from err
    return limits.min, limits.max
import re
def setup_madam(args):
    """Create a Madam parameter dictionary.

    Initialize the Madam parameters from the command line arguments.

    Args:
        args: parsed command-line namespace.  The attributes read below
            (nside, nside_submap, destripe, write_* flags, madam_* options,
            mapmaker_* options, sample_rate) must all be present.
    Returns:
        dict: Madam parameter name -> value, with "T"/"F" strings translated
        to Python booleans at the end.
    """
    pars = {}
    # Destriping resolution defaults to half the map resolution.
    cross = args.nside // 2
    submap = args.nside_submap
    if submap > args.nside:
        # Submaps cannot be finer than the map itself.
        submap = args.nside
    pars["temperature_only"] = False
    pars["force_pol"] = True
    pars["kfirst"] = args.destripe
    pars["write_map"] = args.destripe
    pars["write_binmap"] = args.write_binmap
    pars["write_matrix"] = args.write_wcov_inv
    pars["write_wcov"] = args.write_wcov
    pars["write_hits"] = args.write_hits
    pars["nside_cross"] = cross
    pars["nside_submap"] = submap
    if args.madam_concatenate_messages:
        # Collective communication is fast but requires memory
        pars["concatenate_messages"] = True
        if args.madam_allreduce:
            # Every process will allocate a copy of every observed submap.
            pars["allreduce"] = True
        else:
            # Every process will allocate complete send and receive buffers
            pars["allreduce"] = False
    else:
        # Slow but memory-efficient point-to-point communication. Allocate
        # only enough memory to communicate with one process at a time.
        pars["concatenate_messages"] = False
        pars["allreduce"] = False
    if args.mapmaker_mask:
        pars["file_inmask"] = args.mapmaker_mask
    pars["reassign_submaps"] = True
    pars["pixlim_cross"] = 1e-3
    pars["pixmode_cross"] = 2
    pars["pixlim_map"] = 1e-2
    pars["pixmode_map"] = 2
    # Instead of fixed detector weights, we'll want to use scaled noise
    # PSD:s that include the atmospheric noise
    pars["radiometers"] = True
    pars["noise_weights_from_psd"] = True
    if args.madam_parfile is not None:
        # Parse all available parameters from the supplied
        # Madam parameter file
        pat = re.compile(r"\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*")
        comment = re.compile(r"^#.*")
        with open(args.madam_parfile, "r") as f:
            for line in f:
                if comment.match(line) is None:
                    result = pat.match(line)
                    if result is not None:
                        key, value = result.group(1), result.group(2)
                        pars[key] = value
    # Command-line options take precedence over parfile entries below.
    pars["base_first"] = args.mapmaker_baseline_length
    pars["basis_order"] = args.madam_baseline_order
    # Adaptive preconditioner width
    width_min = args.madam_precond_width_min
    width_max = args.madam_precond_width_max
    if width_min is None:
        # madam-precond-width has a default value
        width_min = args.mapmaker_precond_width
    if width_max is None:
        # madam-precond-width has a default value
        width_max = args.mapmaker_precond_width
    if width_min > width_max:
        # it is not an error for these two to match
        width_min = width_max
    pars["precond_width_min"] = width_min
    pars["precond_width_max"] = width_max
    #
    pars["nside_map"] = args.nside
    if args.mapmaker_noisefilter:
        if args.madam_baseline_order != 0:
            # NOTE(review): the adjacent literals concatenate without a space
            # ("baselineorder") — looks like a typo in the message.
            raise RuntimeError(
                "Madam cannot build a noise filter when baseline"
                "order is higher than zero."
            )
        pars["kfilter"] = True
    else:
        pars["kfilter"] = False
    pars["fsample"] = args.sample_rate
    pars["iter_max"] = args.mapmaker_iter_max
    pars["file_root"] = args.mapmaker_prefix
    # Translate boolean values. Madam knows how to do this but it
    # simplifies pipeline_tools/madam.py
    for key, value in pars.items():
        if value == "T":
            pars[key] = True
        elif value == "F":
            pars[key] = False
    return pars
def velocity_from_transition_matrix(P, x, deltat):
    """Estimate velocity field from transition matrix (i.e. compute expected displacements)
    :param P: transition matrix
    :param x: input data -- `N` points of `M` dimensions in the form of a matrix with dimensions `(N, M)`
    :param deltat: timestep for which `P` was calculated.
    """
    # Expected displacement per point, divided by the timestep.
    displacement = P @ x - x
    return displacement / deltat
def get_cheat_sheet(cheat_sheet):
    """Convert a cheat sheet loaded from .json into a display string.

    :param dictionary cheat_sheet: maps each data-type name to a dict of
        method name -> description.
    :return: a str representation of the cheat sheet, one section per
        data type, with a blank line after each section.
    """
    lines = []
    for data_type, methods in cheat_sheet.items():
        lines.append(f'__**{data_type}**__')
        lines.extend(f'**{name}** - {description}'
                     for name, description in methods.items())
        lines.append('')
    return '\n'.join(lines)
def get_holding_data(holdings, stimulus_data, total_duration, default_holding):
    """Extract holding data from StepProtocol json dict and add amplitude to a holding list.
    Args:
        holdings (list): list of holding amplitudes (nA) to be updated in place
        stimulus_data (dict): stimulus dict from protocol json file containing holding data
        total_duration (float): total duration of the step (ms)
        default_holding (float): default value for the custom holding entry
    Returns:
        a tuple containing
        - float: delay of the holding stimulus (ms)
        - float: duration of the holding stimulus (ms)
    """
    if "holding" not in stimulus_data:
        # No holding entry: the stimulus spans the whole step.
        return 0.0, total_duration
    holding = stimulus_data["holding"]
    amp = holding["amp"]
    # amp can be None in e.g. Rin recipe protocol
    if amp is not None and amp != default_holding:
        holdings.append(amp)
    return holding["delay"], holding["duration"]
def classbook_record1_uuid():
    """Return the fixed UUID of the classbook record1 fixture."""
    return "C1A55800-0000-0000-0000-000000000001"
def my_newton(func, x0, fprime, tol=1e-2):
    """Newton-Raphson root finder; re-implementation of the scipy version
    that uses half the calls!

    Args:
        func: callable, the function whose root is sought.
        x0: float, initial guess (refined in place each iteration).
        fprime: callable, derivative of ``func``.
        tol: step-size convergence tolerance.
    Returns:
        (root_estimate, converged) tuple; ``converged`` is False when the
        30-iteration budget is exhausted.
    """
    # Fix: the original assigned `p0 = x0` and never used it (dead code).
    for _ in range(30):
        fval = func(x0)
        if fval == 0:
            # Landed exactly on a root.
            return x0, True
        step = fval / fprime(x0)
        x0 -= step
        if abs(step) < tol:
            return x0, True
    return x0, False
def has_errors(build):
    """Checks if there are errors present.
    Args:
        build: the whole build object
    Returns:
        True if has errors, else False
    """
    # Fix: return an actual bool as documented; the original returned the raw
    # len() (an int) when the key was present.
    return "errors" in build and len(build["errors"]) > 0
def _segment_less_than(a: str, b: str) -> bool:
"""Return True if a is logically less that b."""
max_len = max(len(a), len(b))
return a.rjust(max_len) < b.rjust(max_len) | 37446b4160dd4a99fa445126545b8fb0014e5814 | 36,896 |
def translate_machine_name(n):
    """Return the machine name unchanged.

    Identity placeholder hook for deployments that need to rewrite
    machine names.
    """
    return n
import shlex
def get_ocropy_param(ocropy_profile:dict)->dict:
    """
    Get all user-specific parameters from the ocropy-profile,
    but only for "ocropus-nlbin","ocropus-gpageseg","ocropus-rpred","ocropus-hocr".
    :param ocropy_profile: profile dict with a 'parameters' mapping keyed by tool name;
        each option entry has a 'value' and optionally an 'action' key.
    :return: dict with parameters and values which are different to the default values,
        as shlex-tokenized argument lists per tool.
    """
    parameters = {}
    # only search for specific func parameters
    for funcall in ["ocropus-nlbin","ocropus-gpageseg","ocropus-rpred","ocropus-hocr"]:
        if funcall in ocropy_profile['parameters']:
            parameters[funcall] = ""
            for param in ocropy_profile['parameters'][funcall]:
                # ignore 'files' param
                if param == "files":
                    continue
                # Go one level deeper if necessary
                # (keys without '-' are grouping sections holding real options).
                if "-" not in param:
                    for next_param in ocropy_profile['parameters'][funcall][param]:
                        if next_param == "files":
                            continue
                        # Only emit options whose value differs from the ""/False default.
                        if ocropy_profile['parameters'][funcall][param][next_param]['value'] != "" and \
                                ocropy_profile['parameters'][funcall][param][next_param]['value'] != False:
                            if "action" in ocropy_profile['parameters'][funcall][param][next_param]:
                                # Flag-style option (store_true etc.): no value token.
                                parameters[funcall] += next_param + " "
                            else:
                                parameters[funcall] += next_param + " " + ocropy_profile['parameters'][funcall][param][next_param][
                                    'value'] + " "
                else:
                    if ocropy_profile['parameters'][funcall][param]['value'] != "" and ocropy_profile['parameters'][funcall][param]['value'] != False:
                        if "action" in ocropy_profile['parameters'][funcall][param]:
                            parameters[funcall] += param + " "
                        else:
                            parameters[funcall] += param+" "+ocropy_profile['parameters'][funcall][param]['value']+" "
            # NOTE(review): str.strip() returns a new string, so this call's
            # result is discarded (a no-op); shlex.split below copes with the
            # trailing space anyway.
            parameters[funcall].strip()
            parameters[funcall] = shlex.split(parameters[funcall])
    return parameters
def fatorial(num=1, show=False):
    """Compute the factorial of *num*.

    :param num: number whose factorial is computed (num <= 0 yields 1,
        since the countdown loop is empty).
    :param show: (optional) when True, print the multiplication steps,
        e.g. ``5 x 4 x 3 x 2 x 1 = ``.
    :return: num!
    """
    n = 1
    for f in range(num, 0, -1):
        if show:
            # Print the factor, with 'x' between factors and '=' after the last.
            print(f'{f}', 'x ' if f > 1 else '', end='')
            if f <= 1:
                print(f'=', end=' ')
        # Fix: multiply exactly once per factor — the original had a second
        # `n *= f` on one branch, inflating the result.
        n *= f
    return n
def divide2(a, b):
    """use ``try... except PossibleException... except`` clause.
    List all possible exception you can imagine, and leave other unpredictable
    exception to ``except clause``.
    """
    try:
        quotient = a * 1.0 / b
    except ZeroDivisionError:
        # Known failure mode: translate to a domain-level error.
        raise ValueError("Zero division Error!")
    except Exception as exc:
        # Anything unforeseen is propagated unchanged.
        raise exc
    return quotient
import argparse
import pathlib
import os
import subprocess
import tempfile
import shutil
import textwrap
def main() -> int:
    """Execute the main routine.

    Builds gocontracts with ``go install``, then packages the resulting
    binary into --release_dir as both a tar.gz archive and a Debian package.

    Returns:
        Process exit code (0 on success).
    Raises:
        RuntimeError: if GOPATH is unset or empty.
        subprocess.CalledProcessError: if any build/packaging command fails.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--release_dir", help="directory where to put the release", required=True)
    args = parser.parse_args()
    release_dir = pathlib.Path(args.release_dir)
    release_dir.mkdir(exist_ok=True, parents=True)
    # set the working directory to the script's directory
    script_dir = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
    subprocess.check_call(["go", "install", "./..."], cwd=script_dir.as_posix())
    if "GOPATH" not in os.environ:
        raise RuntimeError("Expected variable GOPATH in the environment")
    gopaths = os.environ["GOPATH"].split(os.pathsep)
    if not gopaths:
        raise RuntimeError("Expected at least a directory in GOPATH, but got none")
    # Figure out the main gopath
    gopath = pathlib.Path(gopaths[0])
    go_bin_dir = gopath / "bin"
    bin_path = go_bin_dir / "gocontracts"
    # Get gocontracts version
    version = subprocess.check_output([bin_path.as_posix(), "-version"], universal_newlines=True).strip()
    # Release the binary package
    with tempfile.TemporaryDirectory() as tmp_dir:
        bin_package_dir = pathlib.Path(tmp_dir) / "gocontracts-{}-linux-x64".format(version)
        target = bin_package_dir / "bin/gocontracts"
        target.parent.mkdir(parents=True)
        shutil.copy(bin_path.as_posix(), target.as_posix())
        # Tar from the parent so the archive contains the versioned top folder.
        tar_path = bin_package_dir.parent / "gocontracts-{}-linux-x64.tar.gz".format(version)
        subprocess.check_call([
            "tar", "-czf", tar_path.as_posix(), "gocontracts-{}-linux-x64".format(version)],
            cwd=bin_package_dir.parent.as_posix())
        shutil.move(tar_path.as_posix(), (release_dir / tar_path.name).as_posix())
    # Release the debian package
    with tempfile.TemporaryDirectory() as tmp_dir:
        deb_package_dir = pathlib.Path(tmp_dir) / "gocontracts_{}_amd64".format(version)
        target = deb_package_dir / "usr/bin/gocontracts"
        target.parent.mkdir(parents=True)
        shutil.copy(bin_path.as_posix(), target.as_posix())
        # Minimal DEBIAN/control metadata required by dpkg-deb.
        control_pth = deb_package_dir / "DEBIAN/control"
        control_pth.parent.mkdir(parents=True)
        control_pth.write_text(textwrap.dedent('''\
            Package: gocontracts
            Version: {version}
            Maintainer: Marko Ristin (marko.ristin@gmail.com)
            Architecture: amd64
            Description: gocontracts is a tool for design-by-contract in Go.
            '''.format(version=version)))
        subprocess.check_call(["dpkg-deb", "--build", deb_package_dir.as_posix()],
                              cwd=deb_package_dir.parent.as_posix(),
                              stdout=subprocess.DEVNULL)
        deb_pth = deb_package_dir.parent / "gocontracts_{}_amd64.deb".format(version)
        shutil.move(deb_pth.as_posix(), (release_dir / deb_pth.name).as_posix())
    print("Released to: {}".format(release_dir))
    return 0
import numpy as np
def computeAUC(rates):
    """Computes the Area Under Curve (AUC) measure from (recall, precision) values."""
    # Sort by recall so trapezoid integration runs left-to-right.
    ordered = sorted(rates)
    recall, precision = zip(*ordered)
    return np.trapz(precision, x=recall)
import math
def summarize_performance_metrics(performance_metrics):
    """Average each metric across folds.

    Parameters
    ----------
    performance_metrics: pandas.DataFrame
        A frame with (at least) 'metric' and 'value' columns, one row per
        fold/metric pair.  (The original docstring said ``dict``, but the
        attribute access below requires a DataFrame.)

    Returns
    -------
    dict
        Maps each metric name to ``{'mean': ..., 'std': ...}``; std is 0 when
        only a single fold is present (pandas yields NaN there).
    """
    summarized = {}
    for metric in performance_metrics.metric.unique():
        values = performance_metrics[performance_metrics.metric == metric]['value']
        std = values.std()
        if math.isnan(std):
            # A single observation has no sample std; report 0 instead of NaN.
            std = 0
        summarized[metric] = {
            'mean': values.mean(),
            'std': std,
        }
    return summarized
from pathlib import Path
def get_theme_base_dirs_from_settings(theme_base_dirs=None):
    """
    Return base directories that contains all the themes.
    Example:
        >> get_theme_base_dirs_from_settings('/edx/app/ecommerce/ecommerce/themes')
        ['/edx/app/ecommerce/ecommerce/themes']
    Args:
        themes_base_dirs (list of str): Paths to themes base directories.
    Returns:
        (List of Paths): Base theme directory paths
    """
    if not theme_base_dirs:
        # None or empty input: nothing to convert.
        return []
    return [Path(base_dir) for base_dir in theme_base_dirs]
def partition_data(df, validation_size):
    """
    Split a dataframe into training and validation sets, stratified by the
    'lang' column.

    validation_size: float: maximum portion of data per language size of the validation set
    returns: a partition of the data into training and validation
    """
    def lang_sample(frame):
        # Each language group must have at least 10 rows to be sampled.
        assert frame.shape[0] >= 10
        # Deterministic (random_state=0) sample of the training share.
        return frame.sample(frac=1 - validation_size, random_state=0)
    train = df.groupby('lang').apply(lang_sample).reset_index(drop=True, level=0)
    # Validation is the complement of the training rows, matched by index.
    validation = df[~df.index.isin(train.index)]
    print('train unique languages size', train['lang'].unique().size)
    print('validation unique languages size', validation['lang'].unique().size)
    return train, validation
import re
def get_transcripts(transcript_file):
    """
    Parses FusionInspector transcript file and returns dictionary of sequences
    :param str transcript_file: path to transcript FASTA
    :return: de novo assembled transcripts keyed by Trinity ID
    :rtype: dict
    """
    pattern = re.compile(
        r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)")
    transcripts = {}
    with open(transcript_file, 'r') as fa:
        while True:
            # Each record is expected to span two lines: header then sequence.
            try:
                header = next(fa)
                sequence = next(fa)
                assert header.startswith('>')
            except StopIteration:
                break
            except AssertionError:
                print("WARNING: Malformed fusion transcript file")
                continue
            match = pattern.search(header)
            if match:
                transcripts[match.group('ID')] = sequence.strip()
    return transcripts
import argparse
from pathlib import Path
def _get_parsed_args(args=None) -> argparse.Namespace:
    """Return namespace of parsed command-line arguments.

    Args:
        args: optional list of argument strings; None means sys.argv[1:].
    """
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Required input/output locations.
    p.add_argument("--dump-path", required=True, type=Path, help="Path to data dump.")
    p.add_argument("--svs-root", required=True, type=Path, help="Path to SVS files.")
    p.add_argument(
        "--polygon-root",
        type=Path,
        required=True,
        help="Path to *-features.csv files containing nuclei segmentations.",
    )
    p.add_argument(
        "--output-size",
        default=(256, 256),
        nargs=2,
        type=int,
        help="Size of output images (width, height).",
    )
    p.add_argument(
        "--output-dir",
        required=True,
        type=Path,
        help="Path in which to save images.",
    )
    # Optional filters.
    p.add_argument(
        "--label-regex",
        default=None,
        help="Only keep regular expressions that match this label.",
    )
    p.add_argument(
        "--roi-name",
        default=None,
        help="Name of human bounding box label. Assumed to be 500px in size.",
    )
    p.add_argument(
        "--subject-ids",
        nargs="+",
        help="Only use images from these subject IDs. By default, use all images.",
    )
    p.add_argument(
        "--save-polygon-overlays",
        action="store_true",
        help="Save a copy of each tile with the polygon overlaid on the nucleus.",
    )
    return p.parse_args(args)
import json
def fetch_credentials(service_name, creds_file="service-credentials.json"):
    """Fetch credentials for cloud services from file.
    Params
    ======
    - service_name: a Watson service name, e.g. "discovery" or "conversation"
    - creds_file: file containing credentials in JSON format
    Returns
    =======
    creds: dictionary containing credentials for specified service
    """
    with open(creds_file, "r") as handle:
        all_creds = json.load(handle)
    # KeyError propagates if the service is not present in the file.
    return all_creds[service_name]
def Quoted(s):
    """Return the string s within quotes."""
    # str() handles non-string inputs such as ints.
    return '"{}"'.format(s)
def get_user_choice():
    """Show the menu of available actions and return the user's raw input."""
    menu = (
        "\n[1] See Top 50 Airing Anime\n"
        "[2] See Top 50 Upcoming Anime\n"
        "[3] See Anime Recommendations\n"
        "[4] Search for Anime\n"
        "[q] Quit"
    )
    print(menu)
    return input("\n[?] Press any key to continue: ")
import csv
import sys
def save_csv(tabl):
    """Save the list to a csv file.

    tabl[0] selects the target file (truthy -> PvP table, falsy -> PvE);
    tabl[1] is a scalar field and tabl[2]..tabl[6] are 2-element pairs.
    Returns 0 when the row was written, 1 on a write error.
    """
    FNAME = './data/' + ('table_pvp.csv' if tabl[0] else 'table_pve.csv')
    ans = 1
    # NOTE(review): open() is outside the try, so a missing ./data directory
    # raises instead of returning the error code — confirm this is intended.
    with open(FNAME, 'a', newline='', encoding='utf-8') as csvfile:
        f = csv.writer(csvfile, delimiter=';')
        try:
            f.writerow((tabl[1],
                        tabl[2][0], tabl[2][1],
                        tabl[3][0], tabl[3][1],
                        tabl[4][0], tabl[4][1],
                        tabl[5][0], tabl[5][1],
                        tabl[6][0], tabl[6][1],))
            ans = 0
        except Exception as e:
            # Broad catch: report to stderr and fall through with code 1.
            print(e, file=sys.stderr)
    return ans
def sum_and_avg(a, b, message="sum and average", *args, **kwargs):
    """Sum and average all supplied numbers.

    a, b: positional arguments.
    message: default argument, echoed before computing.
    *args: extra positional values (tuple).
    **kwargs: extra keyword values (dict).
    Returns (total, average).
    """
    print(message)
    print(args, type(args), len(args))
    print(kwargs, type(kwargs), len(kwargs))
    total = a + b + sum(args) + sum(kwargs.values())
    count = 2 + len(args) + len(kwargs)
    return total, total / count
import functools
def start_connection(func):
    """Start a connection if connection is None.

    Decorator for async query methods: when the caller supplies no
    ``connection``, open one via ``self.engine.begin()`` (an async context
    manager) and pass it through to the wrapped coroutine.
    """
    @functools.wraps(func)
    async def wrapper(self, query, connection=None):
        if connection:
            # Reuse the caller-supplied connection.
            return await func(self, query=query, connection=connection)
        else:
            # Open (and automatically close) a fresh connection.
            async with self.engine.begin() as connection:
                return await func(self, query=query, connection=connection)
    return wrapper
def required_index(a):
    """
    Return True when the index lists in ``a`` are not all identical, i.e. the
    index carries information and must be included during demultiplexing.
    """
    distinct = {tuple(index) for index in a}
    return len(distinct) != 1
def quadEquationSqrt(a: int=1, b: int=2, c: int=3) -> None:
    """Print the roots of the quadratic equation ``ax² + bx + c``.

    All three coefficients are expected to be integers.  The roots are
    printed, never returned — the function always returns ``None``.
    """
    equation = f"{a}x² + {b}x + {c}"
    print(f"User-given equation is:\n{equation}")
    # pow(..., 0.5) yields a complex value for a negative discriminant.
    sqrt_disc = pow(pow(b, 2) - (4 * a * c), 0.5)
    root1 = (-b - sqrt_disc) / (2 * a)
    root2 = (-b + sqrt_disc) / (2 * a)
    if root1 == -(root2):
        # Symmetric roots print in the compact ± form.
        print(f"The roots of '{equation}' is: ±{root2}")
    else:
        print(f"The roots of '{equation}' are: {root1} and {root2}")
    return None
def communication_filter(quest, difficulty):
    """ Hides a number of steps in the quest equal to the difficulty setting
    2 would hide 50%, one every other step. 3 would hide two steps between each revealed step. """
    # Starting at 0 guarantees the first step is always revealed.
    visible = set(range(0, len(quest), difficulty))
    for index in range(len(quest)):
        if index not in visible:
            quest[index] = "?"
    return quest
def krueger12_eta(lpc):
    """Ratio of 56Ni to total iron-peak yield from Krueger+ 2012
    Fitting formula for K12 central density results. Based on looking at
    iron-peak yields from Khokhlov, Mueller & Hoflich 1993, I assign a
    flat prior eta = 0.9 below a central density of 1e+9 g/cm^3.
    Could probably do better by incorporating other results e.g. from the
    MPA group (Seitenzahl+ 2013). Input lpc (log10 of central density),
    output eta = MNi/(MNi + MFe).
    """
    # Central density in units of 1e9 g/cm^3.
    pc9 = 1e-9 * 10**lpc
    eta = min(0.95, 0.95 - 0.05*pc9)
    spread = max(0.025, 0.03*pc9)
    return eta, spread
def default_activation_threshold(spec):
    """
    Helper method to use the default balance activation threshold for state creation for tests.
    Usage: `@with_custom_state(threshold_fn=default_activation_threshold, ...)`
    """
    # Default to the spec-defined maximum effective balance.
    return spec.MAX_EFFECTIVE_BALANCE
import numpy as np
def get_sorted_notes(ClusterInfo, target_note_) -> str:
    """
    Get sequence of notes based on its median duration
    target_note_ : all possible notes that can appear in a session sung by that bird
    target_note_ can be different from actual syllable that the bird sang since
    the bird may drop syllables at times
    """
    durations = []
    available = ''
    for note in target_note_:
        note_info = ClusterInfo.get_note_info(note)
        if note_info:  # skip if note not available
            durations.append(note_info.median_dur)
            available += note
    # Pair each note with its median duration, sort by duration; the dict
    # keeps one entry per note character while preserving the sorted order.
    ordered = dict(sorted(zip(available, durations), key=lambda pair: pair[-1]))
    return ''.join(ordered)
import re
def list2str(datas):
    """
    Convert a list to a tuple-style string.
    :param datas: [1, 2]
    :return: (1, 2)
    """
    text = str(tuple(datas))
    # Drop the trailing comma Python adds for 1-tuples: "(1,)" -> "(1)".
    return re.sub(",\)$", ")", text)
import math
def calculateFuelForMass(mass):
    """calculate the fuel for a given mass"""
    # Integer floor-division is equivalent to math.floor(int/3) for ints.
    return int(mass) // 3 - 2
import collections
def dict_to_namedtuple_spec(dictionary):
    """Convert a non-nested dictionary to a namedtuple instance."""
    # Field names come from the dict keys; values fill the tuple by keyword.
    spec_cls = collections.namedtuple("model_specification", dictionary.keys())
    return spec_cls(**dictionary)
import os
import subprocess
import click
import sys
def find_shell_path(shell_name="bash"):
    """Finds out system's bash interpreter path.

    Runs ``which -a`` (POSIX) or ``where`` (Windows) and returns the first
    reported path that equals exactly ``/bin/<shell_name>``; exits the
    program when the lookup command fails.

    NOTE(review): any path other than /bin/<shell_name> is ignored, so the
    function returns None when the shell lives elsewhere (and on Windows,
    where that path can never match) — confirm this is intended.
    """
    if not os.name == "nt":
        cmd = ["which", "-a", shell_name]
    else:
        cmd = ["where", shell_name]
    try:
        c = subprocess.run(
            cmd,
            universal_newlines=True,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # Split per line and drop empty entries.
        output = c.stdout.split("\n")
        output = [_ for _ in output if _]
        for path in output:
            if path == f"/bin/{shell_name}":
                return path
    except subprocess.CalledProcessError:
        # Lookup command reported failure: shell not installed at all.
        click.secho("Error: Bash not found. Install Bash to use Rush.", fg="magenta")
        sys.exit(1)
def check_lunches(lunches_dict):
    """Checks if every restaurant posted their lunch already.

    Returns True when every value in ``lunches_dict`` is truthy.  Note that
    an empty dict also returns True, matching ``all``'s semantics (and the
    original if/else behavior).
    """
    # `all` already returns a bool; the original's if/else was redundant.
    return all(lunches_dict.values())
import hashlib
def compute_password_digest(message):
    """Helper method to compute the message digest for the given string.

    Accepts either ``str`` (encoded as UTF-8) or ``bytes``.  The original
    raised TypeError for str input even though the docstring promised
    "string" support; encoding first keeps bytes callers unchanged.
    """
    if isinstance(message, str):
        message = message.encode("utf-8")
    return hashlib.sha224(message).hexdigest()
def compose(x, *funcs):
    """
    Apply a sequence of functions, left to right, to an initial value.
    :param x: object the composed functions are applied to
    :param funcs: functions to apply in order
    :return: final output of the composition
    """
    result = x
    for fn in funcs:
        result = fn(result)
    return result
def _get_updated_tags(update_function, *args):
"""Get an updated list of tags.
:param update_function: The function used to update tags.
:param args: (The resource :py:class:`dict` to get updated tags for., The tags :py:class:`list` to update ``resource`` with.)
:rtype: :py:class:`list`
If the tag list update function modifies the existing tag list then that new list is returned. In all other cases
None is returned.
"""
updated_tags = None
resource, tags = args[:2]
existing_tags = resource.get("tags")
if existing_tags is not None:
existing_tags_set = set(existing_tags)
tags_set = set(tags)
updated_tags_set = update_function(existing_tags_set, tags_set)
if existing_tags_set != updated_tags_set:
updated_tags = list(updated_tags_set)
return updated_tags | 1c2235b3b45e07fdc4d4a69453d0f7264a49fc4f | 36,938 |
import operator
def select_survivors(snakes, num_survivors, survival_thresh):
    """ Picks the survivors that stay for next generation of snakes
    params:
        snakes: list, current generation of snakes of class Snake
        num_survivors: int, how many survivors there should be
        survival_thresh: float, selection probability threshold that survivors
            must meet
    returns:
        list of survivors of class Snake,
        list of tuples of reverse sorted selection probabilities and
        the indices of the associated snakes
    """
    survivors = []
    # Map snake index (stored as a string key) -> its selection probability.
    select_probs = dict()
    for i in range(len(snakes)):
        select_probs[str(i)] = snakes[i].select_prob
    # Highest selection probability first.
    sorted_probs = sorted(select_probs.items(), key = operator.itemgetter(1),
                          reverse = True)
    count = 0
    for i in range(len(sorted_probs)):
        if (survival_thresh <= sorted_probs[i][1]):
            if (count < num_survivors):
                survivors.append(snakes[int(sorted_probs[i][0])])
                # Side effect: flag the chosen snake on the input list.
                snakes[int(sorted_probs[i][0])].selected = True
                count += 1
    return survivors, sorted_probs
from typing import List
def sort_entries(entries: List) -> List:
    """Sort entries in place — directories first, files second — and return the same list."""
    # "dir <name>" sorts before "file <name>", then alphabetically by name.
    entries.sort(key=lambda entry: "{} {}".format(entry["type"], entry["name"]))
    return entries
def f_factory(epoch, shrinkage, learner, g):
    """Generate f inplace function."""
    def boosted(vec):
        # Previous ensemble prediction plus the shrunken new learner's output.
        return g[epoch](vec) + shrinkage * learner.predict(vec)
    return boosted
def _calculate_rydeberg_spectra(_n1: int, _n2: int, _R: float) -> float:
"""Calculates the spectra from the Rydeberg formula.
Decorators:
nb.njit
Arguments:
_n1 {int} -- First principal quantum number.
_n2 {int} -- Second principal quantum number.
_R {float} -- Rydeberg constant.
Returns:
float -- Calculated wavelength.
"""
return _R * (1.0 / _n1 ** 2 - 1.0 / _n2 ** 2) | 57de01915ca5148b9344be099e37ab02a973a381 | 36,945 |
def assertion_error():
    """
    A decorator to use in comparing assert

    Wraps a test-step method: on any exception it logs the error, flags
    ``self.has_error``, takes a capture, and decides — from the wrapped
    call's last positional argument and the config — whether to re-raise
    as AssertionError or swallow the failure.
    """
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as e:
                message = '[%s] Error is : %s' % (func.__name__, e)
                self.logger.error(message)
                self.has_error = True
                self.take_assert_capture(suffix=message)
                # The last positional argument acts as a per-call switch:
                # True -> always raise; False -> always swallow.
                if args[-1] is True:
                    raise AssertionError(message)
                elif args[-1] is not None and args[-1] is False:
                    return
                # Otherwise defer to the global config flag (string-valued).
                elif not self.config.get_fail_on_assert_error() == 'False':
                    raise AssertionError(message)
        return wrapper
    return decorator
def get_boundary_polyhedra(polyhedra, boundary_x=0, boundary_width=0.5, verbose=True, z_lim=[0, 100]):
    """
    get indices of polyhedra at boundary (assumed to be parallel to x-axis)
    Parameter
    ---------
    polyhedra: dict
        dictionary of all polyhedra
    boundary_x: float
        position of boundary in Angstrom
    boundary_width: float
        width of boundary where center of polyhedra are considered in Angstrom
    verbose: boolean
        optional
    z_lim: list
        upper and lower limit of polyhedra to plot
    Returns
    -------
    boundary_polyhedra: list
        list of polyhedra at boundary
    """
    boundary_polyhedra = []
    for key, polyhedron in polyhedra.items():
        center = polyhedron['vertices'].mean(axis=0)
        # Fix: honor the boundary_width parameter — the original hard-coded
        # 0.5 here and silently ignored the argument.
        if abs(center[0] - boundary_x) < boundary_width and (z_lim[0] < center[2] < z_lim[1]):
            boundary_polyhedra.append(key)
            if verbose:
                print(key, polyhedron['length'], center)
    return boundary_polyhedra
def assign_node_names(G, parcellation):
    """
    Modify nodal attribute "name" for nodes of G, inplace.
    Parameters
    ----------
    G : :class:`networkx.Graph`
    parcellation : list
        ``parcellation[i]`` is the name of node ``i`` in ``G``
    Returns
    -------
    :class:`networkx.Graph`
        graph with nodal attributes modified
    """
    # Assign anatomical names to the nodes
    # NOTE(review): `G.node` is the legacy (pre-2.4) networkx accessor, and
    # the loop indexes it with the enumeration counter `i` rather than the
    # node object — this assumes node labels are exactly 0..n-1; confirm
    # against callers.
    for i, node in enumerate(G.nodes()):
        G.node[i]['name'] = parcellation[i]
    # Record that a parcellation has been applied on the graph itself.
    G.graph['parcellation'] = True
    return G
def to_var(tensor, use_cuda, requires_grad=True):
    """Transform tensor into variable and transfer to GPU depending on flag"""
    if requires_grad:
        # In-place flag so autograd tracks operations on this tensor.
        tensor.requires_grad_()
    return tensor.cuda() if use_cuda else tensor
import sys
import os
def data_location(app_name: str) -> str:
    """Return path to application data folder.
    Args:
        app_name (str): name of application
    Returns:
        str: path to folder
    """
    if sys.platform == 'win32':
        # Windows keeps per-user application data under %APPDATA%.
        base = os.path.join(os.environ['APPDATA'], app_name)
    else:
        # Unix-likes use a dot-folder in the user's home directory.
        base = os.path.expanduser(os.path.join("~", "." + app_name))
    return os.path.abspath(base)
import os
def find_filename_iteratively(start, file_path_suffix, dir_path):
    """Return the first free numbered filename in a directory.
    :param start: String with starting folder prefix.
    :param file_path_suffix: String with file end/suffix.
    :param dir_path: String with file directory path.
    :return: String with the first path of the form
        ``dir_path + start + <n> + file_path_suffix`` that does not exist yet.
    """
    counter = 1
    candidate = dir_path + start + str(counter) + file_path_suffix
    while os.path.exists(candidate):
        counter += 1
        candidate = dir_path + start + str(counter) + file_path_suffix
    return candidate
def reduce_schema_error(message: str) -> str:
    """Reduce the error schema output."""
    # Truncate at the first ':' appearing at or after "Failed validating";
    # leave the message untouched when no such colon exists.
    cut = message.find(":", message.find("Failed validating"))
    return message if cut == -1 else message[:cut]
def floodFill(points, startx, starty):
    """
    Return a set of the (x, y) points of a filled-in area.

    `points` is an iterable of (x, y) tuples describing the outline of an
    arbitrary shape; `startx` and `starty` mark the starting point
    (normally inside the shape) to begin filling from.

    The fill is iterative rather than recursive, so large shapes cannot
    overflow the call stack.

    Raises ValueError if `points` is empty.
    """
    # Materialize the iterable exactly once: `points` may be a one-shot
    # generator (the original iterated it twice, which silently produced
    # an empty bounding box for generators), and the set gives O(1)
    # membership tests during the fill.
    allPoints = set(points)
    if not allPoints:
        raise ValueError('points must not be empty')
    # Bounding box of the outline, used to stop the fill from escaping
    # through any gap in the outline.
    minx = min(x for x, _ in allPoints)
    maxx = max(x for x, _ in allPoints)
    miny = min(y for _, y in allPoints)
    maxy = max(y for _, y in allPoints)
    pointsToProcess = [(startx, starty)]
    while pointsToProcess:
        x, y = pointsToProcess.pop()
        # Process point to the right of x, y.
        if x + 1 < maxx and (x + 1, y) not in allPoints:
            pointsToProcess.append((x + 1, y))
            allPoints.add((x + 1, y))
        # Process point to the left of x, y.
        if x - 1 > minx and (x - 1, y) not in allPoints:
            pointsToProcess.append((x - 1, y))
            allPoints.add((x - 1, y))
        # Process point below x, y.
        if y + 1 < maxy and (x, y + 1) not in allPoints:
            pointsToProcess.append((x, y + 1))
            allPoints.add((x, y + 1))
        # Process point above x, y.
        if y - 1 > miny and (x, y - 1) not in allPoints:
            pointsToProcess.append((x, y - 1))
            allPoints.add((x, y - 1))
    return allPoints
import subprocess
def cli():
    """
    Start the docker container.

    :return: exit status of the ``docker-compose up -d`` subprocess call
    """
    return subprocess.call("docker-compose up -d", shell=True)
def _calc_variance(df, lat_col, lon_col, mean_lat, mean_lon):
    """Calculate the partial sums of squared deviations for the latitude
    and longitude columns.

    :param df: DataFrame holding the coordinate columns.
    :param lat_col: name of the latitude column.
    :param lon_col: name of the longitude column.
    :param mean_lat: mean latitude to deviate from.
    :param mean_lon: mean longitude to deviate from.
    :return: ``[sum_lat, sum_lon]``, or ``[-1, -1]`` for an empty frame.
    """
    if len(df) == 0:
        return [-1, -1]
    # Vectorized column arithmetic replaces the original row-wise
    # ``DataFrame.apply``, which ran a Python lambda per row.
    sum_lat = ((df[lat_col] - mean_lat) ** 2).sum()
    sum_lon = ((df[lon_col] - mean_lon) ** 2).sum()
    return [sum_lat, sum_lon]
import re
def normalize_prometheus_label(str):
    """
    Prometheus labels must match /[a-zA-Z_][a-zA-Z0-9_]*/ and so we should coerce our data to it.
    Source: https://prometheus.io/docs/concepts/data_model/
    Every invalid character will be made to be an underscore `_`; a leading
    digit gets an underscore prepended so the first character is valid.
    """
    # The original pattern r'[^[a-zA-Z_][a-zA-Z0-9_]*]' was a malformed
    # character class that only matched runs ending in ']', leaving most
    # invalid characters untouched.
    label = re.sub(r'[^a-zA-Z0-9_]', "_", str)
    if label and label[0].isdigit():
        label = "_" + label
    return label
def find_tail(tags, num):
    """
    Get the position of the end of the aspect/opinion span in a tagging sequence.

    Args:
        tags (list): List of tags in a sentence.
        num (int): Number to look out for that signals the end of an
            aspect/opinion (the span is opened by ``num + 1``).
    Returns:
        int: index of the last tag in the span, or -1 when no span started.
    """
    span_open = False
    for index, tag in enumerate(tags):
        if span_open and tag != num:
            return index - 1
        if tag == num + 1:
            span_open = True
    if span_open:
        return len(tags) - 1
    return -1
import os
def get_filename_with_incremented_count(filename):
    """
    Get a new filename with a count value appended. The scheme follows the policy for name.conflictBehavior=rename.
    See https://dev.onedrive.com/items/create.htm.

    :param str filename:
    :return str:
    """
    stem, ext = os.path.splitext(filename)
    if ' ' in stem:
        base, counter = stem.rsplit(' ', maxsplit=1)
        # Only bump an existing counter that has no leading zero.
        if counter.isdigit() and not counter.startswith('0'):
            return '{} {}{}'.format(base, int(counter) + 1, ext)
    return stem + ' 1' + ext
def memoise(func):
    """memoization

    trade space for speed by caching function results; positional
    arguments must be hashable
    """
    cache = {}
    def wrapper(*args):
        try:
            return cache[args]
        except KeyError:
            value = cache[args] = func(*args)
            return value
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    return wrapper
def time_delta_seconds(t1, t2):
    """Return the absolute number of seconds between two datetime.time objects."""
    def to_seconds(t):
        return t.hour * 3600 + t.minute * 60 + t.second
    return abs(to_seconds(t1) - to_seconds(t2))
def elcc_eligible_capacity_rule(mod, proj, period):
    """Look up the deliverable capacity (MW) for a project in a period.

    :param mod: model/namespace exposing ``Deliverable_Capacity_MW``
    :param proj: project identifier
    :param period: period identifier
    :return: the ``Deliverable_Capacity_MW`` entry for ``(proj, period)``
    """
    capacity = mod.Deliverable_Capacity_MW[proj, period]
    return capacity
def url_gen_dicter(inlist, filelist):
    """
    Prepare name:URL dicts for a given pair of names and URLs.

    :param inlist: List of dictionary keys (OS/radio platforms)
    :type inlist: list(str)
    :param filelist: List of dictionary values (URLs)
    :type filelist: list(str)
    :return: dict pairing each name with its URL
    """
    return dict(zip(inlist, filelist))
def dialect_compare(dialect1, dialect2):
    """
    Compares two dialects.

    :return: dict with 'added' (entries only in dialect2) and
        'removed' (entries only in dialect1).
    """
    original_items = set(dialect1.items())
    new_items = set(dialect2.items())
    return {
        'added': dict(new_items - original_items),
        'removed': dict(original_items - new_items),
    }
import json
def prepare_scenes(config):
    """
    Prepare scenes and scene_labels.

    :param config: dict with key ``'train_label_pathname'`` pointing at a
        JSON file that contains a top-level ``'scenes'`` mapping.
    :return: ``(train_scenes, scene_labels)`` — the scene names as a list,
        and a copy of the scene mapping.
    """
    with open(config['train_label_pathname'], 'r') as handle:
        scenes = json.load(handle)['scenes']
    return list(scenes.keys()), dict(scenes)
def rgb_to_color(r, g, b):
    """Convert a RGB color into ANTX decimal color.

    Each channel is truncated to an int and wrapped modulo 256, the three
    bytes are packed as 0xRRGGBB, and the result is mapped to the negative
    ANTX encoding ``-(value) - 1``.
    """
    r = int(r) % 256
    g = int(g) % 256
    b = int(b) % 256
    # Pack into a single 24-bit value with bit shifts. The original went
    # through hex strings via hex(...)[2:], which drops leading zeros and
    # produced wrong values whenever a channel was < 16.
    packed = (r << 16) | (g << 8) | b
    return -packed - 1
def fix_pc_references(s):
    """Translate references to the current program counter from ca65 to ASM6.

    ca65 uses * for PC; ASM6 uses $.
    Only references at the start or end of an expression or of a
    parenthesized subexpression get translated. But that should be
    enough for our use case, as the source code can use (*) to produce
    ($) in the translation.
    """
    if s.startswith('*'):
        s = '$' + s[1:]
    if s.endswith('*'):
        # Replace the trailing '*' in place. (The original re-applied the
        # leading-character rewrite here, mangling strings ending in '*'.)
        s = s[:-1] + '$'
    return s.replace('(*', '($').replace('*)', '$)')
def ns_svg(item_name):
    """Prepend the SVG XML namespace (in Clark notation) to *item_name*."""
    return '{%s}%s' % ('http://www.w3.org/2000/svg', item_name)
import os
def get_files_count(files, suffix):
    """!
    @brief Return the number of file names whose extension equals *suffix*.
    """
    return sum(1 for name in files if os.path.splitext(name)[1] == suffix)
def named_copy(variable, new_name):
    """Clone *variable* (via its ``copy`` method) and give the clone *new_name*."""
    clone = variable.copy()
    clone.name = new_name
    return clone
def locales():
    """
    Returns a tuple of supported locales.

    :returns locales: a tuple of all supported locale identifiers.
    """
    supported = ("en-US es-ES fr-FR pt-BR de-DE it-IT ru-RU tr-TR "
                 "th-TH vi-VN zh-TW zh-CN ja-JP ko-KR iw-IL ar-EG "
                 "ms-MY in-ID pl-PL tl-PH ur-IN hi-IN my-US bn-BD")
    return tuple(supported.split())
def isScoreFile(f):
    """Checks whether file 'f' is a compressed MusicXML score file.

    A score file name contains "score", does not contain "analysis_on",
    and ends with "mxl".
    """
    if not f.endswith("mxl"):
        return False
    return "score" in f and "analysis_on" not in f
def _map_object(result):
    """Return a more human friendly pyVmomi object, by creating a mapping of
    the objects name to the literal object.

    :Returns: Dictionary
    :param result: A series of pyVmomi objects, like vim.Network or vim.Datastore
    :type result: List
    """
    return dict((item.name, item) for item in result)
def reshape_to_ND(arr, N):
    """Append trailing singleton dimensions to *arr* until it is N-dimensional.

    Raises ValueError when *arr* already has more than N dimensions.
    """
    current_ndim = len(arr.shape)
    if current_ndim > N:
        raise ValueError("array is larger than {} dimensional, given shape {}".format(N, arr.shape))
    return arr.reshape(arr.shape + (1,) * (N - current_ndim))
import requests
def get_text_by_id(id):
    """Get text from Gutenberg based on id.

    Project Gutenberg sets a restriction on the way that text on their site
    must be downloaded. This function does not honor the restriction, so the
    function should be used with care.

    Parameters
    ----------
    id : int or str
        Identifier.

    Returns
    -------
    content : bytes
        The raw response body (``response.content``). Note: this is
        ``bytes``, and a missing page yields the server's error page
        content rather than None.

    References
    ----------
    https://www.gutenberg.org/wiki/Gutenberg:Information_About_Robot_Access_to_our_Pages
    """
    url = "http://www.gutenberg.org/ebooks/{id}.txt.utf-8".format(id=id)
    return requests.get(url).content
import glob
import shutil
import os
def ResetInitialAndFinalFolders(sPath_i, sPath_f, IsRemove, sFileType):
    """Reset files in an initial/final sub-folder structure by either moving files back to initial
    from final or, optionally, deleting all files from both subfolders

    Args:
        sPath_i (String): directory path of 'initial' sub-folder including final path separator
        sPath_f (String): directory path of 'final' sub-folder including final path separator
        IsRemove (Boolean): toggle to either (TRUE) delete all files or (FALSE) move files in final
            sub-folder to initial without any deletions
        sFileType (String): file specifier such as '*.csv' or '*.*' recognizable to glob.glob()
    Returns:
        i, j, k (Integers): numbers of files relocated, removed from initial and removed from
            final subfolders, respectively
    """
    moved = removed_from_final = removed_from_initial = 0
    for final_file in glob.glob(sPath_f + sFileType):
        if IsRemove:
            os.remove(final_file)
            removed_from_final += 1
        else:
            shutil.move(final_file, sPath_i)
            moved += 1
    if IsRemove:
        for initial_file in glob.glob(sPath_i + sFileType):
            os.remove(initial_file)
            removed_from_initial += 1
    return moved, removed_from_final, removed_from_initial
def is_before(d1, d2):
    """
    Return True if d1 is strictly before d2.

    :param datetime.date d1: date 1
    :param datetime.date d2: date 2
    :return: True if d1 is before d2.
    :rtype: bool
    """
    return d2 > d1
import re
def normalize_profession(profession):
    """
    Normalize a profession so that it can be mapped to the text.

    NOTE: Currently, we only return the last token.

    :param profession: UTF-8 encoded bytes naming a profession.
    :return: the last token, lower-cased, with punctuation removed.
    """
    # Remove punctuation. The original passed re.UNICODE as the *count*
    # positional argument of re.sub (limiting replacements to 32), so long
    # inputs were only partially cleaned; pass it as flags instead.
    text = re.sub(r'[^\w\s]', " ", profession.decode("utf-8"), flags=re.UNICODE)
    # Only return the last token (raises IndexError on all-punctuation
    # input, matching the original behaviour).
    return text.split()[-1].lower()
def br_node(br_location):
    """
    Return the location for each of the four vertices.

    ``br_location`` is ``[row0, row1, col0, col1]``; the corners are
    returned as (row, col) pairs in the order (row0,col0), (row0,col1),
    (row1,col1), (row1,col0).
    """
    row_a, row_b = br_location[0], br_location[1]
    col_a, col_b = br_location[2], br_location[3]
    return [(row_a, col_a), (row_a, col_b), (row_b, col_b), (row_b, col_a)]
import re
def regex_or(list_of_strings):
    """Compile a case-insensitive regex matching any of the strings provided.

    Note: the strings are joined verbatim (not escaped), so they may
    themselves be regex fragments.
    """
    pattern = "({})".format("|".join(list_of_strings))
    return re.compile(pattern, re.IGNORECASE)
import os
def olderThanLog(logfile, lognum):
    """Return True if logfile which is of the form log.# (or log.#.gz) has a
    sequence number less than lognum."""
    stem, ext = os.path.splitext(logfile)
    if ext[1:] == 'gz':
        # Compressed rotation: the number sits one extension deeper.
        sequence = int(os.path.splitext(stem)[1][1:])
    else:
        sequence = int(ext[1:])
    return sequence < lognum
def get_dictionary(filename="c06d"):
    """Return a dictionary of the words and their pronunciations
    from the CMU Pronouncing Dictionary.

    Each pronunciation is kept as a single whitespace-stripped string.
    Lines starting with '#' are comments and are skipped.
    """
    pro = dict()  # word -> pronunciation string
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename) as fin:
        for line in fin:
            if line[0] == '#':
                continue
            # The first space on each line separates the word from its
            # pronunciation.
            word, pronounce = line.split(" ", 1)
            pro[word] = pronounce.strip()
    return pro
import os
def process_pair_params(func):
    """This decorator allows to enter path to ~/.ssh/id_rsa.pub or provide
    id_rsa.pub as plain-text.

    When ``kwargs['keypair']['keypair']['public_key']`` names an existing
    file, it is replaced by the file's (right-stripped) contents before the
    wrapped function runs.
    """
    def wrapper(*args, **kwargs):
        key_value = kwargs["keypair"]["keypair"]["public_key"]
        if os.path.isfile(key_value):
            with open(key_value, "r") as handle:
                kwargs["keypair"]["keypair"]["public_key"] = handle.read().rstrip()
        return func(*args, **kwargs)
    return wrapper
import os
import json
def git_hashes():
    """
    Return the git hashes recorded at snapshot time, parsed from
    ``snapshot/git_hashes.json`` next to this module.
    """
    hashes_file = os.path.join(os.path.dirname(__file__), "snapshot/git_hashes.json")
    with open(hashes_file, "r") as handle:
        return json.loads(handle.read().strip())
def update_global_event_counts(key_value_pairs):
    """
    Function that receives as parameter a DStream and applies an update
    function in order to keep aggregated information about event type
    counts (running sum per key).
    """
    def _update(new_values, running_count):
        base = 0 if running_count is None else running_count
        return sum(new_values, base)
    return key_value_pairs.updateStateByKey(_update)
def serialize_event_person(person):
    """Serialize EventPerson to JSON-like object."""
    serialized = {'_type': 'EventPerson'}
    serialized['id'] = person.id
    serialized['email'] = person.email
    serialized['name'] = person.display_full_name
    serialized['firstName'] = person.first_name
    serialized['familyName'] = person.last_name
    serialized['title'] = person.title
    serialized['affiliation'] = person.affiliation
    serialized['phone'] = person.phone
    serialized['address'] = person.address
    serialized['user_id'] = person.user_id
    return serialized
def get_undecided_variable(problem):
    """
    Return one variable that is still unset in the problem, i.e. whose
    domain has more than one candidate value, or None if all are decided.
    """
    for name, domain in problem['variables'].items():
        if len(domain) > 1:  # Undecided if more than 1 value possible
            return name
    return None
def create_slurm_options_string(slurm_options: dict, srun: bool = False):
    """
    Convert a dictionary with sbatch_options into a string that can be used in a bash script.

    Parameters
    ----------
    slurm_options: Dictionary containing the sbatch options.
    srun: Construct options for an srun command instead of an sbatch script.

    Returns
    -------
    slurm_options_str: sbatch option string.
    """
    template = (" {prepend}{key}={value}" if srun
                else "#SBATCH {prepend}{key}={value}\n")
    rendered = []
    for key, raw_value in slurm_options.items():
        dash = '-' if len(key) == 1 else '--'
        # Partition may be given as a list of partition names.
        if key in ('partition', 'p') and isinstance(raw_value, list):
            value = ','.join(raw_value)
        else:
            value = raw_value
        rendered.append(template.format(prepend=dash, key=key, value=value))
    return ''.join(rendered)
import torch
def kld_loss(pred, target, tao=1.0, nonlinear='sqrt', **kwargs):
    """KLD-style rotated-box regression loss.

    Args:
        pred (torch.Tensor): Predictions, shape (N, 5) as
            (x, y, w, h, angle), or (N, 4) which is converted to that
            form (see the conversion branch below).
        target (torch.Tensor): Learning targets, same shape as ``pred``.
        tao (float, optional): Constant (>= 1) in the final
            ``1 - 1 / (tao + d)`` mapping. Defaults to 1.0.
        nonlinear (str, optional): Transform applied to the distance,
            'sqrt' or 'ln'; any other value leaves it unchanged.

    Returns:
        torch.Tensor: Loss averaged over boxes with nonzero target width;
        a plain ``torch.tensor(0)`` when there are none.
    """
    assert tao >= 1
    assert pred.size() == target.size() and target.numel() > 0
    if (pred.size()[1] == 5):
        # Boxes already come as (x, y, w, h, angle) columns.
        x_pred = pred[:, 0]
        y_pred = pred[:, 1]
        w_pred = pred[:, 2]
        h_pred = pred[:, 3]
        a_pred = pred[:, 4]
        x_target = target[:, 0]
        y_target = target[:, 1]
        w_target = target[:, 2]
        h_target = target[:, 3]
        a_target = target[:, 4]
    else:
        # 4-column input: widths/heights come from coordinate differences
        # and the angle is fixed at zero (axis-aligned boxes).
        # NOTE(review): the centers are computed as (c0 - c2)/2 rather
        # than (c0 + c2)/2 — confirm the intended coordinate convention.
        x_pred = (pred[:, 0] - pred[:, 2]) / 2.0
        y_pred = (pred[:, 1] - pred[:, 3]) / 2.0
        w_pred = torch.abs(pred[:, 0] - pred[:, 2])
        h_pred = torch.abs(pred[:, 1] - pred[:, 3])
        a_pred = torch.zeros_like(x_pred)
        x_target = (target[:, 0] - target[:, 2]) / 2.0
        y_target = (target[:, 1] - target[:, 3]) / 2.0
        w_target = torch.abs(target[:, 0] - target[:, 2])
        h_target = torch.abs(target[:, 1] - target[:, 3])
        a_target = torch.zeros_like(x_target)
    # Keep only boxes whose target width is nonzero; the divisions below
    # would otherwise produce inf/nan.
    pos_inds = torch.nonzero(w_target, as_tuple=True)
    if pos_inds[0].size(0)==0:
        # NOTE(review): returns a CPU int tensor regardless of the input's
        # dtype/device — confirm callers tolerate that.
        return torch.tensor(0)
    x_pred = x_pred[pos_inds]
    y_pred = y_pred[pos_inds]
    h_pred = h_pred[pos_inds]
    w_pred = w_pred[pos_inds]
    a_pred = a_pred[pos_inds]
    x_target = x_target[pos_inds]
    y_target = y_target[pos_inds]
    w_target = w_target[pos_inds]
    h_target = h_target[pos_inds]
    a_target = a_target[pos_inds]
    # Local aliases to keep the closed-form distance readable.
    cos = torch.cos
    sin = torch.sin
    ln = torch.log
    pow = torch.pow
    div = torch.div
    d_x = x_pred - x_target
    d_y = y_pred - y_target
    d_a = a_pred - a_target
    # Center-offset term, projected into the target box frame.
    param1 = 4 * pow(div(d_x * cos(a_target) + d_y * sin(a_target), w_target), 2) + \
             4 * pow(div(d_y * cos(a_target) - d_x * sin(a_target), h_target), 2)
    # Shape/rotation cross term.
    param2 = pow(div(h_pred * sin(d_a), w_target), 2) + \
             pow(div(w_pred * sin(d_a), h_target), 2) + \
             pow(div(h_pred * cos(d_a), h_target), 2) + \
             pow(div(w_pred * cos(d_a), w_target), 2)
    # Log-ratio (scale) term.
    param3 = ln(div(pow(h_target, 2), pow(h_pred, 2))) + \
             ln(div(pow(w_target, 2), pow(w_pred, 2)))
    d_pt = 0.5*(param1+param2+param3)-1
    # Optional compression of the distance before the final mapping.
    if nonlinear == 'sqrt':
        d_pt = torch.sqrt(d_pt)
    elif nonlinear == 'ln':
        d_pt = ln(d_pt+1)
    # Map distance into (0, 1): larger distance -> loss closer to 1.
    loss = 1-div(1, tao+d_pt)
    loss = loss.unsqueeze(-1)
    loss_num = torch.sum(loss)
    # Average over the positive (nonzero-width) boxes only.
    return loss_num/pos_inds[0].size(0)
from typing import Optional
from typing import List
from typing import Tuple
from typing import Set
def parse_node_types(node_type_specs: Optional[str]) -> List[Tuple[Set[str], Optional[float]]]:
    """
    Parse a specification for zero or more node types.

    Takes a comma-separated list of node types. Each node type is a
    slash-separated list of at least one instance type name (like 'm5a.large'
    for AWS), and an optional bid in dollars after a colon.

    Raises ValueError if a node type cannot be parsed.

    Inputs should look something like this:

    >>> parse_node_types('c5.4xlarge/c5a.4xlarge:0.42,t2.large')
    [({'c5.4xlarge', 'c5a.4xlarge'}, 0.42), ({'t2.large'}, None)]

    :param node_type_specs: A string defining node types

    :returns: a list of node types, where each type is the set of
              instance types, and the float bid, or None.
    """
    parsed: List[Tuple[Set[str], Optional[float]]] = []
    if not node_type_specs:
        # Nothing specified.
        return parsed
    for node_type_spec in node_type_specs.split(','):
        try:
            # Split off the optional ':bid' suffix.
            parts = node_type_spec.split(':')
            if len(parts) > 2:
                # Only one bid allowed.
                raise ValueError(f'Could not parse node type "{node_type_spec}": multiple bids')
            # Instance types are slash-separated within an equivalence
            # class.
            instance_types = set(parts[0].split('/'))
            if '' in instance_types:
                # No empty instance types allowed.
                raise ValueError(f'Could not parse node type "{node_type_spec}": empty instance type')
            # Build the node type tuple; float() may itself raise
            # ValueError for a malformed bid.
            parsed.append((instance_types, float(parts[1]) if len(parts) > 1 else None))
        except ValueError:
            # Our own descriptive errors (and float()'s) pass through.
            raise
        except Exception as err:
            raise ValueError(f'Could not parse node type "{node_type_spec}"') from err
    return parsed
import yaml
def get_target_directory_from_config_file(cfg_src):
    """
    Return the download directory (``downloading.tgt_dir``) from a YAML
    configuration file, or None (after printing a message) when the
    parameter cannot be retrieved.
    """
    # Read the complete configuration.
    with open(cfg_src, 'r') as config_file:
        cfg = yaml.safe_load(config_file)
    try:
        return cfg['downloading']['tgt_dir']
    except KeyError as err:
        print(
            "Unable to retrieve parameter '%s' "
            "from configuration file." % err.args[0])
    except Exception:
        print("Unable to read configuration file")
    return None
def check_not_errors_tcp(requets_raw, response_raw, consulta):
    """
    Check a Modbus-TCP style frame pair for errors.

    :param requets_raw: raw frame used for the request
    :type requets_raw: bytes
    :param response_raw: raw response frame
    :type response_raw: bytes
    :param consulta: unused (kept for interface compatibility)
    :return: 1 when no errors are detected, 0 otherwise
    :rtype: int
    """
    # Transaction id / function code prefix must match the request.
    header_ok = requets_raw[:5] == response_raw[:5]
    # Payload length must equal the length the slave declares (big-endian
    # field at bytes 4-5).
    declared_length = int.from_bytes(response_raw[4:6], "big")
    length_ok = len(response_raw[6:]) == declared_length
    return 1 if (header_ok and length_ok) else 0
def check_search_query(query: str) -> bool:
    """Return False for queries beginning with '/', True otherwise."""
    return not query.startswith('/')
import io
def readmodifierrules(filename):
    """Read a file containing heuristic rules for marking modifiers.

    Example line: ``S *-MOD``, which means that for an S
    constituent, any child with the MOD function tag is a modifier.
    A default rule can be specified by using * as the first label, which
    always matches (in addition to another matching rule, if any).
    If none of the rules matches, a non-terminal is assumed to be a complement.
    """
    modifierrules = {}
    with io.open(filename, encoding='utf8') as handle:
        for raw_line in handle:
            line = raw_line.strip().upper()
            # Skip blank lines and '%' comments.
            if not line or line.startswith("%"):
                continue
            label, modifiers = line.split(None, 1)
            if label in modifierrules:
                raise ValueError('duplicate rule for %r (each label'
                        ' should occur at most once in the file)' % label)
            modifierrules[label] = modifiers.split()
    return modifierrules
def split_data_set(data_set, axis, value):
    """
    Split the data set along the given feature axis.

    Keeps the rows whose value at *axis* equals *value*, removing that
    column from each kept row.

    :param data_set: list of feature vectors (lists)
    :param axis: feature index to split on
    :param value: feature value to match
    :return: the reduced data set
    """
    reduced = []
    for row in data_set:
        if row[axis] == value:
            reduced.append(row[:axis] + row[axis + 1:])
    return reduced
def invert_prob(img):
    """
    Prepare the prob image for post-processing by inverting its values:
    minima become maxima and vice versa (each entry becomes max - value).

    Args:
        img: 2-D matrix.
    Returns:
        The inversion of matrix img.
    """
    peak = img.max()
    return peak - img
def num_spike_powers(FWHM):
    """
    num_spike_powers(FWHM):
        Return the (approx.) number of powers from a triangular spike
        pulse profile which are greater than one half the power of a
        perfect sinusoidal pulse profile. Both the spike and the
        sine are assumed to have an area under one full pulse of 1 unit.
        This expression was determined with a least-squares fit
        (errors get large as FWHM -> 0); a gaussian profile gives
        almost identical numbers as a spike of the same width.
        'FWHM' is the full width at half-max of the spike
        (0.0 < FWHM <= 0.5).
    """
    approx = -3.95499721563e-05 / FWHM**2 + 0.562069634689 / FWHM - \
        0.683604041138
    return approx
import math
def out_size(dim_in, k, s, p, d):
    """Calculates the resulting size after a convolutional layer.

    Args:
        dim_in (int): Input dimension size.
        k (int): Kernel size.
        s (int): Stride of convolution.
        p (int): Padding (of input).
        d (int): Dilation.
    Returns:
        int: output dimension size.
    """
    effective_extent = dim_in + 2 * p - d * (k - 1) - 1
    return math.floor(effective_extent / s + 1)
import os
def _rel_path_if_descendant(path, root):
    """
    Take a path and return either an absolute path or a path relative to root.
    If the path does not exist, return None.

    :param path: candidate path
    :param root: root directory
    :return: relative path when *path* is a descendant of *root*, the
        original path otherwise; None when the result does not exist
    """
    resolved_root = os.path.realpath(root)
    resolved_path = os.path.realpath(path)
    is_descendant = (resolved_path == resolved_root
                     or resolved_path.startswith(resolved_root + os.path.sep))
    candidate = os.path.relpath(resolved_path, resolved_root) if is_descendant else path
    # os.path.join discards resolved_root when candidate is absolute, so
    # this one existence check covers both the relative and absolute case.
    if os.path.exists(os.path.join(resolved_root, candidate)):
        return str(candidate)
def extract_nucleotide_information(seq, start, end):
    """
    Return the relative nucleotide ratios comparing the drop region to the
    entire genome.

    :param seq: genomic sequence (lower-case 'a'/'c'/'g'/'t')
    :param start: start position (inclusive). int
    :param end: end position (inclusive). int
    :return: a list of tuples for each nucleotide in the order A, C, G, T:
        (nuc count in drop relative to genome-wide count,
         nuc count relative to drop length)
    """
    drop_sequence = seq[start: end + 1]
    drop_length = len(drop_sequence)
    ratios = []
    for base in ('a', 'c', 'g', 't'):
        count_in_drop = drop_sequence.count(base)
        ratios.append((count_in_drop / seq.count(base),
                       count_in_drop / drop_length))
    return ratios
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.