content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def readMoveBaseGoalsFromFile(poses_file):
    """Read and return MoveBaseGoals for the robot-station and patrol-poses.

    If the contents of the file do not obey the syntax rules of
    _readPosesFromFile(), or if no patrol-poses were found, an IOError
    exception is raised.

    :param poses_file: path to the poses file parsed by _readPosesFromFile().
    :returns: (patrol_goals, station_goal); station_goal is None when the
        file contains no station pose.
    """
    patrol_poses, station_pose = _readPosesFromFile(poses_file)
    _assertNumPatrolPoses(patrol_poses)
    patrol_goals = [_createMoveBaseGoalFromPose(pose) for pose in patrol_poses]
    # Fix: identity comparison with None must use "is None", not "== None"
    # (== can be overridden and is non-idiomatic).
    station_goal = None if station_pose is None else _createMoveBaseGoalFromPose(station_pose)
    return patrol_goals, station_goal
def get_config(section=None):
    """Load the repository-local ``config.ini``.

    :param section: optional section name; when given, only that section's
        proxy is returned.
    :returns: the parser (or section proxy), or None when nothing was read
        (only the implicit DEFAULT section exists).
    """
    parser = configparser.ConfigParser()
    parser.read(get_repo_dir() + 'config.ini')
    if len(parser) == 1:
        # Only the built-in DEFAULT section: the file was absent/empty.
        return None
    if section is not None:
        return parser[section]
    return parser
def system_info(x):
    """Return the OS identification fields (``os.uname``) as a list.

    ``x`` is accepted for interface compatibility but is not used.
    """
    uname_result = os.uname()
    return [field for field in uname_result]
def get_parent(running_list, i, this_type, parent_type):
    """Get the description of an industry group's parent.

    OSHA industry descriptions are provided in ordered lists; this function
    identifies the parent industry group based on information provided by
    the group preceding it.

    :param running_list: ordered list of group objects processed so far.
    :param i: index of the current group in running_list.
    :param this_type: code type of the current (more granular) group.
    :param parent_type: code type that identifies a parent group.
    :returns: the parent description string.
    :raises ValueError: when the preceding group has an unexpected type.
    """
    prior = running_list[i - 1]
    prior_type = clean_desc(prior.full_desc)[1]
    if prior_type == parent_type:
        # Previous group is a parent: use its own description.
        parent_desc = str(prior.full_desc)
    elif prior_type == this_type:
        # Previous group is a sibling: inherit its parent description.
        parent_desc = str(prior.parent_desc)
    else:
        # Bug fix: the original concatenated a str with the group object
        # ('Unexpected code type: ' + prior), which raised TypeError instead
        # of the intended ValueError.
        raise ValueError('Unexpected code type: {!r}'.format(prior))
    return parent_desc
def rebalance_binary_class(label, mask=None, base_w=1.0):
    """Binary-class rebalancing.

    Computes the positive-class fraction of ``label`` (clamped to >= 1e-2)
    and a per-element weight tensor that up-weights positives by the inverse
    class ratio while negatives keep weight 1.
    """
    n_elements = torch.prod(torch.tensor(label.size()).float())
    pos_fraction = label.float().sum() / n_elements
    pos_fraction = torch.clamp(pos_fraction, min=1e-2)
    alpha = 1.0
    positive_term = alpha * label * (1 - pos_fraction) / pos_fraction
    negative_term = 1 - label
    return pos_fraction, positive_term + negative_term
def from_base(num_base: int, dec: int) -> float:
    """Returns value in e.g. ETH (taking e.g. wei as input).

    Divides ``num_base`` by 10**dec and returns the result as a float.
    """
    scale = 10 ** dec
    return float(num_base / scale)
def make_auth_header(auth_token):
    """Build authorization headers for Auth0-authenticated endpoints.

    Args:
        auth_token (dict): dict obtained from the Auth0 domain oauth
            endpoint; must contain 'token_type' and 'access_token'.

    Returns:
        dict: headers carrying the JSON content type and bearer token.
    """
    return {
        "Content-type": "application/json",
        "Authorization": "{} {}".format(
            auth_token['token_type'], auth_token['access_token']
        ),
    }
def get_file_in_archive(relative_path, subpath, url, force_extract = False):
    """
    Download a zip file, unpack it, and return the local path of a file
    inside the extracted archive (so that you can open it, etc).

    :param relative_path: Local name for the extracted folder. (Zip file will be named this with the appropriate zip extension)
    :param url: Url of the zip file to download
    :param subpath: Path of the file relative to the zip folder.
    :param force_extract: Force the zip file to re-extract (rather than just reusing the extracted folder)
    :return: The full path to the file on your system.
    """
    extracted_folder = get_archive(relative_path=relative_path, url=url, force_extract=force_extract)
    target = os.path.join(extracted_folder, subpath)
    assert os.path.exists(target), 'Could not find the file "%s" within the extracted folder: "%s"' % (subpath, extracted_folder)
    return target
async def create_nats_client(servers: List[str]) -> Optional[NatsClient]:
    """
    Create a NATS client for any NATS server or NATS cluster configured to accept this installation's NKey.
    :param servers: List of one or more NATS servers in the same NATS cluster.
    :return: a connected NATS client instance
    """
    settings = get_settings()
    client = await nats.connect(
        verbose=True,
        servers=servers,
        # Path to this installation's NKey seed, built from local settings.
        nkeys_seed=os.path.join(
            settings.connect_config_directory, settings.nats_nk_file
        ),
        # TLS context used to authenticate the remote server.
        tls=get_ssl_context(ssl.Purpose.SERVER_AUTH),
        allow_reconnect=settings.nats_allow_reconnect,
        max_reconnect_attempts=settings.nats_max_reconnect_attempts,
    )
    logger.info("Created NATS client")
    logger.debug(f"Created NATS client for servers = {servers}")
    return client
def _drop_additional_columns(
    pdf: PandasDataFrame,
    column_names: Tuple,
    additional_columns: Tuple,
) -> PandasDataFrame:
    """Removes additional columns from pandas DataFrame.

    ``additional_columns`` is a parallel tuple of flags: columns whose flag
    is truthy are dropped.
    """
    # ! columns has to be a list
    columns_to_remove = [name for name, flagged in zip(column_names, additional_columns) if flagged]
    return pdf.drop(columns=columns_to_remove)
def get_cc3d(mask, top=1):
    """Keep only the ``top`` largest 26-connected components of a 3D mask.

    :param mask: binary volume; cast to uint8 before labelling.
    :param top: top K connected components (by voxel count) to keep.
    :return: relabelled volume where the kept components become labels
        1..top and everything else becomes 0, or the string 'invalid' when
        fewer than ``top`` components exist.
    """
    msk = connected_components(mask.astype('uint8'))
    indices, counts = np.unique(msk, return_counts=True)
    # Drop label 0 (background) from both arrays.
    indices = indices[1:]
    counts = counts[1:]
    if len(counts) >= top:
        # print(f'Found {len(counts)} connected components')
        pass
    else:
        return 'invalid'
    # Labels of the `top` largest components (argpartition avoids full sort).
    labels = indices[np.argpartition(counts, -top)[-top:]]
    # Temporarily remap kept components into the 501..500+top range so the
    # thresholding below can separate them from all other labels.
    # NOTE(review): assumes original label values never reach 501 — confirm
    # for volumes with very many components.
    for i in range(top):
        msk[msk == labels[i]] = 501+i
    mn = 501
    mx = 501 + top - 1
    msk[msk < mn] = 500
    msk[msk > mx] = 500
    # Shift down so background is 0 and kept components are 1..top.
    msk = msk - 500
    return msk
def test_preprocess_consistency_adj_prop():
    """
    Test scdrs.pp.preprocess: consistency between sparse+cov+adj_prop and dense+cov+adj_prop.

    Runs the same preprocessing twice (sparse X vs. densified X) and checks
    that the covariate-corrected matrix, gene-level statistics, and
    cell-level statistics agree to numerical tolerance.
    """
    adata, adata_balance, df_cov = load_toy_data_adj_prop()
    adata_sparse = adata.copy()
    scdrs.pp.preprocess(adata_sparse, cov=df_cov, adj_prop="cell_group")
    adata_dense = adata.copy()
    adata_dense.X = adata_dense.X.toarray()
    scdrs.pp.preprocess(adata_dense, cov=df_cov, adj_prop="cell_group")
    # COV: reconstruct the uncorrected matrix from the sparse run
    # (X + COV_MAT @ COV_BETA^T + per-gene means) and compare to dense X.
    mat_X_sparse = (
        adata_sparse.X.toarray()
        + adata_sparse.uns["SCDRS_PARAM"]["COV_MAT"].values.dot(
            adata_sparse.uns["SCDRS_PARAM"]["COV_BETA"].values.T
        )
        + adata_sparse.uns["SCDRS_PARAM"]["COV_GENE_MEAN"].values
    )
    max_abs_dif = np.absolute(mat_X_sparse - adata_dense.X).max()
    err_msg = (
        "Covariate-corrected matrices different between `sparse+cov+adj_prop` and `dense+cov+adj_prop`, max_abs_dif=%0.3e"
        % max_abs_dif
    )
    assert np.allclose(
        mat_X_sparse,
        adata_dense.X,
        rtol=1e-4,
        atol=1e-5,
        equal_nan=True,
    ), err_msg
    # GENE_STATS: per-gene summary statistics must match between runs.
    max_abs_dif = np.absolute(
        adata_sparse.uns["SCDRS_PARAM"]["GENE_STATS"].values.astype(float)
        - adata_dense.uns["SCDRS_PARAM"]["GENE_STATS"].values.astype(float)
    ).max()
    err_msg = (
        "'GENE_STATS' different between `sparse+cov+adj_prop` and `dense+cov+adj_prop`, max_abs_dif=%0.3e"
        % max_abs_dif
    )
    assert np.allclose(
        adata_sparse.uns["SCDRS_PARAM"]["GENE_STATS"].values.astype(float),
        adata_dense.uns["SCDRS_PARAM"]["GENE_STATS"].values.astype(float),
        rtol=1e-4,
        equal_nan=True,
    ), err_msg
    # CELL_STATS: per-cell summary statistics must match between runs.
    max_abs_dif = np.absolute(
        adata_sparse.uns["SCDRS_PARAM"]["CELL_STATS"].values
        - adata_dense.uns["SCDRS_PARAM"]["CELL_STATS"].values
    ).max()
    err_msg = (
        "'CELL_STATS' different between `sparse+cov+adj_prop` and `dense+cov+adj_prop`, max_abs_dif=%0.3e"
        % max_abs_dif
    )
    assert np.allclose(
        adata_sparse.uns["SCDRS_PARAM"]["CELL_STATS"].values,
        adata_dense.uns["SCDRS_PARAM"]["CELL_STATS"].values,
        rtol=1e-4,
        equal_nan=True,
    ), err_msg
    return
def dscp_class(bits_0_2, bit_3, bit_4):
    """
    Compute the DSCP class name from individual DSCP bit values.

    Bits 0-2 decide the major class; bits 3-4 decide drop precedence.
    Drop precedence 0 yields a class selector ("cs<n>"); (5, 3) is the
    special Expedited Forwarding class ("ef"); everything else is assured
    forwarding ("af<major><prec>").

    :param bits_0_2: int: decimal value of bits 0-2
    :param bit_3: int: value of bit 3
    :param bit_4: int: value of bit 4
    :return: DSCP class name
    """
    drop_precedence = bit_3 * 2 + bit_4
    if drop_precedence == 0:
        return "cs{}".format(bits_0_2)
    if (bits_0_2, drop_precedence) == (5, 3):
        return "ef"
    return "af{}{}".format(bits_0_2, drop_precedence)
def load_data(impaths_all, test=False):
    """
    Load data with corresponding masks and segmentations
    :param impaths_all: Paths of images to be loaded
    :param test: Boolean, part of test set?
    :return: Numpy array of images, masks and segmentations (masks and
        segmentations gain a trailing channel axis of size 1)
    """
    # Save all images, masks and segmentations
    images = []
    masks = []
    segmentations = []
    # Load as numpy array and normalize between 0 and 1
    for im_path in impaths_all:
        images.append(np.array(Image.open(im_path)) / 255.)
        # Mask/segmentation paths are derived by string substitution on the
        # image path — NOTE(review): this assumes the DRIVE-style directory
        # layout ('images', 'mask', '1st_manual'); confirm against the dataset.
        mask_path = im_path.replace('images', 'mask').replace('.png', '_mask.gif')
        masks.append(np.array(Image.open(mask_path)) / 255.)
        if not test:
            seg_path = im_path.replace('images', '1st_manual').replace('training.png', 'manual1.gif')
        else:
            seg_path = im_path.replace('images', '1st_manual').replace('test.png', 'manual1.gif')
        segmentations.append(np.array(Image.open(seg_path)) / 255.)
    return np.array(images), np.expand_dims(np.array(masks), axis=-1), np.expand_dims(np.array(segmentations), axis=-1)
def f_assert_seq0_gte_seq1(value_list):
    """Check that the first element of the list is greater than or equal to
    the second; return the list unchanged on success.

    :raises FeatureProcessError: when the check fails.
    """
    first, second = value_list[0], value_list[1]
    if not first >= second:
        raise FeatureProcessError('%s f_assert_seq0_gte_seq1 Error' % value_list)
    return value_list
def save_touchstone(db, filename):
    """Write a Datablock to a touchstone-format file with name filename.

    Each row produced by db_iterator is written tab-separated, one per line.
    """
    with open(filename, "w") as out_file:
        for row in db_iterator(db, format_touchstone_block):
            out_file.write("\t".join(row))
            out_file.write("\n")
def fetch_regions():
    """Retrieve list of regions from EC2 service."""
    # ElasticComputeCloud wraps the EC2 API using the module-level SESSION;
    # deliver_output formats/prints the returned region list.
    ecc = ElasticComputeCloud(SESSION)
    deliver_output(ecc.get_regions())
def download_file(res):
    """
    Download file into temporary file
    :param res: Response object
    :return: downloaded file location
    """
    LOGGER.debug("Chunked file download started")
    # delete=False so the file outlives the context manager and the caller
    # can consume it via the returned path.
    with tempfile.NamedTemporaryFile(delete=False) as file:
        for chunk in res.iter_content(chunk_size=config.CHUNK_SIZE):
            if chunk:  # filter out keep-alive chunks
                file.write(chunk)
        file.close()  # NOTE(review): redundant — the with-block closes it on exit
    LOGGER.debug("File stored as %s", file.name)
    return file.name
def extract_units(name_andor_units):
    """Extracts the number of academic credit units the course is worth.
    Returns NaN if the number of units is variable (e.g. "1-4" or "2/3")."""
    paren_pos = name_andor_units.rindex('(') + 1
    space_pos = name_andor_units.index(' ', paren_pos)
    units = name_andor_units[paren_pos:space_pos]
    try:
        return float(units)
    except ValueError:
        if '/' in units or '-' in units:
            LOGGER.debug("Encountered variable units string %s; using NaN", repr(units))
            return NaN
        LOGGER.error("Encountered unparseable units string %s", repr(units))
        raise
def decorate_diff_with_color(contents: List[str]) -> str:
    """Inject the ANSI color codes to the diff.

    Mutates ``contents`` in place (colorizing header, hunk, added, and
    removed lines) and returns the lines joined with newlines.
    """
    for index, line in enumerate(contents):
        if line.startswith(("+++", "---")):
            line = f"\033[1;37m{line}\033[0m"  # bold white, reset
        elif line.startswith("@@"):
            line = f"\033[36m{line}\033[0m"  # cyan, reset
        elif line.startswith("+"):
            line = f"\033[32m{line}\033[0m"  # green, reset
        elif line.startswith("-"):
            line = f"\033[31m{line}\033[0m"  # red, reset
        contents[index] = line
    return "\n".join(contents)
def _combine_multipliers(first: Dict[Text, float],
second: Dict[Text, float]) -> Dict[Text, float]:
"""Combines operation weight multiplier dicts. Modifies the first dict."""
for name in second:
first[name] = first.get(name, 1.0) * second[name]
return first | 5,326,420 |
def scale_to_one(iterable):
    """
    Scale an iterable of numbers proportionally such that the highest number
    equals 1.

    Example:
    >>> scale_to_one([5, 4, 3, 2, 1])
    [1.0, 0.8, 0.6, 0.4, 0.2]
    """
    # Bug fix: materialize first. The original iterated the input twice
    # (max() then the comprehension), which silently returned [] for
    # one-shot iterators such as generators.
    values = list(iterable)
    peak = max(values)
    return [v / peak for v in values]
def test_polymorphic_enumerated_deserialization(valid_enumerated_data):
    """Ensure that a polymorphically deserialized EnumeratedDesignSpace looks sane.

    Builds from the fixture payload and checks the metadata, both
    descriptors (one real, one categorical), and the enumerated data rows.
    """
    design_space: EnumeratedDesignSpace = DesignSpace.build(valid_enumerated_data)
    assert design_space.name == 'my enumerated design space'
    assert design_space.description == 'enumerates some things'
    assert len(design_space.descriptors) == 2
    # Fixture order is (real descriptor, categorical descriptor).
    real, categorical = design_space.descriptors
    assert type(real) == RealDescriptor
    assert real.key == 'x'
    assert real.units == ''
    assert real.lower_bound == 1.0
    assert real.upper_bound == 2.0
    assert type(categorical) == CategoricalDescriptor
    assert categorical.key == 'color'
    assert categorical.categories == ['red', 'green', 'blue']
    assert len(design_space.data) == 2
    assert design_space.data[0] == {'x': 1.0, 'color': 'red'}
    assert design_space.data[1] == {'x': 2.0, 'color': 'green'}
def fixed_size_of_type_in_bits(type_ir, ir):
  """Returns the fixed, known size for the given type, in bits, or None.
  Arguments:
    type_ir: The IR of a type.
    ir: A complete IR, used to resolve references to types.
  Returns:
    size if the size of the type can be determined, otherwise None.
  """
  array_multiplier = 1
  # Peel off (possibly nested) array layers, accumulating the total element
  # count; any non-constant or automatic dimension makes the size unknown.
  while type_ir.HasField("array_type"):
    if type_ir.array_type.WhichOneof("size") == "automatic":
      return None
    else:
      assert type_ir.array_type.WhichOneof("size") == "element_count", (
          'Expected array size to be "automatic" or "element_count".')
      element_count = type_ir.array_type.element_count
      if not is_constant(element_count):
        return None
      else:
        array_multiplier *= constant_value(element_count)
      assert not type_ir.HasField("size_in_bits"), (
          "TODO(bolms): implement explicitly-sized arrays")
      type_ir = type_ir.array_type.base_type
  assert type_ir.HasField("atomic_type"), "Unexpected type!"
  # Base-type size: either given explicitly on the type reference, or read
  # from the referenced type definition's fixed-size attribute.
  if type_ir.HasField("size_in_bits"):
    size = constant_value(type_ir.size_in_bits)
  else:
    type_definition = find_object(type_ir.atomic_type.reference, ir)
    size_attr = get_attribute(type_definition.attribute, _FIXED_SIZE_ATTRIBUTE)
    if not size_attr:
      return None
    size = constant_value(size_attr.expression)
  return size * array_multiplier
def insecure(path):
    """Find an insecure path, at or above this one"""
    # Walks from `path` up through its parent directories and returns the
    # first one that insecure_inode() flags; presumably `first` returns a
    # falsy value when nothing matches — TODO confirm against its contract.
    return first(search_parent_paths(path), insecure_inode)
def usage():
    """ Print command-line usage information for the mnnconvert tool. """
    print("usage: mnnconvert [-h]")
    print("       [--framework {TF,CAFFE,ONNX,TFLITE,MNN}")
    print("       [--modelFile MODELFILE]")
    print("       [--prototxt PROTOTXT]")
    print("       [--MNNModel MNNMODEL]")
    print("       [--fp16 {True,False}]")
    print("       [--weightQuantBits {num of bits for weight-only-quant, default:0, which means no quant}]")
    print("       [--compressionParamsFile COMPRESSION_PARAMS_PATH]")
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """
    Estimate influence of the number of features on prediction time.
    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : nber of training instances (int)
    n_test : nber of testing instances (int)
    n_features : list of feature-space dimensionality to test (int)
    percentile : percentile at which to measure the speed (int [0-100])
    Returns:
    --------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for cls_name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            # Collect garbage before timing so allocation noise from fit()
            # does not pollute the benchmark.
            gc.collect()
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            # Convert seconds to microseconds at the requested percentile.
            percentiles[cls_name][n] = 1e6 * np.percentile(runtimes, percentile)
    return percentiles
def make_author_list(res):
    """Takes a CSL-style record and returns a list of 'Family, Given' author
    name strings, cleaned and capitalized."""
    try:
        r = [", ".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]
    except KeyError as e:
        # Missing 'author' key (or a 'family'/'given' field on an entry)
        # falls back to a placeholder. NOTE(review): `e` is unused — consider
        # logging which key was missing.
        print("No 'author' key, using 'Unknown Author'. You should edit the markdown file to change the name and citationkey.")
        r = ["Unknown Authors"]
    return r
def assert_array_equal(x, y, err_msg='', verbose=True, strides_check=False):
    """Raises an AssertionError if two array_like objects are not equal.
    Args:
         x(numpy.ndarray or cupy.ndarray): The actual object to check.
         y(numpy.ndarray or cupy.ndarray): The desired, expected object.
         strides_check(bool): If ``True``, consistency of strides is also
             checked.
         err_msg(str): The error message to be printed in case of failure.
         verbose(bool): If ``True``, the conflicting values
             are appended to the error message.
    .. seealso:: :func:`numpy.testing.assert_array_equal`
    """
    # Move any GPU arrays to host memory so numpy's comparison applies
    # uniformly to both numpy and cupy inputs.
    numpy.testing.assert_array_equal(
        cupy.asnumpy(x), cupy.asnumpy(y), err_msg=err_msg,
        verbose=verbose)
    if strides_check:
        if x.strides != y.strides:
            msg = ['Strides are not equal:']
            if err_msg:
                msg = [msg[0] + ' ' + err_msg]
            if verbose:
                msg.append(' x: {}'.format(x.strides))
                msg.append(' y: {}'.format(y.strides))
            raise AssertionError('\n'.join(msg))
def backproject_points_np(p, fx=None, fy=None, cx=None, cy=None, K=None):
    """Project 3D camera-frame points onto the image plane.

    Despite the name, this maps 3D points to 2D pixel coordinates using
    pinhole intrinsics, given either as scalars or as a 3x3 matrix K.

    :param p: array of shape (nr_points, 3) with (x, y, z) rows.
    :param fx: focal length x (ignored when K is given).
    :param fy: focal length y (ignored when K is given).
    :param cx: principal point x (ignored when K is given).
    :param cy: principal point y (ignored when K is given).
    :param K: optional 3x3 intrinsic matrix overriding fx/fy/cx/cy.
    :return: array of shape (nr_points, 2) holding (v, u) per row.
    """
    if K is not None:  # idiom fix: "not K is None" -> "K is not None"
        fx = K[0, 0]
        fy = K[1, 1]
        cx = K[0, 2]
        cy = K[1, 2]
    # true_divide: perspective divide by depth z, then apply intrinsics.
    u = ((p[:, 0] / p[:, 2]) * fx) + cx
    v = ((p[:, 1] / p[:, 2]) * fy) + cy
    return np.stack([v, u]).T
def pytest_collection_modifyitems(items):
    """
    A pytest hook to filter out mcg tests
    when running on openshift dedicated platform
    Args:
        items: list of collected tests
    """
    # Need to update the condition when MCG will get supported
    if (
        config.ENV_DATA["platform"].lower() == OPENSHIFT_DEDICATED_PLATFORM
        or config.ENV_DATA["platform"].lower() == ROSA_PLATFORM
    ):
        # Iterate over a copy because items is mutated inside the loop.
        for item in items.copy():
            if "manage/mcg" in str(item.fspath):
                log.info(
                    f"Test {item} is removed from the collected items"
                    f" mcg is not supported on {config.ENV_DATA['platform'].lower()}"
                )
                items.remove(item)
def configure_dexter_substitutions():
  """Configure substitutions for host platform and return list of dependencies

  Appends %dexter, %dexter_base, %dexter_regression_base, and
  %dexter_regression_test substitutions to the module-level `tools` list and
  returns the tool names the tests depend on for this platform.
  """
  # Produce dexter path, lldb path, and combine into the %dexter substitution
  # for running a test.
  dexter_path = os.path.join(config.cross_project_tests_src_root,
                             'debuginfo-tests', 'dexter', 'dexter.py')
  dexter_test_cmd = '"{}" "{}" test'.format(sys.executable, dexter_path)
  if lldb_path is not None:
    dexter_test_cmd += ' --lldb-executable "{}"'.format(lldb_path)
  tools.append(ToolSubst('%dexter', dexter_test_cmd))
  # For testing other bits of dexter that aren't under the "test" subcommand,
  # have a %dexter_base substitution.
  dexter_base_cmd = '"{}" "{}"'.format(sys.executable, dexter_path)
  tools.append(ToolSubst('%dexter_base', dexter_base_cmd))
  # Set up commands for DexTer regression tests.
  # Builder, debugger, optimisation level and several other flags differ
  # depending on whether we're running a unix like or windows os.
  if platform.system() == 'Windows':
    # The Windows builder script uses lld.
    dependencies = ['clang', 'lld-link']
    dexter_regression_test_builder = '--builder clang-cl_vs2015'
    dexter_regression_test_debugger = '--debugger dbgeng'
    dexter_regression_test_cflags = '--cflags "/Zi /Od"'
    dexter_regression_test_ldflags = '--ldflags "/Zi"'
  else:
    # Use lldb as the debugger on non-Windows platforms.
    dependencies = ['clang', 'lldb']
    dexter_regression_test_builder = '--builder clang'
    dexter_regression_test_debugger = "--debugger lldb"
    dexter_regression_test_cflags = '--cflags "-O0 -glldb"'
    dexter_regression_test_ldflags = ''
  # Typical command would take the form:
  # ./path_to_py/python.exe ./path_to_dex/dexter.py test --fail-lt 1.0 -w --builder clang --debugger lldb --cflags '-O0 -g'
  # Exclude build flags for %dexter_regression_base.
  dexter_regression_test_base = ' '.join(
      # "python", "dexter.py", test, fail_mode, builder, debugger, cflags, ldflags
      ['"{}"'.format(sys.executable),
      '"{}"'.format(dexter_path),
      'test',
      '--fail-lt 1.0 -w',
      dexter_regression_test_debugger])
  tools.append(ToolSubst('%dexter_regression_base', dexter_regression_test_base))
  # Include build flags for %dexter_regression_test.
  dexter_regression_test_build = ' '.join([
      dexter_regression_test_base,
      dexter_regression_test_builder,
      dexter_regression_test_cflags,
      dexter_regression_test_ldflags])
  tools.append(ToolSubst('%dexter_regression_test', dexter_regression_test_build))
  return dependencies
def hp_up(screen, player):
    """Increase the hp of the player"""
    # `screen` is unused — presumably kept for signature parity with other
    # power-up handlers; TODO confirm the callback interface requires it.
    player.add_lifes(1)
def recover(D, gamma=None):
    """Recover low-rank and sparse part, using Alg. 4 of [2].
    Note: gamma is lambda in Alg. 4.
    Parameters
    ---------
    D : numpy ndarray, shape (N, D)
        Input data matrix.
    gamma : float, default = None
        Weight on sparse component. If 'None', then gamma = 1/sqrt(max(D, N))
        as shown in [1] to be the optimal choice under a set of suitable
        assumptions.
    Returns
    -------
    LL : numpy array, shape (N, D)
        Low-rank part of data
    SP : numpy array, shape (N, D)
        Sparse part of data
    n_iter : int
        Number of iterations until convergence.
    """
    n, m = D.shape
    if gamma is None:
        gamma = 1/np.sqrt(np.amax([n, m]))
    # the following lines implement line 1 of Alg. 4
    Y = np.sign(D)
    l2n = np.linalg.norm(Y, ord=2)
    l2i = np.linalg.norm(np.asarray(Y).ravel(), ord=np.inf)
    dual_norm = np.amax([l2n, l2i])
    Y = Y/dual_norm
    # line 4 of Alg. 4
    A_hat = np.zeros(D.shape)
    E_hat = np.zeros(D.shape)
    D_fro = np.linalg.norm(D, ord='fro')
    # cf. section "Choosing Parameters" of [2]
    proj_tol = 1e-06*D_fro
    term_tol = 1e-07
    iter_max = 1e+03
    num_svd = 0  # track # of SVD calls
    m = 0.5/l2n  # \mu in Alg. 4
    r = 6  # \rho in Alg. 4
    sv = 5
    svp = sv
    k = 0
    converged = False
    while not converged:
        primal_converged = False
        sv = sv+np.round(n*0.1)
        primal_iter = 0
        while not primal_converged:
            # implement line 10 in Alg. 4 (soft-threshold for sparse part)
            T_tmp = D-A_hat+1/m*Y
            E_tmp = (np.maximum(T_tmp-gamma/m, 0) +
                     np.minimum(T_tmp+gamma/m, 0))
            # line 7 of Alg. 4 (singular-value threshold for low-rank part)
            U, S, V = np.linalg.svd(D-E_tmp+1/m*Y, full_matrices=False)
            # line 8 of Alg. 4
            svp = len(np.where(S > 1/m)[0])
            if svp < sv:
                sv = np.amin([svp+1, n])
            else:
                sv = np.amin([svp + np.round(0.05*n), n])
            # Fix: np.mat was removed in NumPy 2.0; np.asmatrix is the
            # documented equivalent.
            A_tmp = (np.asmatrix(U[:, 0:svp]) *
                     np.diag(S[0:svp]-1/m) *
                     np.asmatrix(V[0:svp, :]))
            # check convergence of inner optimization
            if (np.linalg.norm(A_hat-A_tmp, ord='fro') < proj_tol and
                    np.linalg.norm(E_hat-E_tmp, ord='fro') < proj_tol):
                primal_converged = True
            A_hat = A_tmp
            E_hat = E_tmp
            primal_iter = primal_iter+1
            num_svd = num_svd+1
        # line 13 of Alg. 4 (dual update)
        Z = D-A_hat-E_hat
        Y = Y+m*Z
        m = r*m
        # evaluate stopping criteria
        stop_crit = np.linalg.norm(Z, 'fro')/D_fro
        if stop_crit < term_tol:
            converged = True
        # some information about the iteration
        non_zero = len(np.where(np.asarray(np.abs(E_hat)).ravel() > 0)[0])
        message = ["[iter: %.4d]" % k,
                   "#svd=%.4d" % num_svd,
                   "rank(P)=%.4d" % svp,
                   "|C|_0=%.4d" % non_zero,
                   "crit=%.4g" % stop_crit]
        # Fix: the original used a Python-2 print statement, which is a
        # SyntaxError under Python 3.
        print(' '.join(message))
        k = k+1
        # handle non-convergence
        if not converged and k > iter_max:
            warnings.warn("terminate after max. iter.", UserWarning)
            converged = True
    return (A_hat, E_hat, k)
def plot_Reff(Reff: dict, dates=None, ax_arg=None, truncate=None, **kwargs):
    """
    Given summary statistics of Reff as a dictionary, plot the distribution over time.

    :param Reff: dict with keys 'mean', 'lower', 'upper', 'bottom', 'top'
        holding per-date summary series (inner/outer credible bands).
    :param dates: x-axis values; defaults to the integer index range.
    :param ax_arg: optional (fig, ax) pair to draw on; a new figure is
        created when None.
    :param truncate: optional (start, end) slice applied to dates and series.
    :return: (fig, ax). NOTE(review): when ax_arg is given, `fig` is
        whatever the caller passed as the first tuple element.
    """
    import matplotlib.pyplot as plt
    plt.style.use("seaborn-poster")
    from datetime import datetime as dt
    if ax_arg is None:
        fig, ax = plt.subplots(figsize=(12, 9))
    else:
        fig, ax = ax_arg
    # Take the next color from the axis' color cycle so repeated calls on
    # the same axis get distinct colors.
    color_cycle = ax._get_lines.prop_cycler
    curr_color = next(color_cycle)["color"]
    if dates is None:
        dates = range(len(Reff["mean"]))
    if truncate is None:
        ax.plot(dates, Reff["mean"], color=curr_color, **kwargs)
        ax.fill_between(
            dates, Reff["lower"], Reff["upper"], alpha=0.4, color=curr_color
        )
        ax.fill_between(dates, Reff["bottom"], Reff["top"], alpha=0.4, color=curr_color)
    else:
        ax.plot(
            dates[truncate[0] : truncate[1]],
            Reff["mean"][truncate[0] : truncate[1]],
            color=curr_color,
            **kwargs
        )
        ax.fill_between(
            dates[truncate[0] : truncate[1]],
            Reff["lower"][truncate[0] : truncate[1]],
            Reff["upper"][truncate[0] : truncate[1]],
            alpha=0.4,
            color=curr_color,
        )
        ax.fill_between(
            dates[truncate[0] : truncate[1]],
            Reff["bottom"][truncate[0] : truncate[1]],
            Reff["top"][truncate[0] : truncate[1]],
            alpha=0.4,
            color=curr_color,
        )
    # plt.legend()
    # grid line at R_eff =1
    ax.set_yticks(
        [1],
        minor=True,
    )
    ax.set_yticks([0, 2, 3], minor=False)
    ax.set_yticklabels([0, 2, 3], minor=False)
    ax.yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
    ax.tick_params(axis="x", rotation=90)
    return fig, ax
def get_stylesheet():
    """Generate an html link to a stylesheet"""
    assets_url = core_config['ASSETS_URL']
    theme = module_config['PYGMENTS_THEME']
    return "{}/code_pygments/css/{}.css".format(assets_url, theme)
def create_volume(ctxt,
                  host='test_host',
                  display_name='test_volume',
                  display_description='this is a test volume',
                  status='available',
                  migration_status=None,
                  size=1,
                  availability_zone='fake_az',
                  volume_type_id=None,
                  replication_status='disabled',
                  replication_extended_status=None,
                  replication_driver_data=None,
                  consistencygroup_id=None,
                  **kwargs):
    """Create a volume object in the DB."""
    vol = {
        'size': size,
        'host': host,
        'user_id': ctxt.user_id,
        'project_id': ctxt.project_id,
        'status': status,
        'migration_status': migration_status,
        'display_name': display_name,
        'display_description': display_description,
        'attach_status': 'detached',
        'availability_zone': availability_zone,
    }
    if consistencygroup_id:
        vol['consistencygroup_id'] = consistencygroup_id
    if volume_type_id:
        vol['volume_type_id'] = volume_type_id
    vol.update(kwargs)
    # Replication fields are set after kwargs so same-named kwargs cannot
    # override them (matches the original ordering).
    vol['replication_status'] = replication_status
    vol['replication_extended_status'] = replication_extended_status
    vol['replication_driver_data'] = replication_driver_data
    return db.volume_create(ctxt, vol)
def test_load_none_relationship(author_schema, tag_schema, author_jsonapi):
    """Tests that loading a relationship that is set to None does not load anything."""
    # Null out the relationship payload before deserializing.
    author_jsonapi['interests'] = None
    author, errors = author_schema(include_schemas=(tag_schema,)).load(author_jsonapi)
    assert errors == {}
    assert author.id == int(author_jsonapi['data']['id'])
    assert author.name == author_jsonapi['data']['attributes']['name']
    # The None relationship must not even create the attribute.
    assert not hasattr(author, 'interests')
def _writen(fd, data):
"""Write all the data to a descriptor.
reference: https://github.com/python/cpython/blob/main/Lib/pty.py#L124
"""
while data:
n = os.write(fd, data)
data = data[n:] | 5,326,438 |
def duration_of_treatment_30():
    """
    Constant from the Vensim model: treatment duration, in days.

    Real Name: b'duration of treatment 30'
    Original Eqn: b'10'
    Units: b'Day'
    Limits: (None, None)
    Type: constant
    """
    return 10
def input_pkgidx(g_dim):
    """
    Specify the parking spots index by the user.

    Prompts until a valid number of spots (< g_dim) is given, then reads
    that many distinct in-range grid indices, re-prompting on invalid or
    repeated values.

    :param g_dim: total number of grid cells.
    :return: sorted 1*pk_dim np.array 'pk_g_idx' of spot indices, where
        pk_dim is the number of spots.
    """
    # Fix: np.int was removed in NumPy 1.24; it was simply an alias for the
    # builtin int, so the replacement is behavior-identical.
    pk_dim = int(input('Please specify the num of parking spots:'))
    while pk_dim >= g_dim:
        print('Too many parking spots!')
        pk_dim = int(input('Please specify the num of parking spots:'))
    pk_g_idx = -np.ones(pk_dim, dtype = int)
    for idx in range(pk_dim):
        print('Input as grid index ranging from 0 to',g_dim-1)
        spot_idx = int(input())
        # Re-prompt until the index is in [0, g_dim).
        while (spot_idx < 0) or (spot_idx >= g_dim):
            print('Invalid input!')
            print('Input as grid index ranging from 0 to',g_dim-1)
            spot_idx = int(input())
        # Re-prompt until the index is not a duplicate (validating range
        # again for each replacement value).
        while spot_idx in pk_g_idx:
            print('Repeated input!')
            print('Input as grid index ranging from 0 to',g_dim-1)
            spot_idx = int(input())
            while (spot_idx < 0) or (spot_idx >= g_dim):
                print('Invalid input!')
                print('Input as grid index ranging from 0 to',g_dim-1)
                spot_idx = int(input())
        pk_g_idx[idx] = spot_idx
    pk_g_idx.sort()
    return pk_g_idx
def get_total_count(data):
    """
    Retrieves the total count from a Salesforce SOQL query result.
    :param dict data: data from the Salesforce API
    :rtype: int
    """
    total_size = data['totalSize']
    return total_size
def update(path):
    """Add copyright stuff to the begining of files.

    Iterates every Python file found under ``path`` and delegates the
    per-file header insertion to do_update().
    """
    for filename in path_to_pyfile_list(path):
        do_update(filename)
def barrier(packet_send_time):
    """Master send a Start message and then waits for a reply.
    Slave waits for Start message, then sends reply, then pauses
    for packet_send_time so both master and slave return from
    barrier() at the same time.

    Relies on module-level `master_device`, `uart`, and `protocol_timeout`.
    """
    if master_device:
        # Master initiates: send StartGame, then block for the echo.
        uart.write(StartGame().to_bytes())
        d_print(2, "StartGame TX")
        packet = read_packet(timeout=protocol_timeout)
        if isinstance(packet, StartGame):
            d_print(2, "StartGame RX")
        else:
            print("Unexpected packet type", packet)
    else:
        # Slave waits for the master's StartGame, echoes it back, then
        # sleeps one packet-transit time so both sides resume together.
        packet = read_packet(timeout=protocol_timeout)
        if isinstance(packet, StartGame):
            d_print(2, "StartGame RX")
            uart.write(StartGame().to_bytes())
            d_print(2, "StartGame TX")
        else:
            print("Unexpected packet type", packet)
        print("Sleeping to sync up", packet_send_time)
        time.sleep(packet_send_time)
def _check_hex(dummy_option, opt, value):
"""
Checks if a value is given in a decimal integer of hexadecimal reppresentation.
Returns the converted value or rises an exception on error.
"""
try:
if value.lower().startswith("0x"):
return int(value, 16)
else:
return int(value)
except ValueError:
raise OptionValueError(
"option {0:s}: invalid integer or hexadecimal value: {1:s}.".format(opt, value)) | 5,326,444 |
async def test_step_user(opp):
    """Test that the user step works."""
    conf = {CONF_PLACE_ID: "12345", CONF_SERVICE_ID: "12345"}
    # Patch out the entry setup and the ReCollect API call so the flow runs
    # without network access or a real integration setup.
    with patch(
        "openpeerpower.components.recollect_waste.async_setup_entry", return_value=True
    ), patch(
        "aiorecollect.client.Client.async_get_next_pickup_event", return_value=True
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        await opp.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "12345, 12345"
        assert result["data"] == {CONF_PLACE_ID: "12345", CONF_SERVICE_ID: "12345"}
def test_simple_scenario_configs():
    """Should execute configs commands as expected."""
    # ScenarioRunner drives the YAML-defined scenario end-to-end and records
    # each shell invocation for inspection.
    with supports.ScenarioRunner("simple/scenario_configs.yaml") as sr:
        sr.check_success()
        assert sr.shell.execution_history[0].result.status == "SUCCESS"
def get_challenge():
    """returns the ChallengeSetting object, from cache if cache is enabled"""
    challenge = cache_mgr.get_cache('challenge')
    if not challenge:
        # Singleton row: pk=1 is created on first access.
        challenge, _ = ChallengeSetting.objects.get_or_create(pk=1)
        # check the WattDepot URL to ensure it does't end with '/'
        if challenge.wattdepot_server_url:
            while challenge.wattdepot_server_url.endswith('/'):
                challenge.wattdepot_server_url = challenge.wattdepot_server_url[:-1]
        # create the admin
        create_admin_user()
        # Cache for 30 days (2592000 seconds).
        cache_mgr.set_cache('challenge', challenge, 2592000)
    return challenge
async def get_user_from_event(event):
    """ Get the user from argument or replied message.

    Returns (user_obj, extra) where `extra` is the remainder of the command
    arguments, or None on an unresolvable user.
    """
    args = event.pattern_match.group(1).split(" ", 1)
    extra = None
    if event.reply_to_msg_id:
        # Replied message: resolve the sender of the message replied to.
        previous_message = await event.get_reply_message()
        user_obj = await event.client.get_entity(previous_message.sender_id)
        extra = event.pattern_match.group(1)
    elif args:
        user = args[0]
        if len(args) == 2:
            extra = args[1]
        if user.isnumeric():
            user = int(user)
        if not user:
            await event.edit("`Pass the user's username, id or reply!`")
            return
        if event.message.entities is not None:
            probable_user_mention_entity = event.message.entities[0]
            if isinstance(probable_user_mention_entity, MessageEntityMentionName):
                user_id = probable_user_mention_entity.user_id
                user_obj = await event.client.get_entity(user_id)
                # NOTE(review): this path returns a bare user_obj while every
                # other path returns (user_obj, extra) — callers that unpack a
                # tuple will break here; confirm and unify.
                return user_obj
        try:
            user_obj = await event.client.get_entity(user)
        except (TypeError, ValueError) as err:
            await event.edit(str(err))
            return None
    # NOTE(review): if neither a reply nor args are present, user_obj is
    # never assigned and this line raises UnboundLocalError — confirm the
    # pattern guarantees at least one of them.
    return user_obj, extra
def migrate_data(conn: redis.StrictRedis, data: dict) -> list:
    """
    Uploads the given data to the given redis database connection.

    Each entry's 'type' selects a (command, formatter) pair from
    TYPE_TO_PUT_COMMAND; all commands are queued on one pipeline and
    executed in a single round trip.
    """
    pipe = conn.pipeline()
    for key, value in data.items():
        entry = TYPE_TO_PUT_COMMAND[value["type"]]
        command = entry[0]
        formatter = entry[1]
        put = getattr(pipe, command)
        put(key, *formatter(value["value"]))
    return pipe.execute()
def test_len_returns_count_of_labels_in_matcher(matcher: TokenMatcher,) -> None:
    """__len__ returns the number of unique labels registered in the matcher."""
    # The fixture-provided matcher contains exactly two labels.
    assert len(matcher) == 2
def test_dict_inequality_str():
    """Test equality error for string dict values."""
    left = uvp.UVParameter(name="p1", value={"v1": 1, "s1": "test"})
    right = uvp.UVParameter(name="p3", value={"v1": 1, "s1": "foo"})
    # Same keys, differing string value -> parameters must compare unequal.
    assert left != right
def dummy_location(db, create_location):
    """Give you a dummy default location."""
    # Build a location named 'Test' via the factory fixture and flush so it
    # gets a database identity without committing the session.
    loc = create_location(u'Test')
    db.session.flush()
    return loc
def get_resources_path() -> Path:
    """Return the path of this project's ``resources`` directory."""
    # The package root is recorded on the alpyne module at import time.
    return alpyne._ROOT_PATH / "resources"
def get_gti_file(eventfile, segment_length):
    """
    Create the individual .gti files for my data segments.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    segment_length - length of the individual segments for combining power spectra

    Writes ``segment_GTI.list`` next to the event file and one ``.gti`` file
    per non-empty segment under a ``gtis/`` subfolder.
    """
    parent_folder = str(pathlib.Path(eventfile).parent)
    # Open the FITS file once and close it deterministically (the original
    # called fits.open twice and leaked both handles).
    with fits.open(eventfile) as hdulist:
        gtis = hdulist[2].data
        times = hdulist[1].data['TIME']
    Tobs_start = gtis[0][0]  # observation start time
    Tobs_end = gtis[-1][1]   # observation end time
    # Extend past Tobs_end so the final (partial) segment is also captured.
    segment_times = np.arange(Tobs_start, Tobs_end + segment_length, segment_length)
    # Per-segment event counts; bin edges are defined by their left boundary.
    binned_counts, bin_edges, binnumber = stats.binned_statistic(
        times, np.ones(len(times)), statistic='sum', bins=segment_times)
    gti_folder = parent_folder + '/gtis/'
    Lv2_mkdir.makedir(gti_folder)
    with open(parent_folder + '/segment_GTI.list', 'w') as gtilist:
        for i in tqdm(range(len(bin_edges) - 1)):
            gtilist.write('GTI' + str(i).zfill(6) + ' ' + str(bin_edges[i]) + '\n')
            gti_file = gti_folder + str(segment_length).zfill(5) + 's_GTI' + str(i).zfill(6) + '.gti'
            # Skip segments with no counts and GTIs that already exist on disk.
            if binned_counts[i] != 0 and os.path.exists(gti_file) == False:
                subprocess.run(['mkgti.py', '--gtiname', gti_file,
                                str(segment_times[i]), str(segment_times[i + 1])])
    return
def is_running_in_container():
    # type: () -> bool
    """ Determines if we're running in an lxc/docker container. """
    sched = subprocess.check_output('cat /proc/1/sched', shell=True)
    sched = sched.decode('utf-8').lower()
    # Any single positive indicator is treated as "containerized".
    indicators = (
        'docker' in sched,
        '/lxc/' in sched,
        sched.split()[0] not in ('systemd', 'init',),
        os.path.exists('/.dockerenv'),
        os.path.exists('/.dockerinit'),
        os.getenv('container', None) is not None,
    )
    return any(indicators)
def load_imagenet_val(num=None):
    """
    Load a handful of validation images from ImageNet.
    Inputs:
    - num: Number of images to load (max of 25)
    Returns:
    - X: numpy array with shape [num, 224, 224, 3]
    - y: numpy array of integer image labels, shape [num]
    - class_names: dict mapping integer label to class name
    Raises:
    - FileNotFoundError: if datasets/imagenet_val_25.npz has not been downloaded.
    """
    imagenet_fn = 'datasets/imagenet_val_25.npz'
    if not os.path.isfile(imagenet_fn):
        # Raise instead of `assert False`: asserts are stripped under -O.
        raise FileNotFoundError(
            'file %s not found -- need to download imagenet_val_25.npz into the '
            'datasets folder' % imagenet_fn)
    # Context-manage the NpzFile so its underlying file handle is closed.
    # allow_pickle=True is required to load the pickled label_map dict on
    # modern NumPy (this is a local, trusted data file).
    with np.load(imagenet_fn, allow_pickle=True) as f:
        X = f['X']
        y = f['y']
        class_names = f['label_map'].item()
    if num is not None:
        X = X[:num]
        y = y[:num]
    return X, y, class_names
def sk_rot_mx(rot_vec):
    """
    Use Rodrigues' rotation formula (via the unit-quaternion form) to
    transform a rotation vector into a rotation matrix.

    :param rot_vec: length-3 axis-angle vector; its norm is the rotation
        angle in radians and its direction is the rotation axis.
    :return: numpy array of shape (3, 3), the rotation matrix.
    """
    theta = np.linalg.norm(rot_vec)
    # A (near-)zero rotation vector has no defined axis; the limit of the
    # formula is the identity (the original divided by zero here).
    if np.isclose(theta, 0.0):
        return np.eye(3)
    vector = np.array(rot_vec) * math.sin(theta / 2.0) / theta
    # Quaternion components: a = cos(theta/2), (b, c, d) = -axis*sin(theta/2).
    a = math.cos(theta / 2.0)
    b = -vector[0]
    c = -vector[1]
    d = -vector[2]
    return np.array(
        [
            [
                a * a + b * b - c * c - d * d,
                2 * (b * c + a * d),
                2 * (b * d - a * c)
            ],
            [
                2 * (b * c - a * d),
                a * a + c * c - b * b - d * d,
                2 * (c * d + a * b)
            ],
            [
                2 * (b * d + a * c),
                2 * (c * d - a * b),
                a * a + d * d - b * b - c * c
            ]
        ]
    )
def split_line(line, points, tolerance=1e-9):
    """Split a line at a point or multipoint, snapping first within ``tolerance``."""
    snapped = snap_line(line, points, tolerance)
    return list(split(snapped, points))
def analyze_data() -> bool:
    """
    Run every XML file in the configured data directory through the copy
    pipeline (parse with ``read_xml_file``, then ``copy_file`` the result).

    Returns
    -------
    True: bool
        Answer - whether the copy was successful
    """
    data_dir = Path.PATH_TO_PROGRAM + Path.DATA_DIRECTORY
    for entry in os.listdir(data_dir):
        copy_file(read_xml_file(data_dir + entry))
    return True
def cross_validation(docs, values, k):
    """
    Run k-fold cross validation over pre-classified documents.

    docs: Dict with text lists separate by value
    values: Target values texts
    k: Steps of cross validation

    Returns a list of k confusion matrices, one per fold, each of the form
    {'true': {value: hits}, 'false': {value: misclassifications-as-value}}.
    """
    group_size = {}
    confusion_matrix = []
    template = {'true': {}, 'false': {}}
    for value in values:
        # Integer division keeps the slice indices ints on Python 3 as well
        # (plain `/` yields floats there and breaks slicing); behavior is
        # unchanged on Python 2. `range` replaces the Py2-only `xrange`.
        group_size[value] = len(docs[value]) // k
        template['true'][value] = 0
        template['false'][value] = 0
    for i in range(0, k):
        training = copy.deepcopy(docs)
        confusion_matrix.insert(i, copy.deepcopy(template))
        for value in values:
            begin = i * group_size[value]
            end = (i + 1) * group_size[value]
            test = training[value][begin:end]
            # Remove the held-out fold from the training copy.
            del training[value][begin:end]
            probabilities, vocabulary = learn(training, values)
            for doc in test:
                prob_value = classify(doc, probabilities, vocabulary, values)
                if value == prob_value:
                    confusion_matrix[i]['true'][value] += 1
                else:
                    confusion_matrix[i]['false'][prob_value] += 1
    return confusion_matrix
def test_rrm_unexpected(dev, apdev):
    """hostapd unexpected rrm"""
    check_rrm_support(dev[0])
    # AP with neighbor reports disabled: any incoming RRM request is
    # "unexpected" and must be handled gracefully.
    params = {"ssid": "rrm", "rrm_neighbor_report": "0"}
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    bssid = hapd.own_addr()
    dev[0].connect("rrm", key_mgmt="NONE", scan_freq="2412")
    addr = dev[0].own_addr()
    # Management frame header built from the STA/AP MAC addresses with the
    # colons stripped (hex string as expected by MGMT_RX_PROCESS).
    hdr = "d0003a01" + bssid.replace(':', '') + addr.replace(':', '') + bssid.replace(':', '') + "1000"
    hapd.set("ext_mgmt_frame_handling", "1")
    tests = ["050401"]
    for t in tests:
        # Inject the frame; hostapd must accept processing it without error.
        if "OK" not in hapd.request("MGMT_RX_PROCESS freq=2412 datarate=0 ssi_signal=-30 frame=" + hdr + t):
            raise Exception("MGMT_RX_PROCESS failed for " + t)
def quote_plus(url, safe='/', encoding=None, errors=None):
    """Thin wrapper delegating to :func:`urllib.parse.quote_plus`."""
    # Forward positionally; the parameter order mirrors the stdlib signature.
    return uquote_plus(url, safe, encoding, errors)
def test_combining_mutates():
    """Test combining mutates."""
    first_data = {"key_int": 0}
    second_data = {"key_int_2": 1}
    merged_data = {"key_int": 0, "key_int_2": 1}
    first = Command("method", "path", first_data)
    second = Command("method", "path", second_data)
    added = first + second
    # ``+`` must merge into a new command without touching either operand.
    assert first._data == first_data
    assert second._data == second_data
    assert added._data == merged_data
    # ``combine_data`` mutates the receiver in place, not the argument.
    first.combine_data(second)
    assert first._data == merged_data
    assert second._data == second_data
async def test_login(mock_aioresponse: aioresponses) -> None:
    """Test logging in to the API."""
    m = mock_aioresponse
    m.clear()
    # Queue three one-shot responses for the token endpoint: a successful
    # login, then two 403s consumed by the token-refresh attempts below.
    m.post(f"{BASE_URL}/o/token/", status=200, payload=login_data, repeat=False)
    m.post(f"{BASE_URL}/o/token/", status=403, payload=login_data, repeat=False)
    m.post(f"{BASE_URL}/o/token/", status=403, reason="NO", repeat=False)
    yale = Client("test", "test", "test")
    await yale.login()
    assert yale.token == login_data["access_token"]
    assert yale.refresh_token == login_data["refresh_token"]
    # Token still valid: validation should succeed without a refresh.
    await yale._validate_access_token()
    # Force the token to appear expired so validation must hit the (mocked,
    # now failing) refresh endpoint and raise.
    yale.login_ts = yale.login_ts - yale.token_expires_in - 1001
    with pytest.raises(AuthenticationError, match=r".*Check credentials*"):
        await yale._validate_access_token()
def test_measure_get_meas(withings_api: WithingsApi) -> None:
    """Test function."""
    # Register the canned HTTP response for the measure-getmeas endpoint,
    # then verify the client deserializes it into the full typed structure
    # (timezone-aware timestamps, enum attribs/categories, nested measures).
    responses_add_measure_get_meas()
    assert withings_api.measure_get_meas() == MeasureGetMeasResponse(
        more=False,
        offset=0,
        timezone=TIMEZONE0,
        updatetime=arrow.get(1409596058).to(TIMEZONE0),
        measuregrps=(
            MeasureGetMeasGroup(
                attrib=MeasureGetMeasGroupAttrib.MANUAL_USER_DURING_ACCOUNT_CREATION,
                category=MeasureGetMeasGroupCategory.REAL,
                created=arrow.get(1111111111).to(TIMEZONE0),
                date=arrow.get("2019-01-01").to(TIMEZONE0),
                deviceid="dev1",
                grpid=1,
                measures=(
                    MeasureGetMeasMeasure(type=MeasureType.HEIGHT, unit=110, value=110),
                    MeasureGetMeasMeasure(type=MeasureType.WEIGHT, unit=120, value=120),
                ),
            ),
            MeasureGetMeasGroup(
                attrib=MeasureGetMeasGroupAttrib.DEVICE_ENTRY_FOR_USER_AMBIGUOUS,
                category=MeasureGetMeasGroupCategory.USER_OBJECTIVES,
                created=arrow.get(2222222222).to(TIMEZONE0),
                date=arrow.get("2019-01-02").to(TIMEZONE0),
                deviceid="dev2",
                grpid=2,
                measures=(
                    MeasureGetMeasMeasure(
                        type=MeasureType.BODY_TEMPERATURE, unit=210, value=210
                    ),
                    MeasureGetMeasMeasure(
                        type=MeasureType.BONE_MASS, unit=220, value=220
                    ),
                ),
            ),
        ),
    )
class JWST:
    """
    This will be the main simulated instrument.

    NOTE: originally declared as ``def JWST():`` containing nested defs that
    were never executed, so ``JWST()`` returned None. It is now a proper
    class; the per-detector noise methods are still stubs.
    """

    def __init__(self):
        # Original ``__init__`` omitted ``self``; fixed along with the class
        # conversion.
        pass

    def get_MIRI_noise(self):
        pass

    def get_NIRCam_noise(self):
        pass

    def get_NIRISS_noise(self):
        pass

    def get_NIRSpec_noise(self):
        pass
def create_network_policy(
    spec: Dict[str, Any] = None,
    spec_path: str = None,
    ns: str = "default",
    secrets: Secrets = None,
):
    """
    Create a network policy in the given namespace, either from the
    definition passed as `spec` or from a file containing the definition
    at `spec_path` (JSON or YAML, selected by file extension).
    """
    api = create_k8s_api_client(secrets)
    if spec_path and os.path.isfile(spec_path):
        _, extension = os.path.splitext(spec_path)
        with open(spec_path) as source:
            contents = source.read()
        if extension == ".json":
            spec = json.loads(contents)
        elif extension in (".yml", ".yaml"):
            spec = yaml.safe_load(contents)
        else:
            raise ActivityFailed(f"cannot process {spec_path}")
    client.NetworkingV1Api(api).create_namespaced_network_policy(ns, body=spec)
def averageSeriesWithWildcards(requestContext, seriesList, *position): #XXX
    """
    Call averageSeries after inserting wildcards at the given position(s).
    Example:
    .. code-block:: none
        &target=averageSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)
    This would be the equivalent of
    ``target=averageSeries(host.*.cpu-user.value)&target=averageSeries(host.*.cpu-system.value)``
    """
    positions = [position] if isinstance(position, int) else position
    matchedList = {}
    for series in seriesList:
        # Drop the name segment(s) at the wildcard position(s) to build the
        # grouping key.
        kept = [part for idx, part in enumerate(series.name.split('.'))
                if idx not in positions]
        matchedList.setdefault('.'.join(kept), []).append(series)
    result = []
    for newname in matchedList.keys():
        averaged = averageSeries(requestContext, (matchedList[newname]))[0]
        averaged.name = newname
        result.append(averaged)
    return result
def errore_ddp_digitale(V):
    """
    Compute the measurement error of the digital multimeter's voltage
    reading, assuming the correct range was selected.
    The voltage must be given in Volts. Returns None (after printing a
    warning) for voltages of 200 V or more.
    """
    V = absolute(V)
    # (full-scale limit, resolution term) for each instrument range; the
    # first matching range wins, mirroring the original if-ladder.
    for full_scale, resolution in ((0.2, 1e-8), (2, 1e-6), (20, 1e-4), (200, 1e-2)):
        if V < full_scale:
            return sqrt(V**2 * 25e-6 + resolution)
    print("Tollerati valori minori di 200V")
    return
def water_zone(zone_name, force_water=False):
    """
    Trigger watering of a zone by moisture sensor.

    Waters when the zone's cooldown has elapsed AND its moisture reading is
    below the configured threshold, or unconditionally when ``force_water``
    is True. Start/end events are logged with elapsed time.

    :param zone_name: The name of the zone as used in the zone section header in the config file (i.e. 'zone_1', 'zone_4')
    :type zone_name: str
    :param force_water: water immediately, bypassing sensor checks
    :type force_water: bool
    """
    water = False
    msg = ' '
    # print('%s: Seconds_before_ok_to_water: %s' % (zone_name, garden_pi_zones[zone_name].seconds_before_ok_to_water))
    # print('%s: Moisture: %s' % (zone_name, garden_pi_zones[zone_name].moisture))
    # print('%s: Moisture threshold: %s' % (zone_name, garden_pi_zones[zone_name].moisture_water_threshold))
    # Sensor-driven path: cooldown expired and soil drier than the threshold.
    if (garden_pi_zones[zone_name].seconds_before_ok_to_water == 0) and (garden_pi_zones[zone_name].moisture < garden_pi_zones[zone_name].moisture_water_threshold):
        # print("In 1")
        water = True
        msg = 'MOISTURE SENSOR'
    # Manual override path: label the log entry accordingly.
    if force_water is True:
        # print("In 2")
        water = True
        msg = 'IMMEDIATE'
    #print("%s: Water: %s" % (zone_name, water))
    if water is True:
        # print("In 3")
        s = time.time()
        garden_pi_logger.log_csv(messg='START WATERING (%s): %s' % (msg, zone_name.upper()))
        garden_pi_zones[zone_name].water()
        f = time.time()
        garden_pi_logger.log_csv(messg='END WATERING (%s): %s. Elapsed Time: %ss' % (msg, zone_name.upper(), format_float(f-s)))
def print_sql_amortization_table() -> None:
    """Prints the amortization table from the analysis.db file.

    Rebuilds the SQL table from ``PropertyInfo.amortization_table`` (numbers
    formatted with thousands separators, except the raw period index) and
    prints the rendered table between horizontal rules.
    """
    print(
        "----------------------------------------"
        "----------------------------------------"
    )
    print("Amortization Table:")
    print()
    amortization_data = {
        'Period': [], 'Monthly Payment': [],
        'Principal Payment': [], 'Interest Payment': [],
        'Loan Balance': []
    }
    # Copy the in-memory table, formatting every monetary column to two
    # decimals with comma grouping; 'Period' stays a plain integer.
    for key, value in PropertyInfo.amortization_table.items():
        for num in value:
            if key == 'Period':
                amortization_data[key].append(num)
            else:
                amortization_data[key].append(f"{num:,.2f}")
    # Recreate the SQL table from scratch so stale rows never linger.
    with amortization_table() as (con, cur):
        drop_amortization_table(con, cur)
        create_amortization_table(con, cur)
        add_amortization_data(con, cur, amortization_data)
        print(get_amortization_table(con))
    print()
    print(
        "----------------------------------------"
        "----------------------------------------"
    )
    print()
def run_command(cmd, get_output=False, tee=True, custom_env=None):
    """
    Runs a command.

    Args:
        cmd (str): The command to run (executed through the shell).
        get_output (bool): If true, run_command will return the stdout output
            as a list of lines with trailing newlines stripped. Otherwise
            returns the exit status (0). Default: False.
        tee (bool): If true, captures output (if get_output is true) as well
            as prints output to stdout. Otherwise, does not print to stdout.
        custom_env (dict): Optional environment mapping to run the command
            under instead of the inherited one.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    print("Running command: {:}".format(cmd))
    if not get_output:
        return subprocess.check_call(cmd, shell=True)
    if custom_env is not None:
        print("Overriding Environment")
    output = []
    # Context-manage the process so its stdout pipe is always closed, even if
    # decoding/teeing raises. Popen(env=None) inherits the environment, so a
    # single call covers both branches of the original.
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True,
                          env=custom_env) as p:
        for raw in iter(p.stdout.readline, b""):
            line = raw.decode("utf-8")
            if tee:
                sys.stdout.write(line)
                sys.stdout.flush()
            output.append(line.rstrip("\n"))
        ret = p.wait()
    if ret == 0:
        return output
    raise subprocess.CalledProcessError(ret, cmd)
def list_profiles_in(path):
    """List profiles in a given root directory.

    A profile is any subdirectory named ``profile_<name>``; the ``<name>``
    suffixes are returned.
    """
    return [
        entry.split('_', 1)[-1]
        for entry in os.listdir(path)
        if entry.startswith('profile_') and os.path.isdir(os.path.join(path, entry))
    ]
def lambda_handler(context):
    """
    Ping the configured URLs and post the results to CloudWatch.

    ``get_ping_urls()`` returns entries shaped like:
    url_list = [
        {'name': 'SERVER1', 'url': 'http://site-url-1.com/ping'},
        {'name': 'SERVER2', 'url': 'http://site-url-2.com/ping?key=secure-key'}
    ]

    NOTE(review): a standard AWS Lambda handler signature is
    ``(event, context)``; this one takes a single argument — confirm how it
    is wired up before changing.
    """
    url_list = get_ping_urls()
    #print(url_list)
    results = ping.fetch_all(url_list)
    print (results)
    # Post metrics to CloudWatch
    post_to_cw(results)
def test_freshness_in_catchup(looper,
                              txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_steward,
                              tconf, ):
    """
    Check that InstanceChange with reason "State signatures are not updated for too long"
    are not sends in catchup without view change.
    """
    view_no = txnPoolNodeSet[0].viewNo
    lagging_node = txnPoolNodeSet[-1]
    # Delay catchup replies and commits on the lagging node so it stays in
    # catchup while the rest of the pool processes the request.
    with delay_rules(lagging_node.nodeIbStasher, cr_delay(), cDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)
        lagging_node.start_catchup()
        # Wait longer than the freshness timeout to give a spurious
        # InstanceChange a chance to be emitted.
        looper.runFor(tconf.ACCEPTABLE_FRESHNESS_INTERVALS_COUNT * tconf.STATE_FRESHNESS_UPDATE_INTERVAL + 5)
    print(lagging_node.view_changer.instance_changes._cache)
    # The lagging node must not have voted for a view change while catching up.
    assert not lagging_node.view_changer.instance_changes.has_inst_chng_from(view_no + 1, lagging_node.name)
def print_result_and_exit(embargoed_builds, embargoed_pullspecs, embargoed_releases, as_yaml, as_json):
    """ Prints embargo detection result and exit
    :param embargoed_builds: list of dicts of embargoed builds
    :embargoed_pullspecs: list of embargoed image pullspecs
    :embargoed_releases: list of pullspecs of embargoed release payloads
    :as_yaml: if true, print the result as an YAML document
    :as_json: if true, print the result as a JSON document
    """
    out = {
        "has_embargoes": bool(embargoed_builds)
    }
    if embargoed_builds:
        out["builds"] = embargoed_builds
    if embargoed_pullspecs:
        out["pullspecs"] = embargoed_pullspecs
    # BUG FIX: this was guarded by `embargoed_builds` (copy-paste from above),
    # which could attach an empty/absent releases list inconsistently; guard
    # on the releases themselves like the other optional keys.
    if embargoed_releases:
        out["releases"] = embargoed_releases
    if as_yaml:
        yaml.dump(out, sys.stdout)
    elif as_json:
        json.dump(out, sys.stdout)
    elif not embargoed_builds:
        green_print("No builds contain embargoed fixes.")
        exit(2)  # we use this special exit status to indicate no embargoed fixes are detected.
        return  # won't reach here. it is for spoofing the unittest when exit is mocked.
    else:
        if embargoed_releases:
            green_print(f"Found {len(embargoed_releases)} release payload containing embargoed fixes:")
            for release in embargoed_releases:
                green_print(release)
        green_print(f"Found {len(embargoed_builds)} builds containing embargoed fixes:")
        for index, build in enumerate(embargoed_builds):
            line = f"{build['id']}\t{build['nvr']}"
            if embargoed_pullspecs:
                line += f"\t{embargoed_pullspecs[index]}"
            green_print(line)
    exit(0)
def extractLine(shape, z = 0):
    """
    Extracts a line from a shape line.

    :param shape: object exposing ``exteriorpoints()`` returning a sequence
        of (x, y) pairs; only the first two points are used.
    :param z: z-component to attach to the returned vector (default 0).
    :return: tuple (dx, dy, z) -- the difference of the first two points.
    """
    # Fetch the point list once instead of four separate calls.
    points = shape.exteriorpoints()
    x = points[0][0] - points[1][0]
    y = points[0][1] - points[1][1]
    return (x, y, z)
def get_fragility_model_04(fmodel, fname):
    """
    :param fmodel:
        a fragilityModel node
    :param fname:
        path of the fragility file
    :returns:
        an :class:`openquake.risklib.scientific.FragilityModel` instance
    """
    # logging.warning instead of the deprecated logging.warn alias.
    logging.warning('Please upgrade %s to NRML 0.5', fname)
    # Convert the NRML 0.4 node to 0.5 form before delegating.
    node05 = convert_fragility_model_04(fmodel, fname)
    node05.limitStates.text = node05.limitStates.text.split()
    return get_fragility_model(node05, fname)
def user_weights():
    """Used to display the avg and recommended amounts of calories to the user."""
    print(colors.green + 'The average and healthy amount of calories for a person is about 2100 or less calories per '
                         'day.\n',
          colors.reset)
    # BUG FIX: the original compared str(user_weight) against string literals,
    # which orders lexicographically (e.g. '30' > '125'), and its
    # `>= '125' or <= '200'` condition was always true, making the >=300
    # branch unreachable. Compare numerically instead.
    weight = float(user_weight)
    if 1 <= weight <= 124:
        print('Your recommended amount of calories to consume per day is 1,600 or less calories per day.\n')
        counter()
    elif 125 <= weight < 300:
        print('Your recommended amount of calories to consume per day is 2700 or less calories per day.\n')
        counter()
    elif weight >= 300:
        print('Your recommended amount of calories to consume per day is 3764 or less calories per day.\n')
        counter()
    else:
        print(colors.red + 'User weight error found! Restarting...\n' + colors.reset)
        restart.restart()
def substring_in_list(substr_to_find, list_to_search):
    """
    Returns a boolean value to indicate whether or not a given substring
    is located within the strings of a list.
    """
    # any() short-circuits on the first match instead of materializing the
    # full filtered list like the original comprehension did.
    return any(substr_to_find in s for s in list_to_search)
def downsample_image(image: np.ndarray, scale: int) -> np.ndarray:
    """Downsamples the image by an integer factor to prevent artifacts.

    Args:
        image: input image; first two dimensions must be divisible by scale.
        scale: integer downsampling factor (1 returns the input unchanged).

    Returns:
        The resized image (height // scale, width // scale).

    Raises:
        ValueError: if either spatial dimension is not divisible by scale.
    """
    if scale == 1:
        return image
    height, width = image.shape[:2]
    if height % scale > 0 or width % scale > 0:
        raise ValueError(f'Image shape ({height},{width}) must be divisible by the'
                         f' scale ({scale}).')
    out_height, out_width = height // scale, width // scale
    # BUG FIX: cv2.resize's third positional parameter is ``dst``, not the
    # interpolation flag -- INTER_AREA was silently ignored before. It must
    # be passed by keyword.
    resized = cv2.resize(image, (out_width, out_height), interpolation=cv2.INTER_AREA)
    return resized
def getQueryString( bindings, variableName ):
    """ Columns a bunch of data about the bindings. Will return properly formatted strings for
    updating, inserting, and querying the SQLite table specified in the bindings dictionary. Will also
    return the table name and a string that lists the columns (properly formatted for use in an SQLite
    query).
    variableName is the name to use for the SQLiteC++ Statement variable in the generated methods.

    Each binding is a dict with ``type`` ('table' or 'column'); column
    bindings also carry ``column``, ``variable``, ``variableType`` and an
    optional ``id`` flag. Returns a dict of generated C++/SQL fragments.
    """
    table = ''
    columns = []        # column names, in binding order
    queryData = []      # C++ statements reading result columns into variables
    insertData = []     # value expressions for an INSERT statement
    updateData = []     # "col=" << value fragments for an UPDATE statement
    whereClaus = []     # "col = ?" fragments for id columns
    bindData = []       # query.bind(...) statements matching whereClaus
    index = 0
    for b in bindings:
        # Process table
        if (b['type'] == 'table'):
            table = b['table']
        # Process column
        elif (b['type'] == 'column'):
            columns.append( b['column'] )
            # Process query data
            if (b['variableType'] == 'string'):
                # SQLiteC++ returns text columns as const char*; wrap in std::string.
                text = '{variable} = std::string( {query}.getColumn({index}).getText() );'
                text = text.format(variable = b['variable'], index = index, query = variableName)
                queryData.append( text )
            else:
                text = '{variable} = {query}.getColumn({index});'
                text = text.format(variable = b['variable'], index = index, query = variableName)
                queryData.append( text )
            index = index + 1
            # Process insert data
            if (b['variableType'] == 'string' or b['variableType'] == 'char*'):
                # Text values must be single-quoted in the generated SQL.
                insertData.append( "\"'\" << " + b['variable'] + " << \"'\"" )
            else:
                insertData.append( b['variable'] )
            # Process id
            if (b.get('id')):
                # id columns form the WHERE clause; bind indices are 1-based.
                whereClaus.append( b['column'] + ' = ?' )
                text = 'query.bind({index}, {variableName});'
                text = text.format(index = len(whereClaus), variableName = b['variable'])
                bindData.append( text )
    # Process update data
    for i in range(0, len(columns)):
        t = columns[i] + '=" << ' + insertData[i]
        updateData.append(t)
    # Collapse each fragment list into the final string forms.
    columns = ', '.join( columns )
    updateData = ' << ", '.join( updateData )
    insertData = ' << \", " << '.join( insertData )
    queryData = '\n'.join( queryData )
    whereClaus = ' AND '.join( whereClaus )
    bindData = '\n\t'.join( bindData )
    return {
        'table': table,
        'updateData': updateData,
        'columns': columns,
        'insertData': insertData,
        'queryData': queryData,
        'whereClaus': whereClaus,
        'bindData': bindData
    }
def multiplicities(pattern):
    """ Return a dictionary keyed by the geodesics in the given pattern, with
    values equal to the number of times the geodesic occurs (counted over
    consecutive runs: a run restarts the count when a different geodesic
    appears in between)."""
    g = geodesics(pattern)
    ans = {}
    # BUG FIX: the original used 0 as the "previous element" sentinel, which
    # raised a KeyError when the first geodesic compared equal to 0. A fresh
    # object() can never equal a real geodesic.
    prev = object()
    for i in g:
        if i == prev:
            ans[i] += 1
        else:
            prev = i
            ans[i] = 1
    return ans
def show_menu():
    """ Shows a menu """
    # NOTE: Python 2 print statements / raw_input -- this snippet targets a
    # Python 2 interpreter.
    print '================== ' + util.HEADER + 'WORKFLOW MENU' + util.ENDC + ' =================='
    print '1) Development - Create a git branch off of staging'
    print '2) Merge - Merge your development branch to staging (GitHub)'
    print '3) Build - Builds project, and attempts to reseed your database for you'
    print '4) Setup - Setups project'
    print '5) Exit - Exits workflow'
    print '==================================================='
    choice = raw_input('Enter in a number (1-5): ')
    # Re-prompt until the user supplies one of the five valid choices.
    while choice not in ['1', '2', '3', '4', '5']:
        choice = raw_input(util.FAIL + 'Invalid Input! ' + util.ENDC + 'Please enter in a number (1-5): ')
    return choice
def imap4_utf7_decode(data):
    """Decode a folder name from IMAP modified UTF-7 encoding to unicode.
    Input is bytes (Python 3) or str (Python 2); output is always
    unicode. If non-bytes/str input is provided, the input is returned
    unchanged.
    """
    # NOTE(review): contrary to the docstring, non-bytes input is NOT
    # returned unchanged -- it is re-encoded into a bytearray and returned
    # without UTF-7 decoding. Confirm the intended contract with callers
    # before changing either the code or the docstring.
    if not isinstance(data, bytes):
        return bytearray(data, 'utf-8')
    return imap_utf7_codec.imap4_utf7_decode(data)
def get_characters(character_path, character_dim):
    """Reads list of characters .txt file and returns embedding matrix and mappings from characters to character ids.
    Input:
      character_path: path to characters.txt
      character_dim: integer
    Returns:
      emb_matrix: Numpy array shape (len(characters)+2, character_dim) containing glove embeddings
        (plus PAD and UNK embeddings in first two rows).
        The rows of emb_matrix correspond to the word ids given in char2id and id2char
      char2id: dictionary mapping char (string) to char id (int)
      id2char: dictionary mapping char id (int) to char (string)
    """
    # NOTE: Python 2 print statement -- this snippet targets Python 2.
    print "Loading characters from file: %s" % character_path
    # Expected number of characters in the vocabulary file (excluding the
    # special start tokens added below).
    vocab_size = 932
    char2id = {}
    id2char = {}
    # put start tokens in the dictionaries
    idx = 0
    for word in _START_VOCAB:
        char2id[word] = idx
        id2char[idx] = word
        idx += 1
    # go through glove vecs
    with open(character_path, 'r') as fh:
        for line in tqdm(fh, total=vocab_size):
            char = line.strip()
            char2id[char] = idx
            id2char[idx] = char
            idx += 1
    # Sanity checks: every character got a unique id and the counts line up.
    final_vocab_size = vocab_size + len(_START_VOCAB)
    assert len(char2id) == final_vocab_size
    assert len(id2char) == final_vocab_size
    assert idx == final_vocab_size
    # Character embeddings are randomly initialized (learned downstream).
    emb_matrix = np.random.randn(final_vocab_size, character_dim)
    return emb_matrix, char2id, id2char
def create_data_storage(dxres):
    """
    Creates a DataStorage record for the given DNAnexus sequencing results.
    Args:
        dxres: `scgpm_seqresults_dnanexus.dnanexus_utils.du.DxSeqResults()` instance that contains
            sequencing results metadata in DNAnexus for the given srun.
    Returns:
        `dict`. The response from the server containing the JSON serialization of the new
        DataStorage record, or the existing record if one with the same name
        is already present (idempotent).
    """
    logger.debug("In create_data_storage().")
    payload = {}
    # Look up by name first; the payload is then extended in place with the
    # remaining creation fields, so the order of these assignments matters.
    payload["name"] = dxres.dx_project_name
    exists = models.DataStorage.find_by(payload=payload)
    if exists:
        return exists
    payload["project_identifier"] = dxres.dx_project_id
    payload["data_storage_provider_id"] = models.DataStorageProvider("DNAnexus").id
    # Create DataStorage
    res_json = models.DataStorage.post(payload)
    return res_json
def french_to_english(french_text: str) -> str:
    """Translate French text to English.

    Parameters
    ----------
    french_text : str
        french text to translate

    Returns
    -------
    str
        translated text
    """
    translator = translator_instance()
    # The fr-en model returns a list of translations; the first entry holds
    # the translated string.
    response = translator.translate(text=french_text, model_id="fr-en").get_result()
    return response["translations"][0]["translation"]
def test_section_delete(api):
    """ Verify the ``section_delete`` method call """
    SECTION_ID = 1234
    # The mocked transport returns an empty JSON object for a deletion.
    api._session.request.return_value = {}
    section = api.section_delete(SECTION_ID)
    exp_call = mock.call(method=POST,
                         path=AP['delete_section'].format(section_id=SECTION_ID))
    assert isinstance(section, dict)
    assert section == dict()
    # Exactly this POST must have been issued to the delete-section endpoint.
    assert api._session.request.call_args == exp_call
async def start_select_city(msg: Message, lenta: LentaClient, repo: Repo, state: FSMContext):
    """
    Start of the store-selection flow:
    display the available cities as an inline keyboard.
    """
    city_keyboard = await services.store.get_inline_keyboard_for_cities(lenta)
    # Move the FSM into the city-selection state before replying.
    await AddStoreForm.select_city.set()
    # Save the message ID in FSM storage so it can be deleted later from
    # outside the inline keyboard.
    await msg.answer("Выбор магазина", reply_markup=SEND_LOCATION)
    choice_city_msg = await msg.answer("Список доступных городов", reply_markup=city_keyboard)
    await state.update_data(message_id=choice_city_msg.message_id)
def prefit_histograms(
    rex_dir: str | Path,
    samples: Iterable[str],
    region: str,
    fit_name: str = "tW",
) -> dict[str, TH1]:
    """Retrieve sample prefit histograms for a region.

    Parameters
    ----------
    rex_dir : str or pathlib.Path
        Path of the TRExFitter result directory
    samples : Iterable(str)
        Physics samples of the desired histograms
    region : str
        Region to get histograms for
    fit_name : str
        Name of the Fit

    Returns
    -------
    dict(str, tdub.root.TH1)
        Prefit histograms (entries may be None when missing; a warning is
        logged in that case).
    """
    root_path = Path(rex_dir) / "Histograms" / f"{fit_name}_{region}_histos.root"
    root_file = uproot.open(root_path)
    histograms = {}
    for samp in samples:
        h = prefit_histogram(root_file, samp, region)
        if h is None:
            # log.warning instead of the deprecated log.warn alias.
            log.warning(f"Histogram for sample {samp} in region: {region} not found")
        histograms[samp] = h
    return histograms
def ymstring2mjd( ymstr ):
    """
    The `ymstring2mjd` function enables array input.
    Documentation see the `_ymstring2mjd` function.

    Accepts a scalar or array of year-month strings; returns a float array of
    MJD values with NaN for entries that fail to parse.
    """
    ymstr = np.array(ymstr, ndmin=1)
    ymstr_count = np.size(ymstr)
    # np.float64 replaces np.float_, an alias removed in NumPy 2.0.
    mjd = np.zeros(ymstr_count, dtype=np.float64)
    for ix in range(ymstr_count):
        try:
            mjd[ix] = _ymstring2mjd(ymstr[ix])
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; any parse failure maps to NaN.
            mjd[ix] = np.nan
    return mjd
def average(l):
    """ Computes average of 2-D list (column-wise mean). """
    row_count = float(len(l))
    # zip(*l) transposes the rows into columns; average each column.
    return [sum(column) / row_count for column in zip(*l)]
def slugify3(text, delim=u'-'):
    """Generates an ASCII-only slug."""
    # Split on punctuation, transliterate each piece to ASCII with unidecode,
    # and re-join the resulting words with ``delim``.
    result = []
    for word in _punct_re.split(text.lower()):
        result.extend(unidecode(word).split())
    # NOTE(review): ``unicode`` exists only on Python 2 -- this line raises
    # NameError on Python 3; confirm the target interpreter before porting.
    return unicode(delim.join(result))
def test_construct_graph():
    """Example-based test that graph construction works correctly.
    Uses 4hhb PDB file as an example test case.
    """
    pdb_file = Path(__file__).parent / "test_data" / "4hhb.pdb"
    graph = construct_graph(pdb_path=str(pdb_file))
    assert isinstance(graph, nx.Graph)
    assert len(graph) == 574
    # Count the edges annotated as peptide bonds.
    peptide_bonds = [
        (u, v)
        for u, v, attrs in graph.edges(data=True)
        if attrs["kind"] == {"peptide_bond"}
    ]
    assert len(peptide_bonds) == 570
def create_phase2_edgemerge_table():
    """phase2_edgemerge retrieves the initial phase 2 ud_edges coordinate from the u_edges and d_edges coordinates.

    Builds (or loads from the cached ``phase2_edgemerge`` file) the lookup
    array ``u_edges_plus_d_edges_to_ud_edges``. Only U/D-layer edge
    permutations reachable in phase 2 are enumerated; combinations where a
    U-edge and a D-edge would occupy the same slot are rejected as
    collisions.
    """
    fname = "phase2_edgemerge"
    global u_edges_plus_d_edges_to_ud_edges
    c_u = cb.CubieCube()
    c_d = cb.CubieCube()
    c_ud = cb.CubieCube()
    edge_u = [Ed.UR, Ed.UF, Ed.UL, Ed.UB]
    edge_d = [Ed.DR, Ed.DF, Ed.DL, Ed.DB]
    edge_ud = [Ed.UR, Ed.UF, Ed.UL, Ed.UB, Ed.DR, Ed.DF, Ed.DL, Ed.DB]
    if not path.isfile(fname):
        cnt = 0
        print("creating " + fname + " table...")
        u_edges_plus_d_edges_to_ud_edges = ar.array('H', [0 for i in range(N_U_EDGES_PHASE2 * N_PERM_4)])
        for i in range(N_U_EDGES_PHASE2):
            c_u.set_u_edges(i)
            for j in range(N_CHOOSE_8_4):
                c_d.set_d_edges(j * N_PERM_4)
                invalid = False
                for e in edge_ud:
                    c_ud.ep[e] = -1  # invalidate edges
                    if c_u.ep[e] in edge_u:
                        c_ud.ep[e] = c_u.ep[e]
                    if c_d.ep[e] in edge_d:
                        c_ud.ep[e] = c_d.ep[e]
                    if c_ud.ep[e] == -1:
                        invalid = True  # edge collision
                        break
                if not invalid:
                    # Only the complementary d_edges combination survives the
                    # collision check for a given i, so indexing by i and k
                    # alone is presumably sufficient -- verify against the
                    # coordinate definitions if modifying.
                    for k in range(N_PERM_4):
                        c_d.set_d_edges(j * N_PERM_4 + k)
                        for e in edge_ud:
                            if c_u.ep[e] in edge_u:
                                c_ud.ep[e] = c_u.ep[e]
                            if c_d.ep[e] in edge_d:
                                c_ud.ep[e] = c_d.ep[e]
                        u_edges_plus_d_edges_to_ud_edges[N_PERM_4 * i + k] = c_ud.get_ud_edges()
                        cnt += 1
                        if cnt % 2000 == 0:
                            print('.', end='', flush=True)
        print()
        fh = open(fname, "wb")
        u_edges_plus_d_edges_to_ud_edges.tofile(fh)
        fh.close()
        print()
    else:
        # Cached table exists: load it instead of regenerating.
        fh = open(fname, "rb")
        u_edges_plus_d_edges_to_ud_edges = ar.array('H')
        u_edges_plus_d_edges_to_ud_edges.fromfile(fh, N_U_EDGES_PHASE2 * N_PERM_4)
def teacher_registeration(request):
    """
    Info: Registeration for the teacher.
    Request-Body: email_id -> str
                  password -> str
                  image -> file
                  name -> str
                  date_of_birth -> str (format DD-MM-YYYY)
                  education_qualification -> JSON
    Response: message -> str (201 on success)
    Raises: AlreadyExistsException when the email is already registered.
    """
    email = request.data.get("email")
    password = request.data.get("password")
    image = request.data.get("image")
    gender = request.data.get("gender")
    name = request.data.get("name")
    date_of_birth = request.data.get("date_of_birth")
    education_qualification = request.data.get("education_qualification")
    # Validate required fields before touching the database.
    check_for_empty(
        email, gender, password, date_of_birth, education_qualification, name
    )
    if Teacher.objects.filter(email=email).exists():
        raise AlreadyExistsException("Email-Id already exists")
    # Password is stored encrypted with the application secret.
    password = cryptocode.encrypt(password, CRYPTO_SECRET_KEY)
    date_of_birth = datetime.strptime(date_of_birth, "%d-%m-%Y").date()
    teacher = Teacher(
        email=email,
        gender=gender,
        password=password,
        date_of_birth=date_of_birth,
        name=name,
        education_qualification=education_qualification,
    )
    # Upload the profile image first so its URL can be saved on the record.
    image_url = get_image_url_and_upload(image, teacher)
    teacher.image_url = image_url
    teacher.save()
    send_verfication_mail(teacher)
    response = {"message": "Verfication Mail Sent"}
    return JsonResponse(response, status=201)
def check_response(response):
    """ Checks that a response is successful, raising the appropriate Exceptions otherwise.

    Returns True for informational/success status codes; raises AuthError for
    401/403, RequestError for 400, and APIError for anything else.
    """
    status_code = response.status_code
    # BUG FIX: the original upper bound ``< 299`` wrongly rejected status 299,
    # a valid 2xx success code; all codes up to 299 inclusive are success.
    if 100 < status_code < 300:
        return True
    elif status_code == 401 or status_code == 403:
        message = get_response_data(response)
        raise AuthError('Access Token Error, Received ' + str(status_code) +
                        ' from Outlook REST Endpoint with the message: {}'.format(message))
    elif status_code == 400:
        message = get_response_data(response)
        raise RequestError('The request made to the Outlook API was invalid. Received the following message: {}'.
                           format(message))
    else:
        message = get_response_data(response)
        raise APIError('Encountered an unknown error from the Outlook API: {}'.format(message))
def walk_storage_from_command(command: instances.FilesRelatedCommand,
                              filesystem: Filesystem
                              ) -> Iterator[Tuple[str, str, str]]:
    """Iterate the storage folder according to the command's settings."""
    return walk(
        command.storage_folder,
        filesystem,
        command.branch,
        command.leaf,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.