| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import numpy as np
def getTensorRelativError(tA, pA):
    """Get the error between two tensors as the maximum absolute elementwise difference."""
pA_shape = np.shape(pA)
tA_shape = np.shape(tA)
assert (pA_shape == tA_shape), "Arrays must be same shape"
err = np.max(np.abs(np.array(pA)-np.array(tA)))
return err
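# A brief usage sketch (illustrative values only, not from the original source):
target = np.array([[1.0, 2.0], [3.0, 4.0]])
predicted = np.array([[1.0, 2.1], [2.9, 4.0]])
print(getTensorRelativError(target, predicted))  # ~0.1, the largest elementwise deviation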
|
ea79bcebd5a39c020cdb5b35ee36dfe20b2f9c71
| 3,643,700
|
import itertools
def eliminations(rct_gras, prd_gras):
""" find eliminations consistent with these reactants and products
:param rct_gras: reactant graphs (must have non-overlapping keys)
:param prd_gras: product graphs (must have non-overlapping keys)
Eliminations are identified by forming a bond between an attacking heavy
atom and another atom not initially bonded to it, forming a ring. The bond
adjacent to the attacked atom is then broken, along with a second bond in
the ring, downstream of the attacking heavy atom, away from the attacked
atom.
"""
_assert_is_valid_reagent_graph_list(rct_gras)
_assert_is_valid_reagent_graph_list(prd_gras)
rxns = []
if len(rct_gras) == 1 and len(prd_gras) == 2:
rct_gra, = rct_gras
prds_gra = union_from_sequence(prd_gras)
ngb_keys_dct = atoms_neighbor_atom_keys(rct_gra)
frm1_keys = atom_keys(rct_gra, excl_syms=('H',))
frm2_keys = atom_keys(rct_gra)
bnd_keys = bond_keys(rct_gra)
frm_bnd_keys = [(frm1_key, frm2_key) for frm1_key, frm2_key
in itertools.product(frm1_keys, frm2_keys)
if frm1_key != frm2_key and
not frozenset({frm1_key, frm2_key}) in bnd_keys]
for frm1_key, frm2_key in frm_bnd_keys:
            # Form a bond between the attacking heavy atom and the attacked atom
gra_ = add_bonds(rct_gra, [(frm2_key, frm1_key)])
# Get keys to the ring formed by this extra bond
rng_keys = next((ks for ks in rings_atom_keys(gra_)
if frm2_key in ks and frm1_key in ks), None)
# Eliminations (as far as I can tell) only happen through TSs with
# 3- or 4-membered rings
if rng_keys is not None and len(rng_keys) < 5:
frm1_ngb_key, = ngb_keys_dct[frm1_key] & set(rng_keys)
frm2_ngb_key, = ngb_keys_dct[frm2_key] & set(rng_keys)
# Break the bonds on either side of the newly formed bond
gra_ = remove_bonds(gra_, [(frm1_key, frm1_ngb_key)])
gra_ = remove_bonds(gra_, [(frm2_key, frm2_ngb_key)])
inv_dct = isomorphism(gra_, prds_gra)
if inv_dct:
f_frm_bnd_key = (frm1_key, frm2_key)
f_brk_bnd_key1 = (frm1_key, frm1_ngb_key)
f_brk_bnd_key2 = (frm2_key, frm2_ngb_key)
inv_ = inv_dct.__getitem__
b_frm_bnd_key1 = tuple(map(inv_, f_brk_bnd_key1))
b_frm_bnd_key2 = tuple(map(inv_, f_brk_bnd_key2))
b_brk_bnd_key = tuple(map(inv_, f_frm_bnd_key))
forw_tsg = ts.graph(rct_gra,
frm_bnd_keys=[f_frm_bnd_key],
brk_bnd_keys=[f_brk_bnd_key1,
f_brk_bnd_key2])
back_tsg = ts.graph(prds_gra,
frm_bnd_keys=[b_frm_bnd_key1,
b_frm_bnd_key2],
brk_bnd_keys=[b_brk_bnd_key])
rcts_atm_keys = list(map(atom_keys, rct_gras))
prds_atm_keys = list(map(atom_keys, prd_gras))
if inv_dct[frm2_key] not in prds_atm_keys[1]:
prds_atm_keys = list(reversed(prds_atm_keys))
# Create the reaction object
rxns.append(Reaction(
rxn_cls=par.ReactionClass.ELIMINATION,
forw_tsg=forw_tsg,
back_tsg=back_tsg,
rcts_keys=rcts_atm_keys,
prds_keys=prds_atm_keys,
))
return ts_unique(rxns)
|
3acfb77d48223e1e31f7ce9b563bc5d86102b5b2
| 3,643,701
|
def sigmoid(*columns):
"""Fit a Sigmoid through the data of the last scan.
The return value is a pair of tuples::
((a, b, x0, c), (d_a, d_b, d_x0, d_c))
    where the elements of the second tuple are the estimated standard errors of the
fit parameters. The fit parameters are:
* a - amplitude of the Sigmoid
* b - steepness of the curve
* x0 - center
* c - background
    If the fit failed, the result is ``(None, None)``.
Example::
cscan(...)
values, stderr = sigmoid('h', 'adet')
"""
xs, ys, dys, _, ds = _getData(columns)
fit = SigmoidFit()
res = fit.run(xs, ys, dys)
if res._failed:
return None, None
session.notifyFitCurve(ds, 'sigmoid', res.curve_x, res.curve_y)
descrs = ['amplitude', 'steepness', 'center', 'background']
vals = []
for par, err, descr in zip(res._pars[1], res._pars[2], descrs):
vals.append((descr, '%.4f' % par, '%.4f' % err))
printTable(('parameter', 'value', 'error'), vals, session.log.info)
return CommandLineFitResult((tuple(res._pars[1]), tuple(res._pars[2])))
|
e9d031d07a8ef00b73634bb44ed6a94a1788f7c9
| 3,643,702
|
import os
import os.path as op
import subprocess as sp
import sys
import traceback
from .utils import (
get_conda_package_list,
get_required_conda_version,
update_installed_pkg_metadata,
update_metarecipe_metadata,
)
def _install(
bz2,
recipe_name,
debug=False,
meta_recipe=False,
env_var_dir="",
env_var_file="",
parent_name="",
commands_file="",
):
"""Method to install a local pre-built package to ensure package installs correctly
_install
========
This method is used to install a pre-built ggd package. conda build was used to turn the ggd recipe into a
ggd package. This script will take the locally built ggd package and install it. This method is used to
ensure the package installs correctly.
Parameters:
-----------
1) bz2: (str) The bz2 tarball package file created from the conda build
2) recipe_name: (str) The name of the ggd recipe/package
3) debug: (bool) Whether or not to set logging level to debug
4) meta_recipe: (bool) Whether or not the recipe is a meta recipe
5) env_var_dir: (str) The path to the meta-recipe tmp env var
6) env_var_file: (str) The file path to the meta-recipe tmp env var json file
7) parent_name: (str) If a meta-recipe, the name of the parent meta-recipe
8) commands_file: (str) The path to the subsetted commands used for the specific Meta-Recipe ID if meta-recipe
Returns:
+++++++
1) True if the installation was successful and the package was not already installed on the system
2) False if the package has already been installed on the system
3) If the installation fails program exits. ggd data handling is initiated to remove any new/updated files from the installation process
"""
conda_version, equals = get_required_conda_version()
conda_install = "{}conda{}{}{}".format('"', equals, conda_version, '"')
## See if it is already installed
if recipe_name in get_conda_package_list(conda_root(), include_local=True).keys():
return False
## Set CONDA_SOURCE_PREFIX environment variable
os.environ["CONDA_SOURCE_PREFIX"] = conda_root()
## base install command
install_command = ["conda", "install", "-v", "--use-local", "-y", recipe_name]
## check if debug flag needs to be added
if debug:
install_command.append("-v")
## Check if specific conda version needs to be added
if conda_version != -1:
install_command.append(conda_install)
## Install the new recipe
try:
sp.check_call(install_command, stderr=sys.stderr, stdout=sys.stdout)
## Update meta-recipe bz2 if meta-recipe
if meta_recipe:
import json
import shutil
## Check for meta-recipe environment variables
if op.exists(env_var_file):
print(
"\n:ggd:check-recipe: Loading Meta-Recipe ID specific environment variables"
)
## Load environment variables from json file
meta_env_vars = json.load(open(env_var_file))
commands_str = (
"\n".join([x.strip() for x in open(commands_file, "r")])
if op.exists(commands_file)
else ""
)
## Remove temp dir with json file
shutil.rmtree(env_var_dir)
## Update bz2 file
success, new_bz2_path = update_metarecipe_metadata(
pkg_name=recipe_name,
env_var_dict=meta_env_vars,
parent_name=parent_name,
final_file_list=[],
final_file_size_dict={},
commands_str=commands_str,
)
assert (
success
), ":ggd:check-recipe: !!ERROR!! There was a problem updating the meta-recipe metadata"
except Exception as e:
print(e)
print(
"\n:ggd:check-recipe: %s did not install properly. \n\n\t->Error message:\n"
% recipe_name
)
print(traceback.format_exc())
## Remove ggd files
recipe_dict = get_recipe_from_bz2(bz2)
species = recipe_dict["about"]["identifiers"]["species"]
genome_build = recipe_dict["about"]["identifiers"]["genome-build"]
version = recipe_dict["package"]["version"]
name = recipe_dict["package"]["name"]
ggd_jdict = {
"packages": {
name: {
"identifiers": {"species": species, "genome-build": genome_build},
"version": version,
}
}
}
try:
check_for_installation(
[recipe_name], ggd_jdict
) ## .uninstall method to remove extra ggd files
except Exception as e:
print(e)
print(
"\n:ggd:check-recipe: Review the STDOUT and STDERR, correct the errors, and re-run $ggd check-recipe\n"
)
## Exit
sys.exit(1)
## Update installed metadata
print("\n:ggd:check-recipe: Updating installed package list")
update_installed_pkg_metadata(
remove_old=False, add_packages=[recipe_name], include_local=True
)
return True
|
57aa3395ca6614c08606899cc900908dfde47c94
| 3,643,703
|
import re
def MakeSamplesFromOutput(metadata, output):
"""Create samples containing metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
Example output:
perfkitbenchmarker/tests/linux_benchmarks/nccl_benchmark_test.py
Returns:
Samples containing training metrics, and the bandwidth
"""
samples = []
metadata.update(_SAMPLE_LINE_RE.match(output).groupdict())
results = regex_util.ExtractAllMatches(
r'(Rank\s+\d+) (.*)', output)
for rank, device in results:
metadata[rank] = device
results = regex_util.ExtractAllMatches(
r'^\s*'
r'(\d+)\s+'
r'(\d+)\s+'
r'(\w+)\s+'
r'(\w+)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\S+)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\S+)', output, re.MULTILINE)
max_out_of_place_algbw = 0
for row in results:
metadata_copy = metadata.copy()
metadata_copy.update(zip(_METADATA_COLUMNS, row))
for metric, metadata_key in sorted(_SAMPLE_NAMES.items()):
samples.append(sample.Sample(metric, float(metadata_copy[metadata_key]),
'GB/s', metadata_copy))
# Gbps is gigaBIT per second and GB/s is gigaBYTE per second
max_out_of_place_algbw = max(max_out_of_place_algbw,
float(metadata_copy['out_of_place_algbw']))
avg_bus_bandwidth = regex_util.ExtractExactlyOneMatch(
r'Avg bus bandwidth\s+: ([0-9\.]+)', output)
samples.append(sample.Sample('avg_busbw', float(avg_bus_bandwidth),
'GB/s', metadata))
samples.append(sample.Sample('max_out_of_place_algbw',
max_out_of_place_algbw * 8, 'Gbps', metadata))
return samples, max_out_of_place_algbw
|
2210caaf37a2fbfe768133e767754fb600435b0b
| 3,643,704
|
import tempfile
import os
def _write_bytes_to_temporary_file(local_path):
"""if `local_path` is a file-like object, write the contents to an *actual* file and
return a pair of new local filename and a function that removes the temporary file when called."""
if hasattr(local_path, "read"):
# `local_path` is a file-like object
local_bytes = local_path
local_bytes.seek(0) # reset internal pointer
temp_file, local_path = tempfile.mkstemp(suffix="-threadbare")
with os.fdopen(temp_file, "wb") as fh:
data = local_bytes.getvalue()
# data may be a string or it may be bytes.
# if it's a string we assume it's a UTF-8 string.
# skip entirely if we're on Python2
if isinstance(data, str) and common.PY3:
data = bytes(data, "utf-8")
fh.write(data)
cleanup = lambda: os.unlink(local_path)
return local_path, cleanup
return local_path, None
|
96c8796b3b93b84568b7c0467c0e49e91259a6e3
| 3,643,705
|
def tree_to_newick_rec(cur_node):
""" This recursive function is a helper function to generate the Newick string of a tree. """
items = []
num_children = len(cur_node.descendants)
for child_idx in range(num_children):
s = ''
sub_tree = tree_to_newick_rec(cur_node.descendants[child_idx])
if sub_tree != '':
s += '(' + sub_tree + ')'
s += cur_node.descendants[child_idx].name
items.append(s)
return ','.join(items)
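# Usage sketch with a minimal hypothetical node class; the only interface the function
# relies on is the `name` and `descendants` attributes assumed here.
class _Node:
    def __init__(self, name, descendants=()):
        self.name = name
        self.descendants = list(descendants)

root = _Node("root", [_Node("internal", [_Node("A"), _Node("B")]), _Node("C")])
newick = "(" + tree_to_newick_rec(root) + ")" + root.name + ";"
print(newick)  # ((A,B)internal,C)root;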
|
751d46dbb4e3a5204900601164410b5bf7f0578b
| 3,643,706
|
import torch
def mdetr_efficientnetB3(pretrained=False, return_postprocessor=False):
"""
MDETR ENB3 with 6 encoder and 6 decoder layers.
Pretrained on our combined aligned dataset of 1.3 million images paired with text.
"""
model = _make_detr("timm_tf_efficientnet_b3_ns")
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://zenodo.org/record/4721981/files/pretrained_EB3_checkpoint.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
|
0e80da12a9fec55ccdbfdb3dcf78806bff2c6f20
| 3,643,707
|
import math
import itertools
def _check_EJR_brute_force(profile, committee):
"""
Test using brute-force whether a committee satisfies EJR.
Parameters
----------
profile : abcvoting.preferences.Profile
A profile.
committee : iterable of int
A committee.
Returns
-------
bool
"""
# should check for ell from 1 until committee size
ell_upper_bound = len(committee) + 1
# loop through all possible ell
for ell in range(1, ell_upper_bound):
        # list of voters with fewer than ell approved candidates in the committee
voters_less_than_ell_approved_candidates = []
# compute minimum group size for this ell
group_size = math.ceil(ell * (len(profile) / len(committee)))
        # compute list of voters to consider
for i, voter in enumerate(profile):
if len(voter.approved & committee) < ell:
voters_less_than_ell_approved_candidates.append(i)
        # check if an ell-cohesive group can be formed from the considered voters
if len(voters_less_than_ell_approved_candidates) < group_size:
# if not possible then simply continue with next ell
continue
# check all possible combinations of considered voters,
# taken (possible group size) at a time
for combination in itertools.combinations(
voters_less_than_ell_approved_candidates, group_size
):
# to calculate the cut of approved candidates for the considered voters
# initialize the cut to be the approval set of the first candidate in current
# combination
cut = set(profile[combination[0]].approved)
# calculate the cut over all voters for current combination
# (also can skip first voter in combination, but inexpensive enough...)
for j in combination:
cut = cut & profile[j].approved
# if size of cut is >= ell, then combination is an ell-cohesive group
if len(cut) >= ell:
# we have found combination to be an ell-cohesive set, with no voter having
# at least ell approved candidates in committee. Thus EJR fails
detailed_information = {
"cohesive_group": voters_less_than_ell_approved_candidates,
"ell": ell,
"joint_candidates": cut,
}
return False, detailed_information
# if function has not returned by now, then it means that for all ell,
# no ell-cohesive group was found among candidates with less than ell
# approved candidates in committee. Thus committee satisfies EJR
detailed_information = {}
return True, detailed_information
|
f993daf9628ecad18fe25894ccd5fb8882d3e596
| 3,643,708
|
def read_stanford_labels():
"""Read stanford hardi data and label map"""
# First get the hardi data
fetch_stanford_hardi()
hard_img, gtab = read_stanford_hardi()
# Fetch and load
files, folder = fetch_stanford_labels()
labels_file = pjoin(folder, "aparc-reduced.nii.gz")
labels_img = nib.load(labels_file)
return hard_img, gtab, labels_img
|
8c9e5a3586125e7ffe3f9fe36b734ca7c19de53f
| 3,643,709
|
import urllib
def _WrapRequestForUserAgentAndTracing(http_client, trace_token,
trace_email,
trace_log,
gcloud_ua):
"""Wrap request with user-agent, and trace reporting.
Args:
http_client: The original http object.
trace_token: str, Token to be used to route service request traces.
trace_email: str, username to which service request traces should be sent.
    trace_log: bool, Enable/disable server-side logging of service requests.
gcloud_ua: str, User agent string to be included in the request.
Returns:
http, The same http object but with the request method wrapped.
"""
orig_request = http_client.request
def RequestWithUserAgentAndTracing(*args, **kwargs):
"""Wrap request with user-agent, and trace reporting.
Args:
*args: Positional arguments.
**kwargs: Keyword arguments.
Returns:
Wrapped request method with user-agent and trace reporting.
"""
modified_args = list(args)
# Use gcloud specific user-agent with command path and invocation-id.
# Pass in the user-agent through kwargs or args.
def UserAgent(current=''):
user_agent = '{0} {1}'.format(current, gcloud_ua)
return user_agent.strip()
cur_ua = RequestArgsGetHeader(modified_args, kwargs, 'user-agent', '')
RequestArgsSetHeader(modified_args, kwargs,
'user-agent', UserAgent(cur_ua))
# Modify request url to enable requested tracing.
url_parts = urlparse.urlsplit(args[0])
query_params = urlparse.parse_qs(url_parts.query)
if trace_token:
query_params['trace'] = 'token:{0}'.format(trace_token)
elif trace_email:
query_params['trace'] = 'email:{0}'.format(trace_email)
elif trace_log:
query_params['trace'] = 'log'
# Replace the request url in the args
modified_url_parts = list(url_parts)
modified_url_parts[3] = urllib.urlencode(query_params, doseq=True)
modified_args[0] = urlparse.urlunsplit(modified_url_parts)
return orig_request(*modified_args, **kwargs)
http_client.request = RequestWithUserAgentAndTracing
# apitools needs this attribute to do credential refreshes during batch API
# requests.
if hasattr(orig_request, 'credentials'):
setattr(http_client.request, 'credentials', orig_request.credentials)
return http_client
|
d6a2a4c127670aa8409cb39a81833a5a5e3fba90
| 3,643,710
|
def indexData_x(x, ukn_words):
"""
Map each word in the given data to a unique integer. A special index will be kept for "out-of-vocabulary" words.
    :param x: The data
    :param ukn_words: Token used to represent out-of-vocabulary words
    :return: Two dictionaries: one mapping each word to its index, and a "reversed" one mapping each index back to its word
"""
# Retrieve all words used in the data (with duplicates)
all_text = [w for e in x for w in e]
# Create a DETERMINISTIC set of all words
used = set()
words = [x for x in all_text if x not in used and (used.add(x) or True)]
print("Number of entries: ",len(all_text))
print("Individual entries: ",len(words))
# Assign an integer index for each individual word
word2ind = {word: index for index, word in enumerate(words, 2)}
ind2word = {index: word for index, word in enumerate(words, 2)}
# To deal with out-of-vocabulary words
word2ind.update({ukn_words:1})
ind2word.update({1:ukn_words})
# The index '0' is kept free in both dictionaries
return word2ind, ind2word
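# Usage sketch with toy tokenized sentences (illustrative data only):
sentences = [["the", "cat", "sat"], ["the", "dog", "ran"]]
word2ind, ind2word = indexData_x(sentences, ukn_words="<UNK>")
# Distinct words get indexes starting at 2; index 1 is reserved for "<UNK>"
# and index 0 is left free (e.g. for padding).
print(word2ind["the"], ind2word[1])  # 2 <UNK>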
|
3f6ffd97d33400c3418b78ad3b383766cc07bee3
| 3,643,711
|
def BFS_TreeSearch(problem):
"""
Tree Search BFS
Args->problem: OpenAI Gym environment
Returns->(path, time_cost, space_cost): solution as a path and stats.
"""
node = Node(problem.startstate, None)
time_cost = 0
space_cost = 1
if node.state == problem.goalstate:
return build_path(node), time_cost, space_cost
frontier = NodeQueue()
frontier.add(node)
while not frontier.is_empty():
current = frontier.remove()
for action in range(problem.action_space.n):
time_cost += 1
child = Node(problem.sample(current.state, action), current)
if(child.state == problem.goalstate):
return build_path(child), time_cost, space_cost # solution
frontier.add(child)
space_cost = max(space_cost,len(frontier))
return None, time_cost, space_cost
|
b2523dea8b9813e2582a0acef27b0d46bb1a14b9
| 3,643,712
|
import os
def remove_metaRotation(gA_rot: GeoArray, rspAlg='cubic') -> GeoArray:
"""Remove any metadata rotation (a rotation that only exists in the map info)."""
gA = GeoArray(*warp_ndarray(gA_rot[:], gA_rot.gt, gA_rot.prj,
rspAlg=rspAlg,
# out_gsd=(gA_rot.xgsd, gA_rot.ygsd)
),
nodata=gA_rot.nodata)
gA.basename = os.path.basename(gA.basename)
    gA.meta = gA_rot.meta  # carry the metadata over from the input array
return gA
|
354ab23a2514c394c10db3d46ac57ce42660fa15
| 3,643,713
|
from typing import Tuple
from typing import Optional
def _objective_function(extra_features: jnp.ndarray,
media_mix_model: lightweight_mmm.LightweightMMM,
media_input_shape: Tuple[int,
int], media_gap: Optional[int],
target_scaler: Optional[preprocessing.CustomScaler],
media_scaler: preprocessing.CustomScaler,
geo_ratio: jnp.array,
seed: Optional[int],
media_values: jnp.ndarray) -> jnp.float64:
"""Objective function to calculate the sum of all predictions of the model.
Args:
extra_features: Extra features the model requires for prediction.
media_mix_model: Media mix model to use. Must have a predict method to be
used.
media_input_shape: Input shape of the data required by the model to get
predictions. This is needed since optimization might flatten some arrays
and they need to be reshaped before running new predictions.
media_gap: Media data gap between the end of training data and the start of
the out of sample media given. Eg. if 100 weeks of data were used for
training and prediction starts 2 months after training data finished we
need to provide the 8 weeks missing between the training data and the
prediction data so data transformations (adstock, carryover, ...) can take
place correctly.
target_scaler: Scaler that was used to scale the target before training.
media_scaler: Scaler that was used to scale the media data before training.
geo_ratio: The ratio to split channel media across geo. Should sum up to 1
for each channel and should have shape (c, g).
seed: Seed to use for PRNGKey during sampling. For replicability run
this function and any other function that gets predictions with the same
seed.
media_values: Media values required by the model to run predictions.
Returns:
The negative value of the sum of all predictions.
"""
if hasattr(media_mix_model, "n_geos") and media_mix_model.n_geos > 1:
media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1)
media_values = jnp.tile(
media_values / media_input_shape[0], reps=media_input_shape[0])
# Distribute budget of each channels across time.
media_values = jnp.reshape(a=media_values, newshape=media_input_shape)
media_values = media_scaler.transform(media_values)
return -jnp.sum(
media_mix_model.predict(
media=media_values.reshape(media_input_shape),
extra_features=extra_features,
media_gap=media_gap,
target_scaler=target_scaler,
seed=seed).mean(axis=0))
|
f8ae8185d84d811dc1d5796aa8374127d2f16ea5
| 3,643,714
|
from functools import reduce
from typing import Union
from warnings import warn
import numpy as np
def decode_block(block: np.ndarray) -> Union[np.ndarray, bool]:
"""
Decode a data block with hamming parity bits.
:param block: The data block to be decoded
:return the decoded data bits, False if the block is invalid
"""
if not block.size & block.size - 1 and block.size & 0x5555_5555:
_block = np.array(block.flat)
flip = reduce(lambda x, y: x ^ y, [i for i, bit in enumerate(_block) if bit] + [1, 1])
if flip:
if reduce(lambda x, y: x ^ y, _block):
warn('Two or more bit-flips occur, self-correction failed.')
warn("Single bit-flip at index {} corrected".format(flip))
_block[flip] = not _block[flip]
return np.array([bit for i, bit in enumerate(_block) if i and i & i - 1])
warn('Invalid block size.')
return False
|
c9ed9eb03271e2222aa62260461aaa7ee90eb842
| 3,643,715
|
def occupancy(meta, ax=None):
""" Show channel occupancy over time.
"""
if ax is None:
f, ax = plt.subplots()
f.set_figwidth(14)
f.suptitle("Occupancy over time")
start_time = meta.read_start_time.min() / 10000 / 60
end_time = meta.read_end_time.max() / 10000 / 60
total_minutes = end_time - start_time
num_channels = meta.channel_number.max()+1
X = np.zeros((num_channels, int(np.ceil(total_minutes))))
for channel, group in meta.groupby("channel_number"):
for index, read in group.iterrows():
a,b = read.read_start_time/10000/60, read.read_end_time / 10000 / 60
X[channel, round(a):round(b)] = 1
ax.imshow(X, aspect= total_minutes/1800, cmap="Greys")
ax.xaxis.set_label_text("Time (in minutes)")
ax.yaxis.set_label_text("Channel number")
return ax.get_figure(), ax
|
f6505a5bf7ff417194457ee2e04118edea9e6738
| 3,643,716
|
def reg1_r_characteristic(r, s, alpha, beta, c, h):
"""
evaluate x - ((4/3)r - (2/3)s)t in region 1, equation 19
"""
# when s < 0 the expression can be factored and you avoid the
# difference of nearly equal numbers and dividing by a small number
# equation 74
rr = r/c
ss = s/c
poly1 = 2.0*rr - ss
poly2 = 3.0*rr*rr - 2.0*rr*ss + ss*ss
poly3 = 4.0*rr**3 - 3.0*rr*rr*ss + 2.0*rr*ss*ss - ss**3
value = np.where(s <= 0., h*(1.0 - (2./3.)*poly2 + (32./135.)*poly3 + (4./9.)*alpha*(poly1 - poly2 + (4./15.)*poly3) - (2./9.)*beta*(poly2 - (8./15.)*poly3)),
evalPhip(r,alpha,beta,c,h)/(r + s + 1e-20) - (evalPhi(r,alpha,beta,c,h) - evalPhi(s,alpha,beta,c,h))/(r + s + 1e-20)**2 )
return value
|
9802483289387b7996665fe8f061d4393ff0daaf
| 3,643,717
|
from typing import Tuple
def olf_gd_offline_in_z(X: np.ndarray, k: int, rtol: float = 1e-6,
max_iter: int = 100000,
rectY: bool = False, rectZ: bool = False,
init: str = 'random', Y0=None, Z0=None,
verbose: bool = False, alpha=1,
cycle=500, rho=1, beta=0.1) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Gradient descent to calculate the solution of the olfactory
cost function
Parameters
----------
X:
input data matrix
k:
number of LNs
    alpha:
        the initial step size (learning rate) for the gradient updates
rtol:
relative tolerance
init:
initialization for the Y and Z matrices
Returns
------
Y and Z
Matrices minimizing the olf cost function
"""
D, N = X.shape # n_s is the number of samples
if init != 'given':
Y = get_init_matrix(D, N, method=init, A=X, scale=100, rect=rectY)
# Y = X # i wonder if you should do the simulation with the random matrix,
# so that is it not "cheating" by already taking a solution which resembles
# what you think the solution should be.
if rectY:
Y = rectify(Y)
# Y = X
Z = get_init_matrix(k, N, method=init, A=X, scale=100, rect=rectZ)
# Y = np.array(Y, dtype=FLOAT_TYPE)
# A = np.array(A, dtype=FLOAT_TYPE)
# X = np.array(X, dtype=FLOAT_TYPE, order='F')
# Y = np.array(Y, dtype=FLOAT_TYPE, order='F')
# Z = np.array(Z, dtype=FLOAT_TYPE, order='F')
else:
Y = Y0
Z = Z0
sigma = 0.1
cost0 = olf_cost(X, Y, Z, rho=rho)
cost2 = cost0.copy()
print(f'initial cost: {cost0}')
for j in range(max_iter):
Y_old = Y.copy()
cost_old = cost2
Y, cost2, successY, a1 = gd_step_Y(X, Y, Z, cost_old, sigma,
beta, alpha, rectY, rho=rho)
if not successY:
break
conv1 = np.amax(np.abs(Y - Y_old) / np.abs(Y_old + 1e-2))
d_cost1 = np.abs(cost_old - cost2) / np.abs(cost_old)
if d_cost1 < rtol and conv1 < rtol:
print(f'stopped y iteration because cost and Y stopped changing, {j}')
break
if j % cycle == 0 and j > 0:
alpha *= beta
cost0 = olf_cost(X, Y, Z, rho=rho)
cost2 = cost0.copy()
cost1 = cost0.copy()
print(f'cost after fixing Y: {cost0}')
# eye = np.eye(N)/N
costs = np.zeros(max_iter)
# eye = np.array(eye, dtype=FLOAT_TYPE, order='F')
if rectZ:
funcz = rectify
else:
funcz = lambda x: x
for i in range(max_iter):
# print(i, cost2)
costs[i] = cost2
Y_old = Y.copy()
Z_old = Z.copy()
cost_old2 = cost2.copy()
grad_Z = get_grad_Z(Y, Z, rho=rho)
# grad_Z = -get_grad_Z2(X, Z, rho=rho)
alpha_z = alpha
while alpha_z > EPSILON * 0.000001:
successZ = False
Z_new = funcz(Z_old + alpha_z * grad_Z)
# expected_cost_increase = sigma * np.sum(grad_Z * (Z_new - Z_old))
alpha_y = alpha
Y = Y_old.copy()
cost1 = olf_cost(X, Y, Z_new, rho=rho)
# print(alpha_z, cost1)
for j in range(max_iter):
# print(j, cost1)
Y_old2 = Y.copy()
cost_old1 = cost1
Y, cost1, successY, a1 = gd_step_Y(X, Y_old2, Z_new, cost_old1, sigma,
beta, alpha_y, rectY, rho=rho)
if not successY:
# print('y iteration not successful')
break
conv1 = np.amax(np.abs(Y - Y_old2) / np.abs(Y_old2 + 1e-2))
d_cost1 = np.abs(cost_old1 - cost1) / np.abs(cost_old1)
# print(conv1, d_cost1)
if d_cost1 < rtol and conv1 < rtol:
# print(f'stopped y iteration because cost and Y'
# f'stopped changing, {j}, {alpha_y}')
break
# print(f'i, j: {i}, {j}, after y iteration: costs: {cost2}, {cost_old1}, {cost1}')
# cost_new = olf_cost(X, Y, Z_new, rho=rho)
# print(expected_cost_increase, cost_new - cost_old)
cost_new = cost1
# if cost_new - cost_old2 > expected_cost_increase:
if cost_new - cost_old2 > 0:
# print(f'z iteration successful, {cost_old2}, {cost_new}')
successZ = True
break
alpha_z *= beta
if successZ:
Z = Z_new
cost2 = cost_new
else:
            print('stopped because the Z gradient-descent step was unsuccessful')
break
convz = np.amax(np.abs(Z-Z_old) / np.abs(Z_old + 1e-2))
d_cost2 = np.abs(cost_old2 - cost2) / np.abs(cost_old2)
if d_cost2 < rtol and convz < rtol:
print('stopped because costs and Z stopped changing')
break
if i % cycle == 0 and i > 0:
alpha *= beta
print(i, 'costs:', cost_old, cost1, cost2)
# print('costs:', cost_old, cost1, cost2)
# break
print(f'i: {i}, costs: {cost0}, {cost2}')
return Y, Z, costs[:i+1]
|
2397b0d24fa8fa9b8ddb9d7dd8553bd076feeb39
| 3,643,718
|
from typing import List
from typing import Dict
from typing import Any
import sys
def get_features(model_description_features: List[Dict[str, Any]]):
"""Get features from a list of dictionaries
Parameters
----------
model_description_features : List[Dict[str, Any]]
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
- pixel_env: 0
]
"""
return utils.get_objectlist(
model_description_features, config_key="features", module=sys.modules[__name__]
)
|
77f904fe28ea175a06671dc4533fb0e36ae2d593
| 3,643,719
|
import pandas as pd
def get_cluster_codes(cluster: pd.Categorical) -> pd.Series:
"""Get the X location for plotting p-value string."""
categories = cluster.cat.categories.rename("cluster")
return pd.Series(range(len(categories)), index=categories, name="x")
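# Usage sketch (made-up categories). Note that despite the `pd.Categorical` annotation,
# the `.cat` accessor means the function expects a Series of categorical dtype.
cluster = pd.Series(pd.Categorical(["b", "a", "b", "c"], categories=["a", "b", "c"]))
print(get_cluster_codes(cluster))
# index ["a", "b", "c"] (named "cluster") with x values 0, 1, 2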
|
bb6899e5245c14e47ac855fef95775c282a8ed0f
| 3,643,720
|
import math
import copy
def _expand_configurations_from_chain(chain, *, pragma: str = 'pytmc',
allow_no_pragma=False):
"""
Wrapped by ``expand_configurations_from_chain``, usable for callers that
don't want the full product of all configurations.
"""
def handle_scalar(item, pvname, config):
"""Handler for scalar simple or structured items."""
yield item, config
def handle_array_complex(item, pvname, config):
"""Handler for arrays of structured items (or enums)."""
low, high = item.array_info.bounds
expand_digits = math.floor(math.log10(high)) + 2
array_element_pragma = config.get('array', '')
for idx in parse_array_settings(array_element_pragma, (low, high)):
# shallow-copy; only touching the top level "pv" key
idx_config = copy.copy(config)
idx_config['pv'] += get_array_suffix(
config, idx, default=f':%.{expand_digits}d')
yield parser._ArrayItemProxy(item, idx), idx_config
def get_all_options(subitems, handler, pragmas):
split_pragma = split_pytmc_pragma('\n'.join(pragmas))
for pvname, separated_cfg in separate_configs_by_pv(split_pragma):
config = dictify_config(separated_cfg)
# config will have the SUBITEM key, applicable to its level
# in the hierarchy. If it exists, merge it with our current set.
if SUBITEM in config:
_merge_subitems(subitems, config[SUBITEM])
for key, value in subitems.get(PRAGMA, []):
config[key] = value
yield from handler(item, pvname, config)
# `subitems` keeps track of forward references with pragmas of members
# and sub-members (and so on)
subitems = {}
for item in chain:
subitems = subitems.get(item.name, {})
pragmas = list(pragma for pragma in get_pragma(item, name=pragma)
if pragma is not None)
if not pragmas:
if allow_no_pragma:
pragmas = [None]
yield [(item, None)]
continue
# If any pragma in the chain is unset, escape early
return []
if item.array_info and (item.data_type.is_complex_type or
item.data_type.is_enum):
options = get_all_options(subitems, handle_array_complex, pragmas)
else:
options = get_all_options(subitems, handle_scalar, pragmas)
yield list(options)
|
b1282a9cf65875be3e91c3f0eb09a73f0130ccf9
| 3,643,721
|
import base64
def encrypt(data=None, key=None):
"""
Encrypts data
:param data: Data to encrypt
:param key: Encryption key (salt)
"""
k = _get_padded_key(key)
e = AES.new(k, AES.MODE_CFB, k[::-1])
enc = e.encrypt(data)
return base64.b64encode(enc)
|
668a5e94ea6d1adddb038f5cab1f0d165bb98bb0
| 3,643,722
|
import logging
import os
from datetime import datetime
def get_logger(logdir_path=None):
"""logging.Logger
Args
----
logdir_path: str
path of the directory where the log files will be output
Returns
-------
logger (logging.Logger): instance of logging.Logger
"""
log_format = (
'%(levelname)-8s - %(asctime)s - '
'[%(real_filename)s %(real_funcName)s %(real_lineno)d] %(message)s'
)
sh = logging.StreamHandler()
sh.addFilter(CustomFilter())
# sh.setLevel(logging.INFO)
if logdir_path is None:
logging.basicConfig(handlers=[sh],
format=log_format,
level=logging.INFO)
else:
if not os.path.exists(logdir_path):
os.makedirs(logdir_path)
        logfile_path = os.path.join(logdir_path, str(datetime.now().date()) + '.log')
fh = logging.FileHandler(logfile_path)
fh.addFilter(CustomFilter())
logging.basicConfig(
handlers=[sh, fh],
format=log_format,
level=logging.INFO
)
logger = logging.getLogger(__name__)
return logger
|
d97b49508490bd82eefa5a929a8b8e680431e766
| 3,643,723
|
import numpy as np
def box_in_k_largest(boxes, box, k):
"""Returns True if `box` is one of `k` largest boxes in `boxes`. If there are ties that
extend beyond k, they are included."""
if len(boxes) == 0:
return False
boxes = sorted(boxes, reverse=True, key=box_volume)
n = len(boxes)
prev = box_volume(boxes[0])
for i in range(n):
vol = box_volume(boxes[i])
if i >= k:
if vol < prev:
break
prev = vol
if np.array_equal(boxes[i], box):
return True
return False
|
e941513e47db5fb09e21b96933c629cf3c39bf49
| 3,643,724
|
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Returns specified diagonals.
If `a` is 2-D, returns the diagonal of a with the given offset, i.e., the
collection of elements of the form a[i, i+offset]. If `a` has more than two
dimensions, then the axes specified by axis1 and axis2 are used to determine
the 2-D sub-array whose diagonal is returned. The shape of the resulting
array can be determined by removing axis1 and axis2 and appending an index
to the right equal to the size of the resulting diagonals.
Args:
a (Tensor): Array from which the diagonals are taken.
offset (int): optional. Offset of the diagonal from the main diagonal.
Can be positive or negative. Defaults to main diagonal (0).
axis1 (int): optional. Axis to be used as the first axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
first axis (0).
axis2 (int): optional. Axis to be used as the second axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
second axis (1).
Returns:
Tensor, if `a` is 2-D, then a 1-D array containing the diagonal. If
a.ndim > 2, then the dimensions specified by axis1 and axis2 are removed,
and a new axis inserted at the end corresponding to the diagonal.
Raises:
ValueError: if the input tensor has less than two dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> a = np.arange(4).reshape(2,2)
>>> print(a)
[[0 1]
[2 3]]
>>> output = np.diagonal(a)
>>> print(output)
[0 3]
>>> output = np.diagonal(a, 1)
>>> print(output)
[1]
>>> a = np.arange(8).reshape(2, 2, 2)
>>> print(a)
[[[0 1]
[2 3]]
[[4 5]
[6 7]]]
>>> output = np.diagonal(a, 0, 0, 1)
>>> print(output)
[[0 6]
[1 7]]
"""
ndim = F.rank(a)
if ndim < 2:
return _raise_value_error('diagonal requires an array of at least two dimensions')
dtype = F.dtype(a)
if _is_empty(F.shape(a)):
return _empty(dtype, (0,))
cast_type = dtype
if not isinstance(dtype, Float):
# reduce_sum only supports float types
cast_type = mstype.float32
a = F.cast(a, cast_type)
axes = _check_axis_valid((axis1, axis2), ndim)
perm = ()
for i in range(ndim):
if i not in axes:
perm += (i,)
perm += axes
a = transpose(a, perm)
shape = F.shape(a)
n, m = shape[-2:]
e = _eye(n, m, offset, cast_type)
e = _expand(e, ndim)
e = _broadcast_to(e, F.shape(e), F.shape(a), ndim)
prod = F.tensor_mul(a, e)
res = F.reduce_sum(prod, -1)
begin = ()
for i in range(ndim-2):
begin += (0,)
last_dim_begin = _max(0, -offset)
begin += (last_dim_begin,)
size = F.shape(res)[:-1]
last_dim_end = _min(
shape[-2], _max(0, shape[-1] - offset)) - last_dim_begin
if last_dim_end <= 0:
return _empty(dtype, size + (0,))
size += (last_dim_end,)
res = F.tensor_slice(res, begin, size)
if not _check_same_type(cast_type, dtype):
res = F.cast(res, dtype)
return res
|
64ada8a83fd1162e7d84e84007be0263cd71bd0c
| 3,643,725
|
import numpy as np
def available_number_of_windows_in_array(n_samples_array, n_samples_window, n_advance):
"""
Parameters
----------
    n_samples_array : int
        Total number of samples in the time series
    n_samples_window : int
        Number of samples per window
    n_advance : int
        Number of samples the window advances between consecutive windows
    Returns
    -------
    int
        The number of complete windows that fit in the array
"""
stridable_samples = n_samples_array - n_samples_window
if stridable_samples < 0:
print("Window is longer than the time series")
raise Exception
available_number_of_strides = int(np.floor(stridable_samples / n_advance))
return available_number_of_strides + 1
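# Quick sanity check (arbitrary values): 100 samples, 20-sample windows, advancing by 10.
# floor((100 - 20) / 10) = 8 strides, hence 9 windows in total.
print(available_number_of_windows_in_array(100, 20, 10))  # 9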
|
cab937efe4408d4707b601d4a0a68782d062ab36
| 3,643,726
|
import torch
from numpy import ndarray
def tensor_to_image(tensor: torch.Tensor) -> ndarray:
"""
Convert a torch tensor to a numpy array
:param tensor: torch tensor
:return: numpy array
"""
image = TENSOR_TO_PIL(tensor.cpu().clone().squeeze(0))
return image
|
fb3f16ec6cee1c50d2b7e6f2e31bd94aa300cdfd
| 3,643,727
|
def shimizu_mirioka(XYZ, t, a=0.75, b=0.45):
"""
    The Shimizu-Morioka attractor.
    Suggested initial condition: x0 = (0.1, 0, 0)
    """
x, y, z = XYZ
x_dt = y
y_dt = (1 - z) * x - a * y
z_dt = x**2 - b * z
return x_dt, y_dt, z_dt
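# Usage sketch: integrate the system with SciPy's odeint (time span chosen arbitrarily,
# initial state taken from the docstring).
import numpy as np
from scipy.integrate import odeint

t = np.linspace(0, 100, 10000)
trajectory = odeint(shimizu_mirioka, (0.1, 0.0, 0.0), t)  # shape (10000, 3): x, y, z over time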
|
60e5b52e1755de8bcc966364d828d47b05af3723
| 3,643,728
|
import numpy as np
import cv2 as cv
def draw_cap_peaks_rh_coord(img_bgr, rafts_loc, rafts_ori, raft_sym, cap_offset, rafts_radii, num_of_rafts):
"""
draw lines to indicate the capillary peak positions
in right-handed coordinate
:param numpy array img_bgr: the image in bgr format
:param numpy array rafts_loc: the locations of rafts
:param numpy array rafts_ori: the orientation of rafts, in deg
:param int raft_sym: the symmetry of raft
:param int cap_offset: the angle between the dipole direction
and the first capillary peak, in deg
:param numpy array rafts_radii: radii of the rafts
:param int num_of_rafts: num of rafts
:return: bgr image file
"""
line_thickness = int(2)
line_color2 = (0, 255, 0)
cap_gap = 360 / raft_sym
# cap_offset = 45 # the angle between the dipole direction and the first capillary peak
output_img = img_bgr
height, width, _ = img_bgr.shape
for raft_id in np.arange(num_of_rafts):
for capID in np.arange(raft_sym):
# note that the sign in front of the sine term is "+"
line_start = (rafts_loc[raft_id, 0], height - rafts_loc[raft_id, 1])
line_end = (int(rafts_loc[raft_id, 0] + np.cos((rafts_ori[raft_id] + cap_offset + capID * cap_gap)
* np.pi / 180) * rafts_radii[raft_id]),
height - int(rafts_loc[raft_id, 1] + np.sin((rafts_ori[raft_id] + cap_offset + capID * cap_gap)
* np.pi / 180) * rafts_radii[raft_id]))
output_img = cv.line(output_img, line_start, line_end, line_color2, line_thickness)
return output_img
|
c180a23d7ad6a04d8e56a61a0cb5e058bf1e5d95
| 3,643,729
|
import subprocess
def invoke(command):
"""Invoke sub-process."""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
status = 0
except subprocess.CalledProcessError as error: # pragma: no cover
output = error.output
status = error.returncode
return status, output
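# Usage sketch (the command is arbitrary and assumes a `python` executable on PATH):
status, output = invoke(["python", "--version"])
print(status)                   # 0 on success
print(output.decode().strip())  # e.g. "Python 3.11.4"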
|
fb4023b6118d63a1bbe2551bc007ce2f750751dd
| 3,643,730
|
def pack_bidirectional_lstm_state(state, num_layers):
"""
Pack the hidden state of a BiLSTM s.t. the first dimension equals to the number of layers.
"""
assert (len(state) == 2 * num_layers)
_, batch_size, hidden_dim = state.size()
layers = state.view(num_layers, 2, batch_size, hidden_dim).transpose(1, 2).contiguous()
state = layers.view(num_layers, batch_size, -1)
return state
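# Usage sketch: a hidden state shaped like the h_n returned by a 2-layer bidirectional LSTM.
num_layers, batch_size, hidden_dim = 2, 3, 5
h_n = torch.randn(2 * num_layers, batch_size, hidden_dim)
packed = pack_bidirectional_lstm_state(h_n, num_layers)
print(packed.shape)  # torch.Size([2, 3, 10]) -- the two directions concatenated per layer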
|
de102ce55deceb5ca7211def122dc2767c35cdd3
| 3,643,731
|
import copy
def _create_record_from_template(template, start, end, fasta_reader):
"""Returns a copy of the template variant with the new start and end.
Updates to the start position cause a different reference base to be set.
Args:
template: third_party.nucleus.protos.Variant. The template variant whose
non-location and reference base information to use.
start: int. The desired new start location.
end: int. The desired new end location.
fasta_reader: GenomeReferenceFai object. The reader used to determine the
correct start base to use for the updated variant.
Returns:
An updated third_party.nucleus.protos.Variant with the proper start, end,
and reference base set and all other fields inherited from the template.
"""
retval = copy.deepcopy(template)
retval.start = start
retval.end = end
if start != template.start:
retval.reference_bases = fasta_reader.query(
ranges.make_range(retval.reference_name, start, start + 1))
return retval
|
62c9ff204cff3887daad0df4f710cc54f9c8dad9
| 3,643,732
|
import xlrd
import xlrd.sheet
import datetime
import os
import copy
import hashlib
import subprocess
import shutil
from io import BytesIO
def create_outputfile(prxdoc,inputfiles_element,inputfilehref,nominal_outputfilehref,outputfilehref,outputdict,ignore_locking):
"""Create the output XML file from the raw input by running any filters, etc.
It will be presumed that the output XML file will eventually be referred to by nominal_outputfilehref,
but the actual file written will be outputfilehref"""
# print("inputfilehref=%s" % (inputfilehref.humanurl()))
if inputfilehref.get_bare_unquoted_filename().lower().endswith(".xls") or inputfilehref.get_bare_unquoted_filename().lower().endswith(".xlsx"):
try:
inputfileelement=outputdict[inputfilehref].inputfileelement
# Any dc: namespace elements within the inputfileelement
# will get placed in a dc:summary tag
timestamp=datetime.datetime.fromtimestamp(os.path.getmtime(inputfilehref.getpath()),lm_timestamp.UTC()).isoformat()
spreadsheet=xlrd.open_workbook(inputfilehref.getpath())
sheetname=prxdoc.getattr(inputfileelement,"sheetname",spreadsheet.sheet_names()[0])
sheet=spreadsheet.sheet_by_name(sheetname)
titlerow=int(prxdoc.getattr(inputfileelement,"titlerow","1"))-1
# titlerow=sheet.row(titlerownum)
nrows=sheet.nrows
ncols=sheet.ncols
rawtitles = [ str(sheet.cell(titlerow,col).value).strip() for col in range(ncols) ]
tagnames = [ convert_to_tagname(splitunits(rawtitle)[0]) if rawtitle is not None and len(rawtitle) > 0 else "blank" for rawtitle in rawtitles ]
unitnames = [ convert_to_tagname(splitunits(rawtitle)[1]) if rawtitle is not None and len(rawtitle) > 0 else None for rawtitle in rawtitles ]
nsmap=copy.deepcopy(prx_nsmap)
nsmap["ls"] = "http://limatix.org/spreadsheet"
outdoc=xmldoc.xmldoc.newdoc("ls:sheet",nsmap=nsmap,contexthref=outputfilehref)
# Copy dc: namespace elements within inputfileelement
# into a dc:summary tag
inputfileel_children=prxdoc.children(inputfileelement)
summarydoc=None
for inputfileel_child in inputfileel_children:
if prxdoc.gettag(inputfileel_child).startswith("dc:"):
if summarydoc is None:
summarydoc=xmldoc.xmldoc.newdoc("dc:summary",nsmap=nsmap,contexthref=prxdoc.getcontexthref())
pass
# place in document with same context as where it came from
summarydoc.getroot().append(copy.deepcopy(inputfileel_child))
pass
pass
if summarydoc is not None:
# shift summary context and then copy it into outdoc
summarydoc.setcontexthref(outdoc.getcontexthref())
outdoc.getroot().append(copy.deepcopy(summarydoc.getroot()))
pass
# Copy spreadsheet table
for row in range(titlerow+1,nrows):
rowel=outdoc.addelement(outdoc.getroot(),"ls:row")
rownumel=outdoc.addelement(rowel,"ls:rownum")
outdoc.settext(rownumel,str(row))
for col in range(ncols):
cell=sheet.cell(row,col)
cell_type=xlrd.sheet.ctype_text.get(cell.ctype,'unknown')
if cell_type=="empty":
continue
cellel=outdoc.addelement(rowel,"ls:"+tagnames[col])
outdoc.setattr(cellel,"ls:celltype",cell_type)
hyperlink=sheet.hyperlink_map.get((row,col))
if cell_type=="text" and hyperlink is None:
outdoc.settext(cellel,cell.value)
pass
elif cell_type=="text" and hyperlink is not None:
# Do we need to do some kind of conversion on
# hyperlink.url_or_path()
outdoc.settext(cellel,cell.value)
hyperlink_href=dcv.hrefvalue(hyperlink.url_or_path,contexthref=inputfilehref)
hyperlink_href.xmlrepr(outdoc,cellel)
pass
elif cell_type=="number":
if unitnames[col] is not None:
outdoc.setattr(cellel,"dcv:units",unitnames[col])
pass
outdoc.settext(cellel,str(cell.value))
pass
elif cell_type=="xldate":
                        outdoc.settext(cellel,datetime.datetime(*xlrd.xldate_as_tuple(cell.value,spreadsheet.datemode)).isoformat())
pass
elif cell_type=="bool":
outdoc.settext(cellel,str(bool(cell.value)))
pass
elif cell_type=="error":
outdoc.settext(cellel,"ERROR %d" % (cell.value))
pass
else:
raise ValueError("Unknown cell type %s" %(cell_type))
pass
pass
# Did the user provide a prx:xslt href indicating
# a transformation to apply?
xslttag=prxdoc.xpathsinglecontext(outputdict[inputfilehref].inputfileelement,"prx:xslt",default=None)
if xslttag is not None:
# Replace outdoc with transformed copy
outdoc = create_outputfile_process_xslt(prxdoc,xslttag,inputfiles_element,outputdict[inputfilehref].inputfileelement,outdoc)
pass
# Write out under new file name outputfilehref
assert(outputfilehref != inputfilehref)
outdoc.set_href(outputfilehref,readonly=False)
outdoc.close()
canonhash=None # could hash entire input file...
pass
except ImportError:
raise(ImportError("Need to install xlrd package in order to import .xls or .xlsx files"))
pass
elif inputfilehref.has_fragment():
# input file url has a fragment... we're only supposed
# to extract a portion of the file
timestamp=datetime.datetime.fromtimestamp(os.path.getmtime(inputfilehref.getpath()),lm_timestamp.UTC()).isoformat()
if inputfilehref.fragless()==prxdoc.get_filehref():
inputfilecontent=prxdoc # special case where input file is .prx file
pass
else:
inputfilecontent=xmldoc.xmldoc.loadfile(inputfilehref.getpath())
pass
inputfileportion=inputfilehref.evaluate_fragment(inputfilecontent)
if len(inputfileportion)==0:
raise ValueError("Input URL %s fragment reference failed to resolve" % (inputfilehref.humanurl()))
elif len(inputfileportion) > 1:
raise ValueError("Input URL %s fragment reference resolved to multiple elements" % (inputfilehref.humanurl()))
#print("inputfilehref=%s" % (inputfilehref.humanurl()))
#print("inputfileportion=%s" % (etree.tostring(inputfileportion[0])))
#import pdb as pythondb
#pythondb.set_trace()
outdoc=xmldoc.xmldoc.copy_from_element(inputfilecontent,inputfileportion[0],nsmap=prx_nsmap) # NOTE: prx_nsmap doesn't make much difference here because the nsmap of the element is copied in. prx_nsmap just makes our prefixes available through xmldoc
# Create canonicalization from unmodified outdoc so that we can hash it
outdoc_canon=BytesIO()
outdoc.doc.write_c14n(outdoc_canon,exclusive=False,with_comments=True)
canonhash=hashlib.sha256(outdoc_canon.getvalue()).hexdigest()
if inputfileportion[0] is outputdict[inputfilehref].inputfileelement:
# special case where this input file href with fragment
# points to its very tag -- the <inputfiles> tag in the prxfile
# auto-populate corresponding <outputfile> tags
# i.e. modify outdoc to make sure there is an <outputfile> tag with an xlink:href
# for each inputfile
assert(inputfilecontent.gettag(inputfileportion[0])=="prx:inputfiles")
outdoc_inputfiletags=[ outdoc.getroot() ] # treat the root <inputfiles> tag as an inputfile
outdoc_inputfiletags.extend(outdoc.xpath("prx:inputfile"))
for outdoc_inputfiletag in outdoc_inputfiletags:
if outdoc_inputfiletag is outdoc.getroot() and not outdoc.hasattr(outdoc_inputfiletag,"xlink:href"):
# root prx:inputfiles tag has no xlink:href
assert(outdoc.gettag(outdoc_inputfiletag)=="prx:inputfiles")
outdoc_inputfilehref = inputfilehref # subsegment of input file
pass
elif outdoc.hasattr(outdoc_inputfiletag,"xlink:href") and outdoc_inputfiletag is not outdoc.getroot():
outdoc_inputfilehref = dcv.hrefvalue.fromxml(outdoc,outdoc_inputfiletag) # specified input file
pass
else:
raise ValueError("Bad <prx:inputfiles> or <prx:inputfile> tag at %s" % (dcv.hrefvalue.fromelement(outdoc,outdoc_inputfiletag).humanurl()))
#print("outdoc_inputfilehref:")
#print(outdoc_inputfilehref)
#print("outputdict keys:")
#print(outputdict.keys())
assert(outdoc_inputfilehref in outputdict) # all of these input file references should be keys to the output dict because outputdict was made from the originals!
# Find or create prx:outputfile tag
outdoc_outputfiletag = outdoc.child(outdoc_inputfiletag,"prx:outputfile")
if outdoc_outputfiletag is None:
outdoc_outputfiletag=outdoc.addelement(outdoc_inputfiletag,"prx:outputfile")
pass
# Ensure prx:outputfile tag has a hyperlink
if not outdoc.hasattr(outdoc_outputfiletag,"xlink:href"):
outputdict[outdoc_inputfilehref].outputfilehref.xmlrepr(outdoc,outdoc_outputfiletag)
pass
pass
pass
# Did the user provide a prx:xslt href indicating
# a transformation to apply?
xslttag=prxdoc.xpathsinglecontext(outputdict[inputfilehref].inputfileelement,"prx:xslt",default=None)
if xslttag is not None:
outdoc = create_outputfile_process_xslt(prxdoc,xslttag,inputfiles_element,outputdict[inputfilehref].inputfileelement,outdoc)
pass
# Write out selected portion under new file name outputfilehref
assert(outputfilehref != inputfilehref)
outdoc.set_href(outputfilehref,readonly=False)
outdoc.close()
pass
else:
# input file url has no fragment, not .xls or .xlsx: treat it as XML
# extract the whole thing!
# Do we have an input filter? ... stored as xlink:href in <inputfilter> tag
canonhash=None # (we could hash the entire inputfile!)
inputfilters=prxdoc.xpathcontext(outputdict[inputfilehref].inputfileelement,"prx:inputfilter")
if len(inputfilters) > 1:
raise ValueError("Maximum of one <inputfilter> element permitted in .prx file")
timestamp=datetime.datetime.fromtimestamp(os.path.getmtime(inputfilehref.getpath()),lm_timestamp.UTC()).isoformat()
xslttag=prxdoc.xpathsinglecontext(outputdict[inputfilehref].inputfileelement,"prx:xslt",default=None)
if len(inputfilters) > 0:
# have an input filter
inputfilter=inputfilters[0]
# run input filter
# Get path from xlink:href
#inputfilterpath=prxdoc.get_href_fullpath(inputfilter)
inputfilterhref=dcv.hrefvalue.fromxml(prxdoc,inputfilter)
inputfilterpath=inputfilterhref.getpath()
# build arguments
inputfilterargs=[inputfilterpath]
# pull attributes named param1, param2, etc. from inputfilter tag
cnt=1
while "param"+cnt in inputfilter.attrib:
inputfilterargs.append(inputfilter.attrib["param"+cnt])
cnt+=1
pass
# add input and output filenames as params to filter
inputfilterargs.append(inputfilehref.getpath())
inputfilterargs.append(outputfilehref.getpath())
# Call input filter... will raise
# exception if input filter fails.
            subprocess.check_call(inputfilterargs)
pass
elif xslttag is not None:
indoc=xmldoc.xmldoc.loadhref(inputfilehref,nsmap=prx_nsmap,readonly=True)
outdoc = create_outputfile_process_xslt(prxdoc,xslttag,inputfiles_element,outputdict[inputfilehref].inputfileelement,indoc)
# Write out under new file name outputfilehref
assert(outputfilehref != inputfilehref)
outdoc.set_href(outputfilehref,readonly=False)
outdoc.close()
pass
else:
# use shutil to copy input to output
shutil.copyfile(inputfilehref.getpath(),outputfilehref.getpath())
pass
pass
return (canonhash,timestamp)
|
7243bf433f1b05631f2ee77911b0b535fa478a26
| 3,643,733
|
import os
import io
import zlib
from io import BytesIO
from urllib import request
def get_example_data(filepath: str, is_gzip: bool = True, make_bytes: bool = False) -> BytesIO:
"""
    Fetch the example data: open the file (downloading it if necessary) and decompress it.
    :param filepath: Path of the file.
    :param is_gzip: Whether the content is gzip-compressed.
    :param make_bytes: Whether to wrap the result in a BytesIO object.
    :return:
    """
    # Load from the local copy if it exists
local_path = os.path.join(EXAMPLES_FOLDER, "data", filepath)
if os.path.exists(local_path):
with io.open(local_path, "rb") as f:
content = f.read()
else:
content = request.urlopen(f"{BASE_URL}{filepath}?raw=true").read()
if is_gzip:
content = zlib.decompress(content, zlib.MAX_WBITS | 16)
if make_bytes:
content = BytesIO(content)
return content
|
cebcd2773b8958130cbe2d3bc4d8fca3ed926321
| 3,643,734
|
import time
def convert_time(time_string):
"""
    Parse a time string in HH:MM form and return the
    corresponding time.struct_time object
"""
return time.strptime(time_string, "%H:%M")
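# Usage sketch: parse "14:30"; fields other than hour and minute keep struct_time defaults.
parsed = convert_time("14:30")
print(parsed.tm_hour, parsed.tm_min)  # 14 30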
|
f34b46fe8cd242ee12a9768102486cba243d94df
| 3,643,735
|
from typing import Union
import torch
def get_distributed_mean(value: Union[float, torch.Tensor]):
"""Computes distributed mean among all nodes."""
if check_torch_distributed_initialized():
# Fix for runtime warning:
# To copy construct from a tensor, it is recommended to use
# sourceTensor.clone().detach() or
# sourceTensor.clone().detach().requires_grad_(True),
# rather than torch.tensor(sourceTensor).
if torch.is_tensor(value):
value = (
value.clone()
.detach()
.to(device=f"cuda:{torch.cuda.current_device()}")
)
else:
value = torch.tensor(
value,
dtype=torch.float,
device=f"cuda:{torch.cuda.current_device()}",
requires_grad=False,
)
torch.distributed.all_reduce(value)
value = float(value.item() / torch.distributed.get_world_size())
return value
|
9643b522e838a3a0aabcfab7021d4bac7d58e21d
| 3,643,736
|
def js_div(A, B):
""" Jensen-Shannon divergence between two discrete probability
distributions, represented as numpy vectors """
norm_A = A / A.sum()
norm_B = B / B.sum()
M = (norm_A+norm_B)/2
return 0.5 * (kl_div(norm_A,M)+kl_div(norm_B,M))
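# Usage sketch. `kl_div` is not shown in this snippet; the helper below is an assumed
# stand-in implementing the standard discrete KL divergence for normalized vectors.
import numpy as np

def kl_div(P, Q):
    return float(np.sum(P * np.log(P / Q)))

A = np.array([1.0, 2.0, 3.0])
B = np.array([3.0, 2.0, 1.0])
print(js_div(A, B))  # symmetric and bounded by ln(2) under the natural-log convention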
|
1e4ac763d01f3ae3d25907d24229301d464de527
| 3,643,737
|
from typing import Dict
def _build_request_url(
base: str,
params_dict: Dict[str, str]) -> str:
"""Returns an URL combined from base and parameters
:param base: base url
:type base: str
:param params_dict: dictionary of parameter names and values
:type params_dict: Dict[str, str]
:return: a complete url
:rtype: str
"""
parameters = "&".join([f"{k}={v}" for k, v in params_dict.items()])
url = base + "?" + parameters
return url
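# Usage sketch (the endpoint and parameters are made up):
url = _build_request_url("https://api.example.com/search", {"q": "rdf", "page": "2"})
print(url)  # https://api.example.com/search?q=rdf&page=2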
|
30e27cf55692884be408218403c2f94279516ad2
| 3,643,738
|
def aesDecrypt(key, data):
"""AES decryption fucnction
Args:
key (str): packed 128 bit key
data (str): packed encrypted data
Returns:
Packed decrypted data string
"""
cipher = python_AES.new(key)
return cipher.decrypt(data)
|
15c70d0699a22bf58ca191ba2fea2d5eb7942b1b
| 3,643,739
|
def __format_number_input(number_input, language):
"""Formats the specified number input.
Args:
number_input (dict): A number input configuration to format.
language (dict): A language configuration used to help format the input configuration.
Returns:
dict: A formatted number input configuration.
"""
placeholder = number_input.get("placeholder")
if placeholder is not None:
number_input["placeholder"] = normalize_configuration_string(placeholder, language["default"])
return number_input
|
0cd76b74396c013d7f76ae5ae11ace56db6552ab
| 3,643,740
|
def get_players(picks):
"""Return the list of players in the team
"""
players = []
for rd in picks:
play = list(rd.keys())
players = players+play
players = list(set(players))
return players
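# Usage sketch: `picks` is assumed to be a list of per-round dicts keyed by player name.
picks = [{"Alice": 10, "Bob": 7}, {"Bob": 3, "Carol": 5}]
print(sorted(get_players(picks)))  # ['Alice', 'Bob', 'Carol']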
|
79963bc19af662d44d4eaf29a04995ede331706c
| 3,643,741
|
def verify_file_details_exists(device,
root_path,
file,
max_time=30,
check_interval=10):
""" Verify file details exists
Args:
device ('obj'): Device object
root_path ('str'): Root path for command
file ('str'): File name
max_time (`int`): Max time, default: 30
check_interval (`int`): Check interval, default: 10
Returns:
Boolean
Raises:
None
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse(
'file list {root_path} detail'.format(root_path=root_path))
except SchemaEmptyParserError as e:
timeout.sleep()
continue
file_found = Dq(out).contains_key_value('file-name',
file,
value_regex=True)
if file_found:
return True
timeout.sleep()
return False
|
da0e33ca67b9e70dc4b5345ba626a193bcdefdbd
| 3,643,742
|
import collections
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import RDF, VOID
def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
"""
Returns a new graph with a VoID description of the passed dataset
For more info on Vocabulary of Interlinked Datasets (VoID), see:
http://vocab.deri.ie/void
This only makes two passes through the triples (once to detect the types
of things)
The tradeoff is that lots of temporary structures are built up in memory
meaning lots of memory may be consumed :)
I imagine at least a few copies of your original graph.
the distinctForPartitions parameter controls whether
distinctSubjects/objects are tracked for each class/propertyPartition
this requires more memory again
"""
typeMap = collections.defaultdict(set)
classes = collections.defaultdict(set)
for e, c in g.subject_objects(RDF.type):
classes[c].add(e)
typeMap[e].add(c)
triples = 0
subjects = set()
objects = set()
properties = set()
classCount = collections.defaultdict(int)
propCount = collections.defaultdict(int)
classProps = collections.defaultdict(set)
classObjects = collections.defaultdict(set)
propSubjects = collections.defaultdict(set)
propObjects = collections.defaultdict(set)
for s, p, o in g:
triples += 1
subjects.add(s)
properties.add(p)
objects.add(o)
# class partitions
if s in typeMap:
for c in typeMap[s]:
classCount[c] += 1
if distinctForPartitions:
classObjects[c].add(o)
classProps[c].add(p)
# property partitions
propCount[p] += 1
if distinctForPartitions:
propObjects[p].add(o)
propSubjects[p].add(s)
if not dataset:
dataset = URIRef("http://example.org/Dataset")
if not res:
res = Graph()
res.add((dataset, RDF.type, VOID.Dataset))
# basic stats
res.add((dataset, VOID.triples, Literal(triples)))
res.add((dataset, VOID.classes, Literal(len(classes))))
res.add((dataset, VOID.distinctObjects, Literal(len(objects))))
res.add((dataset, VOID.distinctSubjects, Literal(len(subjects))))
res.add((dataset, VOID.properties, Literal(len(properties))))
for i, c in enumerate(classes):
part = URIRef(dataset + "_class%d" % i)
res.add((dataset, VOID.classPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(classCount[c])))
res.add((part, VOID.classes, Literal(1)))
res.add((part, VOID["class"], c))
res.add((part, VOID.entities, Literal(len(classes[c]))))
res.add((part, VOID.distinctSubjects, Literal(len(classes[c]))))
if distinctForPartitions:
res.add(
(part, VOID.properties, Literal(len(classProps[c]))))
res.add((part, VOID.distinctObjects,
Literal(len(classObjects[c]))))
for i, p in enumerate(properties):
part = URIRef(dataset + "_property%d" % i)
res.add((dataset, VOID.propertyPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(propCount[p])))
res.add((part, VOID.properties, Literal(1)))
res.add((part, VOID.property, p))
if distinctForPartitions:
entities = 0
propClasses = set()
for s in propSubjects[p]:
if s in typeMap:
entities += 1
for c in typeMap[s]:
propClasses.add(c)
res.add((part, VOID.entities, Literal(entities)))
res.add((part, VOID.classes, Literal(len(propClasses))))
res.add((part, VOID.distinctSubjects,
Literal(len(propSubjects[p]))))
res.add((part, VOID.distinctObjects,
Literal(len(propObjects[p]))))
return res, dataset
|
66f8b5824017fd41995783c75de72451d22ea023
| 3,643,743
|
def extract_screen_name_from_twitter_url(url):
"""
Function returning the screen_name from a given Twitter url.
Args:
url (str) : Url from which we extract the screen_name if found.
Returns:
str : screen_name if the url is a valid twitter url, None otherwise.
"""
parsed_twitter_url = parse_twitter_url(url)
if isinstance(parsed_twitter_url, TwitterUser):
return parsed_twitter_url.screen_name
if isinstance(parsed_twitter_url, TwitterTweet):
return parsed_twitter_url.user_screen_name
return None
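# Hedged usage sketch: relies on the module's parse_twitter_url / TwitterUser /
# TwitterTweet helpers (e.g. from the ural library); the expected outputs assume
# that parser resolves the URLs as shown.
print(extract_screen_name_from_twitter_url("https://twitter.com/Twitter"))
# expected: "Twitter" if the URL is parsed as a TwitterUser
print(extract_screen_name_from_twitter_url("https://example.com/not-twitter"))
# expected: None for non-Twitter URLs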
|
3bbc00f2b4fb0aa0b49154d37802a50204f05ccf
| 3,643,744
|
def sub_vectors(a, b):
"""Subtracts two vectors.
Args:
        a (tuple[int]): first position
        b (tuple[int]): second position
Returns:
tuple[int]: element wise subtraction
Examples:
>>> sub_vectors((1,4,6), (1,3,7))
(0, 1, -1)
"""
return tuple(a[i] - b[i] for i in range(3))
|
02c35bf46311142a3f3e90cd803d908c6ff63896
| 3,643,745
|
def get_prediction_info(predicted_one_hot, predicted_int, y_test, PLOTS_DIR, filename = "test_file"):
"""
    Saves useful information for error analysis in the plots directory.
    :param predicted_one_hot: per-sample class probabilities (column 0: True, column 1: False)
    :param predicted_int: predicted integer labels
    :param y_test: ground-truth labels
    :param PLOTS_DIR: directory in which the analysis text file is written
    :param filename: base name of the output text file
    :return: sorted indices per class and the worst misclassified indices
"""
def get_info_for_label(label):
false_dict = {}
number = 0
if label == False:
number = 1
for i in range(len(predicted_one_hot)):
false_dict[i] = predicted_one_hot[i][number]
temp_dict = false_dict
sorted_index = sorted(false_dict, key=false_dict.get, reverse=True)
file = str(label) + "\n"
file += "Index;probability;correct?\n"
for i in range(len(sorted_index)):
correct = "No"
index = sorted_index[i]
if predicted_int[index] == y_test[index]:
correct = "Yes"
file += str(index) + ";" + str(temp_dict[index]) + ";" + correct + "\n"
print(sorted_index[:5])
return file, sorted_index
file = "Predictions True;Predictions False;Correctly predicted?\n"
max_true_value = 0.0
max_false_value = 0.0
max_true_index = -1
worst_true_index = -1
max_false_index = -1
worst_false_index = -1
for i, pred in enumerate(predicted_one_hot):
correctly_pred = -1
if predicted_int[i] == y_test[i]:
correctly_pred = "Yes"
else:
correctly_pred = "No"
file += str(pred[0]) + ";" + str(pred[1]) + ";" + str(correctly_pred) + "\n"
if pred[0] > max_true_value:
max_true_value = pred[0]
max_true_index = i
if predicted_int[i] != y_test[i]:
worst_true_index = i
if pred[1] > max_false_value:
max_false_value = pred[1]
max_false_index = i
if predicted_int[i] != y_test[i]:
worst_false_index = i
file += "\nStatistics\n"
file += "max_true_value: " + str(max_true_value) + "\n"
file += "max_true_index: " + str(max_true_index) + "\n"
file += "max_false_value: " + str(max_false_value) + "\n"
file += "max_false_index: " + str(max_false_index) + "\n"
file += "worst_true_index: " + str(worst_true_index) + "\n"
file += "worst_false_index: " + str(worst_false_index) + "\n"
file += "===================================================\n"
file += "===================================================\n"
info_false, sorted_false = get_info_for_label(False)
info_true, sorted_true = get_info_for_label(True)
with open(PLOTS_DIR + filename+".txt", "w+") as text_file:
text_file.write(file + info_false + info_true)
return sorted_true, sorted_false, worst_true_index, worst_false_index
|
76d95c3793ee29c211d7f32e727e9bf046c075eb
| 3,643,746
|
def save_excel_file():
"""File save dialog for an excel file.
Returns:
str: file path
"""
return pick_excel_file(save=True)
|
392c014a959a6d61cfa02ca041d0496560df4dec
| 3,643,747
|
def load_app_paths(file_path=None, dir_path=None, user_file_path=None,
user_dir_path=None, default=None, paths=None, **kwargs):
"""Parse and merge user and app config files
User config will have precedence
:param file_path: Path to the base config file
:param dir_path: Path to the extension config directory
:param user_file_path: Path to the user base config file
:param user_dir_path: Path to the user base config file
    :param default: Path to be prepended as the default config file embedded
in the app
:param paths: Extra paths to add to the parsing after the defaults
:param force_extension: only read files with given extension.
:returns: Single dict with all the loaded config
"""
files = [default, file_path, dir_path, user_file_path, user_dir_path]
files += (paths or [])
return load_paths([path for path in files if path], **kwargs)
|
d5f6fe9b8db396f95656d80fea19dc0f95fba642
| 3,643,748
|
def search_playlists(spotify_token, playlist):
"""
    :param spotify_token: Spotify API access token
    :param playlist: playlist name or search query
    :return: playlist search results from the Spotify search endpoint
"""
return _search(spotify_token, query=playlist, type='playlist', limit=9, market='ES', offset=0)
|
cf2ab61a4f967c8cb570471c3fdea2c772d85e8d
| 3,643,749
|
import re
def text_pre_process(result):
""" 이미지에서 인식된 글자를 정제 합니다.
특수문자 제거, 1-2단어 제거, 줄바꿈 및 공백 제거
:param result: 이미지에서 인식된 글자
:return: 문자를 전처리한 결과
"""
copy = str(result)
copy2 = copy.replace("\n", "")
copy3 = re.sub('[^ㄱ-힗]', '', copy2)
# re.sub('[^A-Za-z0-9]', '', copy2)
result = re.sub('[-=+,#}/\{:^$.@*\※~&%ㆍ!『「』\\‘|\(\)\[_ ""\]\<\>`\'…》]', '', copy3)
# shortword = re.compile(r'\W*\b\w{1,2}\b')
# shortword.sub('', result)
# text2 = re.sub(r'\d','',result)
if result is not None and len(result) > 3:
# print(result)
return result
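# Usage sketch: only Hangul characters survive the [^ㄱ-힗] filter, so digits,
# Latin letters, punctuation and newlines are dropped.
sample = "안녕하세요!\n반갑습니다 OCR 123"
print(text_pre_process(sample))  # 안녕하세요반갑습니다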
|
c9a25fb19a723d38eb19a8a086a2134369223ea1
| 3,643,750
|
from typing import Tuple
import ssl
from datetime import datetime
def push_thread_callback(app: Flask):
"""Process outstanding MDM commands by issuing a push to device(s).
TODO: A push with no response needs an exponential backoff time.
Commands that are ready to send must satisfy these criteria:
- Command is in Queued state.
- Command.after is null.
- Command.ttl is not zero.
- Device is enrolled (is_enrolled)
"""
while not push_thread_stopped.wait(push_time):
app.logger.info('Push Thread checking for outstanding commands...')
with app.app_context():
pending: Tuple[Device, int] = db.session.query(Device, func.Count(Command.id)).\
filter(Device.id == Command.device_id).\
filter(Command.status == CommandStatus.Queued).\
filter(Command.ttl > 0).\
filter(Command.after == None).\
filter(Device.is_enrolled == True).\
group_by(Device.id).\
all()
for d, c in pending:
app.logger.info('PENDING: %d command(s) for device UDID %s', c, d.udid)
if d.token is None or d.push_magic is None:
app.logger.warn('Cannot request push on a device that has no device token or push magic')
continue
try:
response = push_to_device(d)
except ssl.SSLError:
return stop()
app.logger.info("[APNS2 Response] Status: %d, Reason: %s, APNS ID: %s, Timestamp",
response.status_code, response.reason, response.apns_id.decode('utf-8'))
d.last_push_at = datetime.utcnow()
if response.status_code == 200:
d.last_apns_id = response.apns_id
db.session.commit()
|
665b470fa046013e19ca0e58b7b9d58f21f31194
| 3,643,751
|
def get_conventional_std_cell(atoms):
"""Given an ASE atoms object, return the ASE atoms object in the conventional standard cell.
It uses symmetries to find the conventional standard cell.
In particular, it gives a structure with a conventional cell according to the standard defined in
W. Setyawan, and S. Curtarolo, Comput. Mater. Sci.49(2), 299-312 (2010). \n
This is simply a wrapper around the pymatgen implementation:
http://pymatgen.org/_modules/pymatgen/symmetry/analyzer.html
Parameters:
atoms: `ase.Atoms` object
Atomic structure.
Returns:
`ase.Atoms` object
Return the structure in a conventional cell.
.. seealso:: To create a standard cell that it is independent from symmetry operations use
:py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell_no_sym`
.. codeauthor:: Angelo Ziletti <angelo.ziletti@gmail.com>
"""
# save atoms.info dict otherwise it gets lost in the conversion
atoms_info = atoms.info
mg_structure = AseAtomsAdaptor.get_structure(atoms)
finder = SpacegroupAnalyzer(mg_structure)
mg_structure = finder.get_conventional_standard_structure()
conventional_standard_atoms = AseAtomsAdaptor.get_atoms(mg_structure)
conventional_standard_atoms.info = atoms_info
return conventional_standard_atoms
|
78bf131e8f195bd25b424627d6cce2d5295de248
| 3,643,752
|
def get_if_rcnn(inputs: Tensor):
"""
    :param inputs: Tensor from the Input layer
    :return: feature outputs from the backbone network
    """
    # get backbone outputs
if_backbones_out = backbones(inputs)
return if_backbones_out
|
02507f10e4dc791b1f201d2c08d3925f2a2dacb5
| 3,643,753
|
import string
import secrets
def method_3(num_chars: int):
"""
Pythonicish way of generating random password
Args:
num_chars (int): Number of Characters the password will be
Returns:
string: The generated password
"""
chars = string.ascii_letters + string.digits + string.punctuation
password = "".join((secrets.choice(chars) for i in range(num_chars)))
return password
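# Usage sketch: generate a 16-character password; secrets draws from letters,
# digits and punctuation, so the output differs on every call.
pw = method_3(16)
print(len(pw), pw)  # 16 <random password>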
|
03fc383ef8c45f1bc618daf4b70646ea824e31df
| 3,643,754
|
def get_animation_for_block(
block_start: int,
frame_num: int,
total_frames: int,
duration: int=5,
):
"""Generate CSS to pop a block from gray to red at the right frame
block_start: int
frame_num: int
total_frames: int
duration: int # seconds"""
animation_function = gray_red_blue
return animation_function(block_start, frame_num, total_frames, duration)
|
9ce9a36a5c7ca5161b89a3306c7e38c9c03d2ee0
| 3,643,755
|
def find_student_by_username(usuario_id, test=False):
"""Consulta toda la información de un estudiante según su usuario."""
query = 'SELECT * FROM estudiante WHERE id_usuario = %s'
return execute_sql(query, args=[usuario_id], rows=1, test=test)
|
ecc67992aef2257d35ccc6dfa05c01afc3d40bb3
| 3,643,756
|
def reduce_mem_usage(df, use_float16=False):
"""
Iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
if is_datetime(df[col]) or is_categorical_dtype(df[col]):
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage after optimization is: {:.2f} MB".format(end_mem))
print("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
return df
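# Hedged usage sketch: assumes numpy/pandas plus the dtype helpers the function
# body refers to; the demo frame below is purely illustrative.
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype as is_datetime, is_categorical_dtype

df_demo = pd.DataFrame({
    "small_int": np.arange(1000),       # downcast to int16
    "ratio": np.random.rand(1000),      # downcast to float32
    "label": ["a", "b"] * 500,          # object -> category
})
df_demo = reduce_mem_usage(df_demo)
print(df_demo.dtypes)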
|
842e9c134cd5211fdbe75b0626efa48f68d90c35
| 3,643,757
|
import argparse
def ParseCommandYAML():
"""Function for parsing command line arguments for input to YAML HDIprep"""
# if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--im", nargs='*')
parser.add_argument("--pars")
parser.add_argument("--out_dir")
args = parser.parse_args()
# Create a dictionary object to pass to the next function
dict = {"im": args.im, "pars": args.pars, "out_dir": args.out_dir}
# Print the dictionary object
print(dict)
# Return the dictionary
return dict
|
3a56d16d960f59f0afd888120c19d12b0b6f25b3
| 3,643,758
|
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
name="global_step",
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
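# Hedged usage sketch: assumes TensorFlow 2 with eager execution; a trainer
# advances the counter once per Controller step via assign_add.
import tensorflow as tf  # the function above already relies on tf being importable

global_step = create_global_step()
global_step.assign_add(1)
print(int(global_step.numpy()))  # 1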
|
786ac18f78f092e099844d99b33f5232f53d8a8a
| 3,643,759
|
def rl_label_weights(name=None):
"""Returns the weight for importance."""
with tf.variable_scope(name, 'rl_op_selection'):
num_classes = get_src_num_classes()
num_choices = FLAGS.num_choices
logits = tf.get_variable(
name='logits_rl_w',
initializer=tf.initializers.zeros(),
shape=[num_classes, num_choices],
dtype=tf.float32)
dist_logits_list = logits.value()
dist = tfp.distributions.Categorical(logits=logits)
dist_entropy = tf.reduce_sum(dist.entropy())
sample = dist.sample()
sample_masks = 1. * tf.cast(sample, tf.float32) / num_choices
sample_log_prob = tf.reduce_mean(dist.log_prob(sample))
return (dist_logits_list, dist_entropy, sample_masks, sample_log_prob)
|
b1157c508cb256ab9608ebd78dfbb1ef90cec2b5
| 3,643,760
|
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from typing import List
from tqdm import tqdm
def run_de_test(dataset1: Dataset, dataset2,
test_cells: List[str], control_cells: List[List[str]],
test_label: str = None, control_group_labels: list = None,
exp_frac_thresh: float = 0.25, log2_fc_thresh: float = 1,
qval_thresh: float = 0.05, tqdm_msg: str = '') -> pd.DataFrame:
"""
Identifies differentially expressed genes using Mann Whitney U test.
:param dataset1: nabo.Dataset instance
:param dataset2: nabo.Dataset instance or None
:param test_cells: list of cells for which markers has to be found.
These could be cells from a cluster,cells with high
mapping score, etc
:param control_cells: List of cell groups against which markers need to
be found. This could just one groups of cells or
multiple groups of cells.
:param test_label: Label for test cells.
:param control_group_labels: Labels of control cell groups
:param exp_frac_thresh: Fraction of cells that should have a non zero
value for a gene.
:param log2_fc_thresh: Threshold for log2 fold change
:param qval_thresh: Threshold for adjusted p value
:param tqdm_msg: Message to print while displaying progress
:return: pd.Dataframe
"""
test_cells_idx = [dataset1.cellIdx[x] for x in test_cells]
control_cells_idx_group = []
for i in control_cells:
if dataset2 is None:
control_cells_idx_group.append([dataset1.cellIdx[x] for x in i])
else:
control_cells_idx_group.append([dataset2.cellIdx[x] for x in i])
if test_label is None:
test_label = 'Test group'
if control_group_labels is None:
control_group_labels = ['Ctrl group %d' % x for x in range(len(
control_cells_idx_group))]
num_test_cells = len(test_cells_idx)
num_groups = len(control_cells_idx_group)
min_n = [min(num_test_cells, len(x)) for x in control_cells_idx_group]
n1n2 = [num_test_cells * x for x in min_n]
if dataset2 is None:
valid_genes = {dataset1.genes[x]: None for x in dataset1.keepGenesIdx}
else:
valid_genes = {}
control_gene_list = {x: None for x in dataset2.genes}
for i in dataset1.keepGenesIdx:
gene = dataset1.genes[i]
if gene in control_gene_list:
valid_genes[gene] = None
del control_gene_list
de = []
for gene in tqdm(valid_genes, bar_format=tqdm_bar, desc=tqdm_msg):
rbc, mw_p, log_fc = 0, 1, 0
all_vals = dataset1.get_norm_exp(gene)
test_vals = all_vals[test_cells_idx]
ef = np.nonzero(test_vals)[0].shape[0] / num_test_cells
if ef < exp_frac_thresh:
continue
if dataset2 is None:
all_control_vals = all_vals
else:
all_control_vals = dataset2.get_norm_exp(gene)
log_mean_test_vals = np.log2(test_vals.mean())
for i in range(num_groups):
control_vals = all_control_vals[control_cells_idx_group[i]]
control_vals.sort()
control_vals = control_vals[-min_n[i]:]
mean_control_vals = control_vals.mean()
if mean_control_vals == 0:
log_fc = np.inf
else:
log_fc = log_mean_test_vals - np.log2(mean_control_vals)
if log_fc < log2_fc_thresh:
continue
try:
u, mw_p = mannwhitneyu(test_vals, control_vals)
except ValueError:
pass
else:
rbc = 1 - ((2 * u) / n1n2[i])
de.append((gene, ef, control_group_labels[i], rbc, log_fc, mw_p))
de = pd.DataFrame(de, columns=['gene', 'exp_frac', 'versus_group',
'rbc', 'log2_fc', 'pval'])
if de.shape[0] > 1:
de['qval'] = multipletests(de['pval'].values, method='fdr_bh')[1]
else:
de['qval'] = [np.nan for _ in range(de.shape[0])]
de['test_group'] = [test_label for _ in range(de.shape[0])]
out_order = ['gene', 'exp_frac', 'test_group', 'versus_group',
'rbc', 'log2_fc', 'pval', 'qval']
de = de[out_order].sort_values(by='qval')
return de[(de.qval < qval_thresh)].reset_index().drop(columns=['index'])
|
422082785845918f7e999125b7e57e6c1fbcb535
| 3,643,761
|
def say_hello_twice(subject):
"""Says hello twice using `say_hello`."""
return say_hello(subject) + " " + say_hello(subject)
|
66a6fafca01f6ddc6304fef15aea27bb15c23416
| 3,643,762
|
def get_zones(ec2):
"""
Return all available zones in the region
"""
zones = []
try:
aws_zones = ec2.describe_availability_zones()['AvailabilityZones']
except ClientError as e:
print(e.response['Error']['Message'])
return None
for zone in aws_zones:
if zone['State'] == 'available':
zones.append(zone['ZoneName'])
return zones
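# Hedged usage sketch: needs boto3 and valid AWS credentials at runtime, and assumes
# ClientError was imported from botocore.exceptions as the except clause implies.
import boto3

ec2_client = boto3.client("ec2", region_name="us-east-1")
print(get_zones(ec2_client))  # e.g. ['us-east-1a', 'us-east-1b', ...]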
|
acd023bcf5863aff0cd562f6c097062d9693738d
| 3,643,763
|
import torch
def x_gate():
"""
Pauli x
"""
return torch.tensor([[0, 1], [1, 0]]) + 0j
|
736d72d832380ea5a1d6c4a840cb6aa0050638e5
| 3,643,764
|
def merge_dictionaries(default_dictionary, user_input_dictionary, path=None):
"""Merges user_input_dictionary into default dictionary;
    default values will be overwritten by the user's input."""
return {**default_dictionary, **user_input_dictionary}
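# Usage sketch: user values win on key collisions because they are unpacked last.
defaults = {"theme": "light", "font_size": 12}
user_input = {"theme": "dark"}
print(merge_dictionaries(defaults, user_input))  # {'theme': 'dark', 'font_size': 12}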
|
ea600efcd69e920ae536fa2f22a4c883a71d8ad3
| 3,643,765
|
def create_frequencyvector(T_end, f_max_requested):
""" A function to create the vector of frequencies we need to solve using the reflectivity
method, to achieve the desired length of time and highest modelled frequency.
NOTE: Because we require the number of frequencies to be odd, the maximum frequency may
change.
Returns the frequency vector and the corresponding time step dt.
"""
# T_end : End time of simulation
# f_max_requested : Maximum desired frequency to be modelled
# Minimum modelled frequency (always 0 for now)
f_min = 0
# Frequency resolution
df = 1 / T_end
# Number of frequencies (round up if needed), + 1 for the first frequency (zero)
n_f = np.ceil((f_max_requested - f_min) / df) + 1
n_f = n_f.astype(int)
# Make sure the number of frequencies is odd
if n_f % 2 != 1:
n_f += 1
# Maximum modelled frequency (accurate), -1 for the first frequency which is zero
f_max_actual = (n_f - 1) * df
assert f_max_actual >= f_max_requested, 'Actual frequency too low'
dt = 1 / (2 * f_max_actual)
freq = np.linspace(0, f_max_actual, n_f)
return freq, dt
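# Usage sketch: 2 s of signal up to 100 Hz gives df = 0.5 Hz, an odd number of
# frequencies (201) and the matching time step dt = 1 / (2 * f_max_actual).
freq, dt = create_frequencyvector(T_end=2.0, f_max_requested=100.0)
print(len(freq), freq[-1], dt)  # 201 100.0 0.005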
|
d402840259bdc0049c580e057a6de815dfaa02f1
| 3,643,766
|
def get_fiber_protein_intake(
nutrients_lower_lists, nutrients_middle_lists,nutrients_upper_lists):
"""Gets financial class-wise fibee and protein intake data."""
lower_fiber_prot = nutrients_lower_lists.map(lambda x: (x[1], x[3]))
middle_fiber_prot = nutrients_middle_lists.map(lambda x: (x[1], x[3]))
upper_fiber_prot = nutrients_upper_lists.map(lambda x: (x[1], x[3]))
return lower_fiber_prot, middle_fiber_prot, upper_fiber_prot
|
990293236a10ed18960393b39dbfb46652fca51d
| 3,643,767
|
def _add_fvar(font, axes, instances, axis_map):
"""
Add 'fvar' table to font.
axes is a dictionary mapping axis-id to axis (min,default,max)
coordinate values.
instances is list of dictionary objects with 'location', 'stylename',
and possibly 'postscriptfontname' entries.
axisMap is dictionary mapping axis-id to (axis-tag, axis-name).
"""
assert "fvar" not in font
font['fvar'] = fvar = newTable('fvar')
nameTable = font['name']
for iden in sorted(axes.keys(), key=lambda k: axis_map[k][0]):
axis = Axis()
axis.axisTag = Tag(axis_map[iden][0])
axis.minValue, axis.defaultValue, axis.maxValue = axes[iden]
axisName = tounicode(axis_map[iden][1])
axis.axisNameID = nameTable.addName(axisName)
fvar.axes.append(axis)
for instance in instances:
coordinates = instance['location']
name = tounicode(instance['stylename'])
psname = instance.get('postscriptfontname')
inst = NamedInstance()
inst.subfamilyNameID = nameTable.addName(name)
if psname is not None:
psname = tounicode(psname)
inst.postscriptNameID = nameTable.addName(psname)
inst.coordinates = {axis_map[k][0]:v for k,v in coordinates.items()}
fvar.instances.append(inst)
return fvar
|
20836c91121603610f5bfb19777e4b6f440f2007
| 3,643,768
|
from tqdm import tqdm
def init_nornir(username, password):
"""INITIALIZES NORNIR SESSIONS"""
nr = InitNornir(
config_file="network_automation/topology_builder/graphviz/config/config.yml"
)
nr.inventory.defaults.username = username
nr.inventory.defaults.password = password
managed_devs = nr.filter(F(groups__contains="ios_devices") | F(groups__contains="nxos_devices"))
with tqdm(total=len(managed_devs.inventory.hosts)) as progress_bar:
results = managed_devs.run(task=get_data_task, progress_bar=progress_bar)
hosts_failed = list(results.failed_hosts.keys())
if hosts_failed != []:
auth_fail_list = list(results.failed_hosts.keys())
for dev in auth_fail_list:
dev_auth_fail_list.add(dev)
print(f"Authentication Failed: {auth_fail_list}")
print(
f"{len(list(results.failed_hosts.keys()))}/{len(managed_devs.inventory.hosts)} devices failed authentication..."
)
return managed_devs, results, dev_auth_fail_list
|
edf6a45a2ce9dc7799351892c1b53ab2b949607c
| 3,643,769
|
def _format_rest_url(host: str, append: str = "") -> str:
"""Return URL used for rest commands."""
return f"http://{host}:8001/api/v2/{append}"
|
1d5ace3919da004e648cb6c7d6d80fe72903c0e1
| 3,643,770
|
from typing import Union
import torch
import os
import warnings
def load(name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit: bool = False,
download_root: str = None):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model or more hackable non-JIT model (default).
download_root: str
path to download the model files; by default, it uses "~/.cache/clip"
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)),
example_inputs=[])
device_node = [
n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)
][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
|
be42a21f40d97e559e1e1a65b09ca45b57fd88b9
| 3,643,771
|
from typing import Optional
from typing import Set
def get_synonyms(prefix: str) -> Optional[Set[str]]:
"""Get the synonyms for a given prefix, if available."""
entry = get_resource(prefix)
if entry is None:
return None
return entry.get_synonyms()
|
44e4fc7259f9536dc192a08566b8db7f9256f916
| 3,643,772
|
def results(request):
""" Returns the actual body of the search results, for AJAX stuff """
query = request.GET.get("q", "")
if len(query) >= 4:
ctx = _search_context(query, request.user)
return TemplateResponse(request, "search/results.html", ctx)
return TemplateResponse(request, "search/too_short.html", {})
|
a6282dd489e3406ebc8b2349159b58d4cb0e1fd4
| 3,643,773
|
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8):
"""
    Returns a dict whose keys are the feature pairs and whose values are
    booleans indicating whether the pair is linearly correlated above the
    given threshold.
"""
results = {}
for pair in feature_pairs:
f1, f2 = pair.split("__")
corr = corr_matrix[f1][f2]
results[pair] = round(corr, 3) >= rho_threshold
return results
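# Usage sketch: any nested mapping supporting corr_matrix[f1][f2] works; feature
# pairs use the double-underscore naming convention assumed from pair.split("__").
corr_demo = {
    "age":    {"age": 1.0, "height": 0.91, "income": 0.30},
    "height": {"age": 0.91, "height": 1.0, "income": 0.25},
    "income": {"age": 0.30, "height": 0.25, "income": 1.0},
}
print(is_correlated(corr_demo, ["age__height", "age__income"]))
# {'age__height': True, 'age__income': False}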
|
18afa0cc24f5d9205cde3c8ad23f70d73b5c395b
| 3,643,774
|
def find_password(liste, login):
""" """
for user in liste:
if user[0] == login:
return user[1]
return None
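# Usage sketch: `liste` is a sequence of (login, password) pairs.
users_demo = [("alice", "s3cret"), ("bob", "hunter2")]
print(find_password(users_demo, "bob"))    # hunter2
print(find_password(users_demo, "carol"))  # None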
|
8f61072a8b1cc34eb27c1665b1cd34aeb6630ce2
| 3,643,775
|
def sample_weather_scenario():
"""
Generate a weather scenario with known values for the wind condition.
"""
times = pd.date_range('1/1/2000', periods=72, freq='6H')
latitude = np.linspace(0, 10, 11)
longitude = np.linspace(0, 10, 11)
wsp_vals = np.full((72, 11, 11), 10.0)
wdi_vals = np.full((72, 11, 11), 0.0)
cusp_vals = np.full((72, 11, 11), 0.0)
cudi_vals = np.full((72, 11, 11), 0.0)
wadi_vals = np.full((72, 11, 11), 0.0)
wahi_vals = np.full((72, 11, 11), 0.0)
wisp = xr.DataArray(wsp_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
widi = xr.DataArray(wdi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
cusp = xr.DataArray(cusp_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
cudi = xr.DataArray(cudi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
    wahi = xr.DataArray(wahi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
    wadi = xr.DataArray(wadi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
return wisp, widi, cusp, cudi, wahi, wadi
|
124f43b090149bb23e52a88a03c441d1311c5bea
| 3,643,776
|
import os
def parse_csv_file(csv_filepath, expect_negative_correlation = False, STDev_cutoff = 1.0, headers_start_with = 'ID', comments_start_with = None, separator = ','):
"""
Analyzes a CSV file.
Expects a CSV file with a header line starting with headers_start_with e.g. "ID,experimental value, prediction 1 value, prediction 2 value,"
Record IDs are expected in the first column.
Experimental values are expected in the second column.
Predicted values are expected in the subsequent columns.
:param csv_filepath: The path to a CSV file containing experimental and predicted data for some dataset.
:param expect_negative_correlation: See parse_csv.
:param STDev_cutoff: See parse_csv.
:param headers_start_with: See parse_csv.
:param comments_start_with: See parse_csv.
:param separator: See parse_csv.
"""
assert (os.path.exists(csv_filepath))
return parse_csv(get_file_lines(csv_filepath),
expect_negative_correlation = expect_negative_correlation, STDev_cutoff = STDev_cutoff, headers_start_with = headers_start_with,
comments_start_with = comments_start_with, separator = separator)
|
976180982335a69451971b85cd4d411d56da7844
| 3,643,777
|
import torch
def ToTensor(pic):
"""Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float().div(255)
if pic.mode == "I":
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == "I;16":
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
if pic.mode == "YCbCr":
nchannel = 3
elif pic.mode == "I;16":
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
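# Hedged usage sketch: assumes numpy is available (the function body already uses np);
# an H x W x C uint8 array becomes a (C, H, W) float tensor scaled to [0, 1].
import numpy as np

img_demo = np.random.randint(0, 256, size=(4, 5, 3), dtype=np.uint8)
t = ToTensor(img_demo)
print(t.shape, t.dtype, float(t.max()) <= 1.0)  # torch.Size([3, 4, 5]) torch.float32 True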
|
c3ed682c520f17f24169377b2a3016510e9724f9
| 3,643,778
|
def part2(data):
"""
>>> part2([[43, 19], [2, 29, 14]])
105
>>> part2([[9, 2, 6, 3, 1], [5, 8, 4, 7, 10]])
291
>>> part2(read_input())
32528
"""
deck_one = tuple(data[0])
deck_two = tuple(data[1])
_, winning_deck = combat(deck_one, deck_two)
return score(winning_deck)
|
226eb835030716cdfa30310e53bc76c5dd3a4e7e
| 3,643,779
|
from pathlib import Path
def validate_recording(
ai_file_path, ephys_ap_data_path, debug=False, sampling_rate=30000
):
"""
Checks that an ephys recording and bonsai behavior recording
are correctly syncd. To do this:
1. check that number of recording sync signal pulses is the same for both sources
Arguments:
ai_file_pat: str. Path to .bin analog inputs file
ephys_ap_data_path: str. Path to .bin with AP ephys data.
"""
name = Path(ai_file_path).name
logger.debug(f"\nRunning validate RECORDING on {name}")
# load analog from bonsai
bonsai_probe_sync = load_or_open(
ephys_ap_data_path, "bonsai", ai_file_path, 3
)
# load data from ephys (from local file if possible)
ephys_probe_sync = load_or_open(
ephys_ap_data_path,
"ephys",
get_recording_local_copy(ephys_ap_data_path),
-1,
order="F",
dtype="int16",
nsigs=385,
)
# check for aberrant signals in bonsai
    errors = np.where((bonsai_probe_sync < -0.1) | (bonsai_probe_sync > 5.1))[
        0
    ]
if len(errors):
logger.info(
f"Found {len(errors)} samples with too high values in bonsai probe signal"
)
if len(errors) > 1000:
logger.warning(f"This value seems to long, retuirning gailure")
return False, 0, 0, "too_many_errors_in_behavior_sync_signal"
bonsai_probe_sync[errors] = bonsai_probe_sync[errors - 1]
# check for aberrant signals in ephys
errors = np.where(ephys_probe_sync > 75)[0]
if len(errors):
logger.warning(
f"Found {len(errors)} samples with too high values in probe signal"
)
if len(errors) > 1000:
return False, 0, 0, "too_many_errors_in_ephys_sync_signal"
ephys_probe_sync[errors] = ephys_probe_sync[errors - 1]
# find probe sync pulses in both
(
is_ok,
bonsai_sync_onsets,
bonsai_sync_offsets,
ephys_sync_onsets,
ephys_sync_offsets,
) = get_onsets_offsets(bonsai_probe_sync, ephys_probe_sync, sampling_rate)
# get time scaling factor
time_scaling_factor = 1 / (
(ephys_sync_onsets[-1] - ephys_sync_onsets[0])
/ (bonsai_sync_onsets[-1] - bonsai_sync_onsets[0])
)
# debugging plots
if debug or not is_ok:
plot_recording_triggers(
bonsai_probe_sync,
ephys_probe_sync,
bonsai_sync_onsets,
bonsai_sync_offsets,
ephys_sync_onsets,
ephys_sync_offsets,
sampling_rate,
time_scaling_factor,
)
# plt.show()
return is_ok, ephys_sync_onsets[0], time_scaling_factor, "nothing"
|
269e66a6fc06490c8cbd9f983bc0c57f090c3671
| 3,643,780
|
def dehyphenate(string):
"""Remove hyphenated linebreaks from 'string'."""
return hyphen_newline_re.sub("", string)
|
6894fd7972c3990fd00e5818e0b30e48e78017e0
| 3,643,781
|
def grounder(img, dtype=None):
"""Tries to remove absolute offset
    'img' must be a 3-channel color image"""
shape = img.shape
"""
    # Reshape
a = img.reshape((shape[0] * shape[1], 3))
min = np.zeros(a.shape)
max = np.zeros(a.shape)
    # Minima / maxima
min[:,0] = min[:,1] = min[:,2] = a.min(axis=1)
max[:,0] = max[:,1] = max[:,2] = a.max(axis=1)
    # Reshape back
min = min.reshape(shape)
max = max.reshape(shape)
    # Bring back down to ground level
grounded = img - min
# return (grounded / max).astype(np.float32)
return (grounded / 255.0).astype(np.float32)
"""#"""
min = coloroffset(img)
grounded = img - min
if dtype is not None:
        grounded = grounded.astype(dtype)
return grounded
|
55a022a26f457cf0ec76d4ed8fa37f470db31e11
| 3,643,782
|
def arch_prob(arch, dims, **kwds):
""" Returns the combined probability of for arch given values """
values = dict(kwds)
dimkeys = list(dims.keys())
    assert isinstance(arch, (tuple, list)), "Architecture must be a tuple or list"
serial = isinstance(arch, list)
probs = [None] * len(arch)
for i, subarch in enumerate(arch):
keyset = subarch.keylist
vals = {key: values[key] for key in dimkeys if key in keyset}
subdims = {key: dims[key] for key in dimkeys if key in keyset}
probs[i] = subarch.eval_prob(vals, subdims)
if serial:
return probs[-1]
pscales = [subarch.pscale for subarch in arch]
prob, pscale = prod_rule(*tuple(probs), pscales=pscales)
return prob
|
94778c471d4ae3fe534af50543ef9465a6b2e793
| 3,643,783
|
def get_bin_values(base_dataset, bin_value):
"""Gets the values to be used when sorting into bins for the given dataset, from the configured options."""
values = None
if bin_value == "results":
values = base_dataset.get_output()
elif bin_value == "all":
# We set all values to 0, assuming single bin will also set its value to 0.
values = [0] * base_dataset.get_number_of_samples()
else:
raise Exception(f"Invalid bin value configured: {bin_value}")
return values
|
cf2419066d6e642e65d9a8747081ebfee417ed64
| 3,643,784
|
def get_reviews(revision_range):
"""Returns the list of reviews found in the commits in the revision range.
"""
log = check_output(['git',
'--no-pager',
'log',
'--no-color',
'--reverse',
revision_range]).strip()
review_ids = []
for line in log.split('\n'):
pos = line.find('Review: ')
if pos != -1:
pattern = re.compile('Review: ({url})$'.format(
url=os.path.join(REVIEWBOARD_URL, 'r', '[0-9]+')))
match = pattern.search(line.strip().strip('/'))
if match is None:
print "\nInvalid ReviewBoard URL: '{}'".format(line[pos:])
sys.exit(1)
url = match.group(1)
review_ids.append(os.path.basename(url))
return review_ids
|
0ff81eef45fb123e25dc7662f320e49fac7aa378
| 3,643,785
|
def create_cert_req(keyType=crypto.TYPE_RSA,
bits=1024,
messageDigest="md5"):
"""
Create certificate request.
Returns: certificate request PEM text, private key PEM text
"""
# Create certificate request
req = crypto.X509Req()
# Generate private key
pkey = crypto.PKey()
pkey.generate_key(keyType, bits)
req.set_pubkey(pkey)
req.sign(pkey, messageDigest)
return (crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req),
crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
|
168fd8c7cde30730cdc9e74e5fbf7619783b29c9
| 3,643,786
|
def large_xyz_to_lab_star(large_xyz, white=const_d50_large_xyz):
"""
    # Overview
    Compute L*a*b* values from XYZ values.
    # Input data
    numpy array, shape = (N, M, 3)
    # Reference
https://en.wikipedia.org/wiki/Lab_color_space
"""
if not common.is_img_shape(large_xyz):
raise TypeError('large_xyz shape must be (N, M, 3)')
x, y, z = np.dsplit(large_xyz, 3)
white = [x / white[1] for x in white]
l = 116 * _func_t(y/white[1]) - 16
a = 500 * (_func_t(x/white[0]) - _func_t(y/white[1]))
b = 200 * (_func_t(y/white[1]) - _func_t(z/white[2]))
return np.dstack((l, a, b))
|
aec3cb423698954aa07a61bf484e1acd8e38d5db
| 3,643,787
|
from typing import Any
def return_value(value: Any) -> ObservableBase:
"""Returns an observable sequence that contains a single element,
using the specified scheduler to send out observer messages.
There is an alias called 'just'.
    Example:
        res = rx.Observable.return_value(42)
        res = rx.Observable.just(42)
Keyword arguments:
value -- Single element in the resulting observable sequence.
Returns an observable sequence containing the single specified
element.
"""
def subscribe(observer, scheduler=None):
scheduler = scheduler or current_thread_scheduler
def action(scheduler, state=None):
observer.on_next(value)
observer.on_completed()
return scheduler.schedule(action)
return AnonymousObservable(subscribe)
|
e14ac3a08a3f127b77f57b7192a8f362ec3485b2
| 3,643,788
|
def compare_policies(current_policy, new_policy):
""" Compares the existing policy and the updated policy
Returns True if there is a difference between policies.
"""
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
|
e69ecaa051602e2d9eab0695f62b391a9aca17ad
| 3,643,789
|
def meanPSD(d0,win=np.hanning,dx=1.,axis=0,irregular=False,returnInd=False,minpx=10):
"""Return the 1D PSD averaged over a surface.
Axis indicates the axis over which to FFT
    If irregular is True, each slice will be stripped of NaNs and the
    power spectra interpolated onto a common frequency grid
Presume image has already been interpolated internally
If returnInd is true, return array of power spectra
Ignores slices with less than minpx non-nans
"""
#Handle which axis is transformed
if axis==0:
d0 = np.transpose(d0)
#Create list of slices
if irregular is True:
d0 = [stripnans(di) for di in d0]
else:
d0 = [di for di in d0]
#Create power spectra from each slice
pows = [realPSD(s,win=win,dx=dx,minpx=minpx) for s in d0 \
if np.sum(~np.isnan(s)) >= minpx]
#Interpolate onto common frequency grid of shortest slice
if irregular is True:
#Determine smallest frequency grid
ln = [len(s[0]) for s in pows]
freq = pows[np.argmin(ln)][0]
#Interpolate
pp = [griddata(p[0],p[1],freq) for p in pows]
else:
pp = [p[1] for p in pows]
freq = pows[0][0]
#Average
pa = np.mean(pp,axis=0)
if returnInd is True:
return freq,pp
return freq,pa
|
99d6ab3e8ef505f031346db10762a195904b455e
| 3,643,790
|
async def get_temperatures(obj):
"""Get temperatures as read by the thermostat."""
return await obj["madoka"].temperatures.query()
|
b4643d9c40f6aa8953c598dd572d291948ef34a4
| 3,643,791
|
import itertools
def get_zero_to_2pi_input(label, required, placeholder=None, initial=None, validators=()):
"""
Method to get a custom positive float number field
:param label: String label of the field
:param required: Boolean to define whether the field is required or not
:param placeholder: Placeholder to appear in the field
:param initial: Default input value for the field
:param validators: validators that should be attached with the field
    :return: A custom floating point number field that accepts only numbers greater than zero and less than 2π (2 * math.pi)
"""
default_validators = [validate_positive_float, validate_less_than_2pi, ]
return CustomFloatField(
label=label,
required=required,
initial=initial,
placeholder=placeholder,
validators=list(itertools.chain(default_validators, validators)),
)
|
d1349088d8b2c29ecc07bdb6900ff335384e3c30
| 3,643,792
|
def compile_math(math):
""" Compile a mathematical expression
Args:
math (:obj:`str`): mathematical expression
Returns:
:obj:`_ast.Expression`: compiled expression
"""
math_node = evalidate.evalidate(math,
addnodes=[
'Eq', 'NotEq', 'Gt', 'Lt', 'GtE', 'LtE',
                                        'Sub', 'Mult', 'Div', 'Pow',
'And', 'Or', 'Not',
'BitAnd', 'BitOr', 'BitXor',
'Call',
],
funcs=MATHEMATICAL_FUNCTIONS.keys())
compiled_math = compile(math_node, '<math>', 'eval')
return compiled_math
|
511c281a03591ed5b84e216f3edb1503537cbb86
| 3,643,793
|
from typing import Optional
from typing import Union
from typing import List
import click
def colfilter(
data,
skip: Optional[Union[str, List[str]]] = None,
only: Optional[Union[str, List[str]]] = None,
):
"""
Remove some variables (skip) or keep only certain variables (only)
Parameters
----------
data: pd.DataFrame
The DataFrame to be processed and returned
skip: str, list or None (default is None)
List of variables to remove
only: str, list or None (default is None)
List of variables to keep
Returns
-------
data: pd.DataFrame
The filtered DataFrame
Examples
--------
>>> import clarite
>>> female_logBMI = clarite.modify.colfilter(nhanes, only=['BMXBMI', 'female'])
================================================================================
Running colfilter
--------------------------------------------------------------------------------
Keeping 2 of 945 variables:
0 of 0 binary variables
0 of 0 categorical variables
2 of 945 continuous variables
0 of 0 unknown variables
================================================================================
"""
boolean_keep = _validate_skip_only(data, skip, only)
dtypes = _get_dtypes(data)
click.echo(f"Keeping {boolean_keep.sum():,} of {len(data.columns):,} variables:")
for kind in ["binary", "categorical", "continuous", "unknown"]:
is_kind = dtypes == kind
is_kept = is_kind & boolean_keep
click.echo(f"\t{is_kept.sum():,} of {is_kind.sum():,} {kind} variables")
return data.loc[:, boolean_keep]
|
16c901f514afb1990e43c470c7e089eab5b4eb56
| 3,643,794
|
import math
def acos(x):
"""
"""
return math.acos(x)
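# Usage sketch: thin wrapper around math.acos, returning radians.
print(acos(1.0))   # 0.0
print(acos(-1.0))  # 3.141592653589793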
|
0a8ca8f716f0ea54b558ca27021830480dac662d
| 3,643,795
|
def get_callable_from_string(f_name):
"""Takes a string containing a function name (optionally module qualified) and returns a callable object"""
try:
mod_name, func_name = get_mod_func(f_name)
if mod_name == "" and func_name == "":
raise AttributeError("%s couldn't be converted to a module or function name" % f_name)
module = __import__(mod_name)
if func_name == "":
func_name = mod_name # The common case is an eponymous class
return getattr(module, func_name)
    except (ImportError, AttributeError) as exc:
raise RuntimeError("Unable to create a callable object for '%s': %s" % (f_name, exc))
|
ef1ae8d4c1da06e38a6029e0caa51b4e3fb5b95c
| 3,643,796
|
from typing import List
import bisect
def binary_get_bucket_for_node(buckets: List[KBucket], node: Node) -> KBucket:
"""Given a list of ordered buckets, returns the bucket for a given node."""
bucket_ends = [bucket.end for bucket in buckets]
bucket_position = bisect.bisect_left(bucket_ends, node.id)
# Prevents edge cases where bisect_left returns an out of range index
try:
bucket = buckets[bucket_position]
assert bucket.start <= node.id <= bucket.end
return bucket
except (IndexError, AssertionError):
raise ValueError("No bucket found for node with id {}".format(node.id))
|
ff1fc765c56e67af3c33798b403779f7aafb6bb0
| 3,643,797
|
def darken(color, factor=0.7):
"""Return darkened color as a ReportLab RGB color.
Take a passed color and returns a Reportlab color that is darker by the
factor indicated in the parameter.
"""
newcol = color_to_reportlab(color)
for a in ["red", "green", "blue"]:
setattr(newcol, a, factor * getattr(newcol, a))
return newcol
|
bcb937409a6790c6ac04a1550654e9b4fc398f9f
| 3,643,798
|
def fetch_all_tiles(session):
"""Fetch all tiles."""
return session.query(Tile).all()
|
15e21dff372859ad07f76d97944b9a002f44a35e
| 3,643,799
|