content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def loglikehood_coefficient(n_items, X, Y):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    log-likelihood similarity matrix between each pair of vectors.
    Parameters
    ----------
    n_items: int
        Number of items in the model.
    X: array of shape (n_samples_1, n_features)
    Y: array of shape (n_samples_2, n_features)
    Returns
    -------
    distances: array of shape (n_samples_1, n_samples_2)
    Examples
    --------
    >>> from scikits.crab.metrics.pairwise import loglikehood_coefficient
    >>> X = [['a', 'b', 'c', 'd'], ['e', 'f','g', 'h']]
    >>> # distance between rows of X
    >>> n_items = 7
    >>> loglikehood_coefficient(n_items,X, X)
    array([[ 1.,  0.],
           [ 0.,  1.]])
    >>> n_items = 8
    >>> loglikehood_coefficient(n_items, X, [['a', 'b', 'c', 'k']])
    array([[ 0.67668852],
           [ 0.        ]])
    References
    ----------
    See http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.5962 and
    http://tdunning.blogspot.com/2008/03/surprise-and-coincidence.html.
    """
    def safe_log(d):
        # Guard log against non-positive arguments (p == 0 or p == 1
        # cases); those terms contribute 0 to the log-likelihood.
        if d <= 0.0:
            return 0.0
        return np.log(d)

    def log_l(p, k, n):
        # Log-likelihood of k successes in n Bernoulli trials with rate p.
        return k * safe_log(p) + (n - k) * safe_log(1.0 - p)

    def two_log_lambda(k1, k2, n1, n2):
        # Dunning's G^2 log-likelihood-ratio statistic for two samples.
        p = (k1 + k2) / (n1 + n2)
        return 2.0 * (log_l(k1 / n1, k1, n1) + log_l(k2 / n2, k2, n2)
                      - log_l(p, k1, n1) - log_l(p, k2, n2))

    if X is Y:
        X = Y = np.asanyarray(X)
    else:
        X = np.asanyarray(X)
        Y = np.asanyarray(Y)
    # TODO: Check if it is possible to vectorize this double loop.
    result = []
    for arrayX in X:
        row = []
        for arrayY in Y:
            XY = np.intersect1d(arrayX, arrayY)
            if XY.size == 0:
                # No common items: zero similarity.
                row.append(0.0)
            elif (arrayX.size - XY.size == 0) or (n_items - arrayY.size) == 0:
                # One row is fully contained in the other (or the second
                # row exhausts the model): maximal similarity.
                row.append(1.0)
            else:
                log_likelihood = two_log_lambda(float(XY.size),
                                                float(arrayX.size - XY.size),
                                                float(arrayY.size),
                                                float(n_items - arrayY.size))
                row.append(1.0 - 1.0 / (1.0 + float(log_likelihood)))
        result.append(np.asanyarray(row))
    return np.asanyarray(result)
def get_archive_by_path(db, vault_name, path, retrieve_subpath_archs=False):
    """
    Will attempt to find the most recent version of an archive representing a given path.
    If retrieve_subpath_archs is True, then will also retrieve latest versions of archives representing
    subdirs of the path.
    :param db: database handle passed through to the lookup helpers
    :param vault_name: name of the vault to search
    :param path: The path whose contents we want to retrieve, relative to the top_dir that was backed up.
    :param retrieve_subpath_archs: If True, will return a list of all of the archives of subdirectories below the `path`
    in the directory tree
    :return: archive, or a list of archives when retrieve_subpath_archs is True
    """
    if not retrieve_subpath_archs:
        # BUG FIX: the archive was previously looked up but never returned,
        # so this branch always yielded None.
        return get_most_recent_version_of_archive(db, vault_name, path)
    else:
        # When trying to find subdirectories, the daft assumption that we make is that the 'path' of the archive will
        # start with `path` and be longer than `path`. It'll work for now, but seems inelegant...
        # NOTE(review): `len(cur_path) >= len(path)` also matches `path`
        # itself (not strictly longer) -- confirm that is intended.
        path_list = get_list_of_paths_in_vault(db, vault_name)
        subdir_list = []
        while len(path_list):
            cur_path = path_list.pop()
            if cur_path.startswith(path) and len(cur_path) >= len(path):
                subdir_list.append(cur_path)
        arch_list = []
        for subdir in subdir_list:
            arch = get_most_recent_version_of_archive(db, vault_name, subdir)
            # Skip paths for which no archive exists.
            if arch:
                arch_list.append(arch)
        return arch_list
def superpixel_colors(
    num_pix:int = 1536,
    schema:str = 'rgb',
    interleave:int = 1,
    stroke:str = '',
    ) -> list:
    """
    Generate color (attribute) list for superpixel SVG paths
    Parameters
    ----------
    num_pix : int
        Number of super pixels to account for (default = 1536)
    schema : str
        Either of 'rgb' or 'random'
    interleave : int
        RGB interleave value (default = 1)
    stroke : str
        String that is inserted into every attribute at the end, e.g.
        to account for a stroke, such as 'stroke="#808080"'. Please
        note that the entire tag=value (pairs) must be given!
    Returns
    -------
    colors : list
        List of attributes suitable for superpixel_outlines (SVG)
    Raises
    ------
    ValueError
        If ``schema`` is neither 'rgb' nor 'random'.
    """
    colors = [''] * num_pix
    if not schema in ['random', 'rgb']:
        raise ValueError('invalid schema requested.')
    if schema == 'rgb':
        # Encode the (interleaved) pixel index into the 24-bit hex color:
        # low byte -> red, middle byte -> green, high byte -> blue.
        if stroke:
            for idx in range(num_pix):
                val = interleave * idx
                colors[idx] = 'fill="#{0:02x}{1:02x}{2:02x}" {3:s}'.format(
                    val % 256, (val // 256) % 256, (val // 65536) % 256, stroke)
        else:
            for idx in range(num_pix):
                val = interleave * idx
                colors[idx] = 'fill="#{0:02x}{1:02x}{2:02x}"'.format(
                    val % 256, (val // 256) % 256, (val // 65536) % 256)
    else:
        # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT
        import random
        if stroke:
            for idx in range(num_pix):
                # BUG FIX: the stroke attribute was previously emitted INSIDE
                # the fill quotes ('fill="#xxxxxx stroke=..."'), producing an
                # invalid fill value and dropping the stroke attribute.
                colors[idx] = 'fill="#{0:06x}" {1:s}'.format(
                    random.randrange(16777216), stroke)
        else:
            for idx in range(num_pix):
                colors[idx] = 'fill="#{0:06x}"'.format(
                    random.randrange(16777216))
    return colors
def build_module_op_list(m: tq.QuantumModule, x=None) -> List:
    """
    Serialize all operations in the module and generate a list with
    [{'name': RX, 'has_params': True, 'trainable': True, 'wires': [0],
    n_wires: 1, 'params': [array([[0.01]])]}]
    so that an identity module can be reconstructed.
    The module needs to have static support.

    :param m: the quantum module to serialize
    :param x: optional classical input forwarded through the module so that
        input-dependent modules get registered; if None, forward without it
    :return: list of dicts, one per primitive operation, in execution order
    """
    # Reset then enable static mode so the module graph is rebuilt fresh.
    m.static_off()
    m.static_on(wires_per_block=None)
    m.is_graph_top = False
    # forward to register all modules and parameters
    if x is None:
        m.forward(q_device=None)
    else:
        m.forward(q_device=None, x=x)
    m.is_graph_top = True
    # Flatten the recorded graph into an ordered list of primitive modules.
    m.graph.build_flat_module_list()
    module_list = m.graph.flat_module_list
    m.static_off()
    op_list = []
    for module in module_list:
        if module.params is not None:
            if module.params.shape[0] > 1:
                # more than one param, so it is from classical input with
                # batch mode; such batch-dependent params cannot be
                # serialized as fixed constants.
                assert not module.has_params
                params = None
            else:
                # has quantum params, batch has to be 1
                params = module.params[0].data.cpu().numpy()
        else:
            params = None
        op_list.append({
            'name': module.name.lower(),
            'has_params': module.has_params,
            'trainable': module.trainable,
            'wires': module.wires,
            'n_wires': module.n_wires,
            'params': params
        })
    return op_list
def test_support_different_io_size(cache_mode):
    """
    title: OpenCAS supports different IO sizes
    description: OpenCAS supports IO of size in rage from 512b to 128K
    pass_criteria:
      - No IO errors
    """
    with TestRun.step("Prepare devices"):
        cache_disk = TestRun.disks["cache"]
        core_disk = TestRun.disks["core"]
        cache_disk.create_partitions([Size(1, Unit.GibiByte)])
        cache_disk = cache_disk.partitions[0]
        core_disk.create_partitions([Size(50, Unit.GibiByte)])
        core_disk = core_disk.partitions[0]
    with TestRun.step("Start cache"):
        cache = casadm.start_cache(cache_dev=cache_disk, cache_mode=cache_mode, force=True)
        core = cache.add_core(core_disk)
    with TestRun.step("Load the default ioclass config file"):
        cache.load_io_class("/etc/opencas/ioclass-config.csv")
    with TestRun.step("Create a filesystem on the core device and mount it"):
        # NOTE(review): `mountpoint` is a module-level name defined outside
        # this function -- confirm it is set before the test runs.
        TestRun.executor.run(f"rm -rf {mountpoint}")
        fs_utils.create_directory(path=mountpoint)
        core.create_filesystem(Filesystem.xfs)
        core.mount(mountpoint)
    with TestRun.step("Run fio with block sizes: 512, 1k, 4k, 5k, 8k, 16k, 32k, 64 and 128k"):
        bs_list = [
            Size(512, Unit.Byte),
            Size(1, Unit.KibiByte),
            Size(4, Unit.KibiByte),
            Size(5, Unit.KibiByte),
            Size(8, Unit.KibiByte),
            Size(16, Unit.KibiByte),
            Size(32, Unit.KibiByte),
            Size(64, Unit.KibiByte),
            Size(128, Unit.KibiByte),
        ]
        # One fio job per block size, each writing to its own target file
        # with a distinct size so jobs do not collide.
        fio = Fio().create_command()
        fio.io_engine(IoEngine.libaio)
        fio.time_based()
        fio.do_verify()
        fio.direct()
        fio.read_write(ReadWrite.randwrite)
        fio.run_time(datetime.timedelta(seconds=1200))
        fio.io_depth(16)
        fio.verify_pattern(0xABCD)
        for i, bs in enumerate(bs_list):
            fio_job = fio.add_job()
            fio_job.target(os.path.join(mountpoint, str(bs.value)))
            fio_job.block_size(bs)
            fio_job.file_size(Size((i + 1) * 200, Unit.MebiByte))
        fio_output = fio.run()
        fio_errors = fio_output[0].total_errors()
        if fio_errors != 0:
            TestRun.fail(f"fio errors: {fio_errors}, should equal 0")
    with TestRun.step("Cleanup"):
        core.unmount()
        TestRun.executor.run(f"rm -rf {mountpoint}")
def _get_args():
""" Parses the command line arguments and returns them. """
parser = argparse.ArgumentParser(description=__doc__)
# Argument for the mode of execution (human or random):
parser.add_argument(
"--mode", "-m",
type=str,
default="human",
choices=["human", 'random'],
help="The execution mode for the game.",
)
return parser.parse_args() | 5,334,305 |
def test_no_lstrip_blocks(gen_paths: typing.Any, run_nnvg: typing.Callable) -> None:
    """ Ensure that lstrip_blocks is false if --lstrip-blocks is not supplied.
    """
    testtype_path = get_path_to_TestType_0_2(gen_paths)
    # Run nnvg WITHOUT --lstrip-blocks: leading whitespace produced by
    # template blocks must then be preserved in the generated file.
    nnvg_args0 = ['--templates', str(gen_paths.templates_dir),
                  '-O', str(gen_paths.out_dir),
                  '-e', '.json',
                  str(gen_paths.dsdl_dir / pathlib.Path("uavcan"))]
    run_nnvg(gen_paths, nnvg_args0)
    with open(str(testtype_path), 'r') as testtype_file:
        lines = testtype_file.readlines()
    # The third line should consist of the un-stripped block whitespace.
    assert "    \n" == lines[2]
def file_to_dict(filename, data):
    """Converts JSON file to dict
    :param filename: filename (used only in error messages)
    :param data: string
    :return: dict object
    """
    try:
        # First attempt: normalise the content via to_json() before parsing.
        # If that fails for any reason, fall back to parsing the raw string.
        try:
            return json.loads(to_json(data))
        except Exception as _:
            return json.loads(data)
    except Exception as error:
        # Both parse attempts failed: log and surface a ValueError.
        logger.error("Failed to parse s3 file {}, error: {}".format(filename, str(error)))
        raise ValueError("Unable to load JSON file {} error: {}".format(filename, str(error)))
def reorganizeArray(id_A, id_B, gids_tuple, unordered_coordinates):
    """
    From a tuple of genome ID's representing a coordinate array's structure (gidI, gidJ),
    and the corresponding coordinate np-array (n x 6) of design [[locI1, locJ1, fidI1, fidJ2, Kn, Ks],
                                                                 ...
                                                                 [locIn, locJn, fidIn, fidJn, Kn, Ks]]
    * Both of these are returned by getPointCoords - as "ids" and "coords", respectively.
    Restructures to represent an (A, B) order (swaps columns 0,1 and 2,3 if A=J and B=I, else unchanged)
    :param id_A: GID of genome to appear first
    :param id_B: GID of genome to appear second
    :param gids_tuple: Ordered tuple of GIDs in original coordinate array (returned by getPointCoords)
    :param unordered_coordinates: coordinate array (ordered or unordered) to map order to (returned by getPointCoords)
    :return: coordinate array with [A_loc, B_loc, A_fid, B_fid, Kn, Ks] order enforced.
    """
    reorg_start = datetime.now()
    if id_A == gids_tuple[0] and id_B == gids_tuple[1]:
        # Already in the requested (A, B) order: nothing to do.
        ordered_coordinates = unordered_coordinates
    elif id_A == gids_tuple[1] and id_B == gids_tuple[0]:
        # Reversed order: swap loc columns (0,1) and fid columns (2,3);
        # Kn/Ks columns (4,5) are order-independent.
        ordered_coordinates = unordered_coordinates[:, [1, 0, 3, 2, 4, 5]]
    else:
        # The requested GID pair does not match the array's GIDs at all.
        # BUG FIX: this used a Python-2-only `print` statement, which made
        # the function (and file) unparseable under Python 3.
        print("ERROR: reorganizeArray (%s, %s)" % (str(id_A), str(id_B)))
        exit()
    reorg_end = datetime.now()
    print("Coordinates Reorganization Complete (%s)" % str(reorg_end - reorg_start))
    print("--> (%s,%s) to (%s,%s)" % (gids_tuple[0], gids_tuple[1], id_A, id_B))
    return ordered_coordinates
def get_table_entries(table, ports):
    """Populate *ports* with every table row whose description is assigned.

    Skips the two header rows; for each remaining row, maps the first
    cell's text (utf-8 bytes) to the minified fourth cell's text.
    """
    data_rows = table.find_all('tr')[2:]
    for data_row in data_rows:
        cells = data_row.find_all('td')
        description = cells[3].text
        if description.encode('utf-8') == b'Unassigned':
            continue
        key = cells[0].text.encode('utf-8')
        ports[key] = minify(description).encode('utf-8')
def test_extract_x_all():
    """Test all known variants.

    Tuple entries pair the raw text with its expected normalized form;
    plain strings are expected back unchanged.
    """
    allitems = (
        "RFC1234",
        ("RFC 2345", "RFC2345"),
        "IEEE 802.1x",
        ("801.2x", "IEEE 801.2x"),
        "ITU-T G.1111.1",
        "3GPP Release 11",
        "GR-1111-CORE",
        "ITU-T I.111",
        "gnmi.proto version 0.0.1",
        "a-something-mib",
        "openconfig-a-global.yang version 1.1.1",
        "ANSI T1.101.11",
    )
    raw_parts = []
    exp = []
    for itm in allitems:
        if isinstance(itm, tuple):
            raw, normalized = itm
        else:
            raw = normalized = itm
        raw_parts.append(raw)
        exp.append(normalized)
    # Join with single spaces, keeping the trailing separator.
    txt = "".join(part + " " for part in raw_parts)
    std = list(ietf.extract_standards_ordered(txt))
    assert std == exp
def compile_model_output(i, j, files, model):
    """ Compiles the model variables over several files into a single array at a j,i grid point.
    Model can be "Operational", "Operational_old", "GEM".
    Returns wind speed, wind direction, time, pressure, temperature, solar radiation,
    thermal radiation, humidity and precipitation, each flattened to one value per
    hour (24 records per file are assumed).
    """
    wind = []; direc = []; t = []; pr = []; sol = []; the = []; pre = []; tem = []; qr = []
    for f in files:
        G = nc.Dataset(f)
        u = G.variables['u_wind'][:, j, i]; v = G.variables['v_wind'][:, j, i]
        pr.append(G.variables['atmpres'][:, j, i]); sol.append(G.variables['solar'][:, j, i])
        qr.append(G.variables['qair'][:, j, i]); the.append(G.variables['therm_rad'][:, j, i])
        pre.append(G.variables['precip'][:, j, i])
        tem.append(G.variables['tair'][:, j, i])
        # Wind speed and compass-style direction in [0, 360) degrees.
        speed = np.sqrt(u**2 + v**2)
        wind.append(speed)
        d = np.arctan2(v, u)
        d = np.rad2deg(d + (d < 0) * 2 * np.pi)
        direc.append(d)
        ts = G.variables['time_counter']
        if model == 'GEM':
            torig = nc_tools.time_origin(G)
        elif model == 'Operational' or model == 'Operational_old':
            torig = datetime.datetime(1970, 1, 1)  # there is no time_origin attribute in OP files, so it is hard coded
        for ind in np.arange(ts.shape[0]):
            stamp = torig + datetime.timedelta(seconds=ts[ind])
            # BUG FIX: `.datetime` was previously accessed unconditionally,
            # which raised AttributeError for the Operational models where
            # torig is a plain datetime.  nc_tools.time_origin presumably
            # returns an Arrow-like object exposing `.datetime` -- verify.
            t.append(stamp.datetime if hasattr(stamp, 'datetime') else stamp)
    # BUG FIX: the reshape length previously used the undefined global
    # `filesGEM` instead of the `files` argument.
    n_records = len(files) * 24
    wind = np.array(wind).reshape(n_records,)
    direc = np.array(direc, 'double').reshape(n_records,)
    t = np.array(t).reshape(n_records,)
    pr = np.array(pr).reshape(n_records,)
    tem = np.array(tem).reshape(n_records,)
    sol = np.array(sol).reshape(n_records,)
    the = np.array(the).reshape(n_records,)
    qr = np.array(qr).reshape(n_records,)
    pre = np.array(pre).reshape(n_records,)
    return wind, direc, t, pr, tem, sol, the, qr, pre
def make_config(experiment: sacred.Experiment) -> None:
    """Adds configs and named configs to `experiment`.
    The standard config parameters it defines are:
    - env_name (str): The environment name in the Gym registry of the rewards to compare.
    - x_reward_cfgs (Iterable[common_config.RewardCfg]): tuples of reward_type and reward_path
        for x-axis.
    - y_reward_cfgs (Iterable[common_config.RewardCfg]): tuples of reward_type and reward_path
        for y-axis.
    - log_root (str): the root directory to log; subdirectory path automatically constructed.
    - n_bootstrap (int): the number of bootstrap samples to take.
    - alpha (float): percentile confidence interval
    - aggregate_kinds (Iterable[str]): the type of aggregations to perform across seeds.
        Not used in `plot_return_heatmap` which only supports its own kind of bootstrapping.
    - heatmap_kwargs (dict): passed through to `analysis.compact_heatmaps`.
    - styles (Iterable[str]): styles to apply from `evaluating_rewards.analysis.stylesheets`.
    - save_kwargs (dict): passed through to `analysis.save_figs`.
    """
    # pylint: disable=unused-variable,too-many-statements
    # The nested functions below are registered with sacred via the
    # @experiment.config decorator; their local variables become config
    # entries, so they are never called directly.
    @experiment.config
    def default_config():
        """Default configuration values."""
        data_root = serialize.get_output_dir()  # where models are read from
        log_root = serialize.get_output_dir()  # where results are written to
        n_bootstrap = 1000  # number of bootstrap samples
        alpha = 95  # percentile confidence interval
        aggregate_kinds = ("bootstrap", "studentt", "sample")
        # Sacred idiom: touch locals() so the assignments above are captured.
        _ = locals()
        del _
    @experiment.config
    def aggregate_fns(aggregate_kinds, n_bootstrap, alpha):
        """Make a mapping of aggregate functions of kinds `subset` with specified parameters.
        Used in scripts.distances.{epic,npec}; currently ignored by erc since
        it does not use multiple seeds and instead bootstraps over samples.
        """
        aggregate_fns = {}
        if "bootstrap" in aggregate_kinds:
            aggregate_fns["bootstrap"] = functools.partial(
                bootstrap_ci, n_bootstrap=n_bootstrap, alpha=alpha
            )
        if "studentt" in aggregate_kinds:
            aggregate_fns["studentt"] = functools.partial(studentt_ci, alpha=alpha)
        if "sample" in aggregate_kinds:
            aggregate_fns["sample"] = sample_mean_sd
    @experiment.config
    def point_mass_as_default():
        """Default to PointMass as environment so scripts work out-of-the-box."""
        locals().update(**common_config.COMMON_CONFIGS["point_mass"])
    # Register every shared config as a named config selectable by name.
    for name, cfg in common_config.COMMON_CONFIGS.items():
        experiment.add_named_config(name, cfg)
def read_audio(path, Fs=None, mono=False):
    """Read an audio file into a np.ndarray.

    Thin wrapper around ``librosa.load``, which returns the waveform and
    the (possibly resampled) sampling rate as a tuple.

    Args:
        path (str): Path to audio file
        Fs (scalar): Resample audio to given sampling rate. Use native sampling rate if None. (Default value = None)
        mono (bool): Convert multi-channel file to mono. (Default value = False)
    Returns:
        x (np.ndarray): Waveform signal
        Fs (scalar): Sampling rate
    """
    return librosa.load(path, sr=Fs, mono=mono)
def evaluate_and_export(estimator  #: BaseEstimator,
                        , X: np.ndarray, filename: str):
    """
    Export to specified file the prediction results of given estimator on given testset.
    File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
    predicted values.
    Parameters
    ----------
    estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
        Fitted estimator to use for prediction
    X: ndarray of shape (n_samples, n_features)
        Test design matrix to predict its responses
    filename:
        path to store file at
    """
    predictions = estimator.predict(X)
    frame = pd.DataFrame(predictions, columns=["predicted_values"])
    # Drop the index so the file has exactly one column of predictions.
    frame.to_csv(filename, index=False)
def test_get_matrix():
    """
    test
    Description: build a single-qubit Ry('a')-Rz('b')-Ry('c') circuit and
        evaluate its unitary matrix at fixed parameter values.
    Expectation: the (0, 0) entry of the matrix matches the precomputed
        reference value.
    """
    circ = Circuit().ry('a', 0).rz('b', 0).ry('c', 0)
    # Parameters bound in declaration order: a, b, c.
    m = circ.matrix(np.array([7.902762e-01, 2.139225e-04, 7.795934e-01]))
    assert np.allclose(m[0, 0], 0.70743435 - 1.06959724e-04j)
def energies_from_mbe_log(filename):
    """Monomer/dimer/trimer/tetramer energies from a many-body-expansion log file.

    Parses the n-mer energy sections and returns four dicts keyed by
    fragment ID (combined via ``keyName`` for n > 1), each value being a
    dict with 'hf', 'os' and 'ss' energy components (NaN until seen).
    """
    monomers, dimers, trimers, tetramers = {}, {}, {}, {}
    # Which energy component the current file section reports.
    hf, os_, ss_ = True, False, False
    # Which n-mer section we are currently inside.
    mons, dims, tris, tets = True, False, False, False
    energies = False
    # Whether the energy table has an extra RIJ column; set when an 'ID'
    # header line is seen.  BUG FIX: previously unbound until that line.
    rij = False
    def storeEnergy(dict_, key, energy):
        """Store energy in given dict depending on whether HF, OS or SS."""
        energy = float(energy)
        if hf:
            dict_[key] = {'hf': energy, 'os': np.nan, 'ss': np.nan}
        elif os_:
            dict_[key]['os'] = energy
        elif ss_:
            dict_[key]['ss'] = energy
        return dict_
    directory, File = os.path.split(filename)
    directory = directory or "."
    # Read the tail of the file (last 15%), where the MBE summary lives.
    lines = eof(directory + '/', File, 0.15)
    for line in lines:
        if not line.strip():
            continue
        elif '-----ENERGIES OF MONOMERS------' in line:
            energies = True
            tets = False
            tris = False
            dims = False
            mons = True
        elif not energies:
            continue
        elif 'Final E(HF) =' in line:
            break
        elif 'DIMER ENERGY CORRECTION' in line:
            dims = True
            mons = False
        elif 'TRIMER ENERGY CORRECTION' in line:
            dims = False
            tris = True
        elif 'TETRAMER ENERGY CORRECTION' in line:
            tris = False
            tets = True
        elif 'RI-MP2 OS energies***' in line:
            ss_ = False
            os_ = True
            hf = False
        elif 'RI-MP2 SS energies***' in line:
            ss_ = True
            os_ = False
            hf = False
        elif 'ID' in line:
            if 'RIJ' in line:
                rij = True
            else:
                rij = False
        # ENERGIES
        else:
            # Energy rows start with a digit or '(' (grouped fragment IDs).
            if re.search('^[0-9]', line) or line.startswith('('):
                # With RIJ output the energy and the RIJ flag may be fused
                # into one token; detect that from the token count.
                if mons:
                    if rij:
                        spl_line = line.split()
                        if len(spl_line) == 3:
                            id, e, rij = spl_line
                        elif len(spl_line) == 2:
                            id, hold = spl_line
                            e = hold[:-1]
                            rij = hold[-1]
                        else:
                            sys.exit("Unexpected number of items in split line")
                    else:
                        id, e = line.split()
                    monomers = storeEnergy(monomers, id, e)
                elif dims:
                    if rij:
                        spl_line = line.split()
                        if len(spl_line) == 4:
                            id1, id2, e, rij = spl_line
                        elif len(spl_line) == 3:
                            id1, id2, hold = spl_line
                            e = hold[:-1]
                            rij = hold[-1]
                        else:
                            sys.exit("Unexpected number of items in split line")
                    else:
                        id1, id2, e = line.split()
                    key = keyName(id1, id2)
                    dimers = storeEnergy(dimers, key, e)
                elif tris:
                    if rij:
                        spl_line = line.split()
                        if len(spl_line) == 5:
                            id1, id2, id3, e, rij = spl_line
                        elif len(spl_line) == 4:
                            id1, id2, id3, hold = spl_line
                            e = hold[:-1]
                            rij = hold[-1]
                        else:
                            # BUG FIX: a length mismatch previously fell
                            # through and silently reused stale IDs.
                            sys.exit("Unexpected number of items in split line")
                    else:
                        id1, id2, id3, e = line.split()
                    key = keyName(id1, id2, id3)
                    trimers = storeEnergy(trimers, key, e)
                elif tets:
                    if rij:
                        # BUG FIX: this branch never split the current line
                        # (it reused `spl_line` from the trimer section) and
                        # its token counts were off by one (5/4 instead of
                        # 6/5), so tetramer RIJ rows could never parse.
                        spl_line = line.split()
                        if len(spl_line) == 6:
                            id1, id2, id3, id4, e, rij = spl_line
                        elif len(spl_line) == 5:
                            id1, id2, id3, id4, hold = spl_line
                            e = hold[:-1]
                            rij = hold[-1]
                        else:
                            sys.exit("Unexpected number of items in split line")
                    else:
                        id1, id2, id3, id4, e = line.split()
                    key = keyName(id1, id2, id3, id4)
                    tetramers = storeEnergy(tetramers, key, e)
    return monomers, dimers, trimers, tetramers
def build_varied_y_node_mesh(osi, xs, ys, zs=None, active=None):
    """
    Creates an array of nodes that are in vertical lines, but vary in height.
    The mesh has len(xs)=len(ys) nodes in the x-direction and len(ys[0]) in the y-direction.
    If zs is not None then has len(zs) in the z-direction.
    Parameters
    ----------
    osi
        OpenSees instance handle; osi.ndm (model dimension) selects 2D vs 3D nodes
    xs
        1D sequence of x-coordinates, one per vertical line
    ys
        2D sequence: ys[i] holds the y-coordinates for the line at xs[i]
    zs
        optional sequence of z-coordinates; a scalar is wrapped into a list
    active
        optional boolean mask with the same nesting as the output; inactive
        positions get None instead of a Node
    Returns
    -------
    np.array
        axis-0 = x-direction
        axis-1 = y-direction
        axis-2 = z  # not included if len(zs)=1 or zs=None
    """
    # axis-0 = x  # unless x or y are singular
    # axis-1 = y
    # axis-2 = z  # not included if len(zs)=1
    from numpy import array
    # Allow a scalar (or None) z to mean "single z-plane".
    if not hasattr(zs, '__len__'):
        zs = [zs]
    sn = []
    for xx in range(len(xs)):
        sn.append([])
        for yy in range(len(ys[xx])):
            if len(zs) == 1:
                if active is None or active[xx][yy]:
                    # 2D models take (x, y); 3D models need the z too.
                    if osi.ndm == 2:
                        pms = [osi, xs[xx], ys[xx][yy]]
                    else:
                        pms = [osi, xs[xx], ys[xx][yy], zs[0]]
                    sn[xx].append(Node(*pms))
                else:
                    sn[xx].append(None)
            else:
                sn[xx].append([])
                for zz in range(len(zs)):
                    # Establish left and right nodes
                    if active is None or active[xx][yy][zz]:
                        sn[xx][yy].append(Node(osi, xs[xx], ys[xx][yy], zs[zz]))
                    else:
                        sn[xx][yy].append(None)
    # if len(zs) == 1:
    #     return sn[0]
    return array(sn)
def self_quarantine_end_10():
    """
    Real Name: b'self quarantine end 10'
    Original Eqn: b'50'
    Units: b'Day'
    Limits: (None, None)
    Type: constant
    b''
    """
    # Auto-generated model constant (PySD-style docstring above): the day
    # on which self-quarantine ends for this scenario.
    return 50
def main():
    """Make a jazz noise here"""
    # Filter SwissProt records by keyword, skip unwanted taxa, and write the
    # survivors to the output file in FASTA format.
    args = get_args()
    file_arg = args.FILE
    skip_arg = args.skip
    key_arg = args.keyword
    out_arg = args.output
    if not os.path.isfile(file_arg):
        die('"{}" is not a file'.format(file_arg))
    # Matching is case-insensitive on both sides.
    skip_arg = [j.lower() for j in skip_arg]
    key_arg = [key_arg]
    num_skip = 0
    no_match = 0
    total = 0
    new = []
    # NOTE(review): the output file is opened unconditionally even though
    # writing below is guarded by `if out_arg:` -- confirm out_arg always
    # has a value (e.g. an argparse default), otherwise open() fails here.
    out_file = open(out_arg, 'wt')
    print('Processing "{}"'.format(file_arg))
    for seq_record in SeqIO.parse(file_arg, 'swiss'):
        all_species = (seq_record.annotations)
        keyword = all_species.get('keywords')
        keyword = [x.lower() for x in keyword]
        match = any([e for e in key_arg if e in keyword]) #True or false
        total += 1
        if match != True:
            # Record does not carry the requested keyword.
            no_match += 1
            continue
        else:
            taxa = all_species.get('taxonomy')
            taxa = [y.lower() for y in taxa]
            if skip_arg:
                match_taxa = any([e for e in skip_arg if e in taxa]) #True or false
                if match_taxa == True:
                    # Record belongs to a taxon we were asked to skip.
                    num_skip += 1
                    continue
            if out_arg:
                SeqIO.write(seq_record, out_file, 'fasta')
    num_skips = num_skip+no_match
    total_num = total - num_skips
    print('Done, skipped {} and took {}. See output in "{}".'.format(num_skips, total_num, out_arg))
def spexsxd_scatter_model(dat, halfwid=48, xlims=[470, 1024], ylims=[800, 1024], full_output=False, itime=None):
    """Model the scattered light seen in SpeX/SXD K-band frames.
    :INPUTS:
      dat : str or numpy array
        filename of raw SXD frame to be corrected, or a Numpy array
        containing its data.
    :OPTIONS:
      halfwid : int
        half-width of the spectral orders.  Experience shows this is
        approximately 48 pixels.  This value is not fit!
      xlims : list of length 2
        minimum and maximum x-pixel values to use in the fitting
      ylims : list of length 2
        minimum and maximum y-pixel values to use in the fitting
      full_output : bool
        whether to output only model, or the tuple (model, fits, chisq, nbad)
      itime : float
        integration time, in seconds, with which to scale the initial
        guesses
    :OUTPUT:
      scatter_model : numpy array
        Model of the scattered light component, for subtraction or saving.
      OR:
      scatter_model, fits, chis, nbad
    :REQUIREMENTS:
      :doc:`pyfits`, :doc:`numpy`, :doc:`fit_atmo`, :doc:`analysis`, :doc:`phasecurves`
    :TO_DO_LIST:
      I could stand to be more clever in modeling the scattered light
      components -- perhaps fitting for the width, or at least
      allowing the width to be non-integer.
    """
    # 2011-11-10 11:10 IJMC: Created
    import analysis as an
    import phasecurves as pc
    try:
        from astropy.io import fits as pyfits
    except:
        import pyfits
    ############################################################
    # Define some helper functions:
    ############################################################
    def tophat(param, x):
        """Grey-pixel tophat function with set width
        param: [cen_pix, amplitude, background]
        x : must be array of ints, arange(0, size-1)
        returns the model."""
        # 2011-11-09 21:37 IJMC: Created
        # Integer part positions the hat; the fractional part is spread
        # onto the two edge ("grey") pixels below.
        intpix, fracpix = int(param[0]), param[0] % 1
        th = param[1] * ((-halfwid <= (x - intpix)) * ((x - intpix) < halfwid))
        if (intpix >= halfwid) and ((intpix - halfwid) < x.size):
            th[intpix - halfwid] = param[1]*(1. - fracpix)
        if (intpix < (x.size - halfwid)) and ((intpix + halfwid) >= 0):
            th[intpix + halfwid] = param[1]*fracpix
        return th + param[2]
    def tophat2g(param, x, p0prior=None):
        """Grey-pixel double-tophat plus gaussian
        param: [cen_pix1, amplitude1, cen_pix2, amplitude2, g_area, g_sigma, g_center, background]
        x : must be ints, arange(0, size-1)
        returns the model."""  # 2011-11-09 21:37 IJMC: Created
        # NOTE(review): `gaussian` is resolved at module level -- confirm
        # it is imported where this function is defined.
        return tophat([param[0], param[1], 0], x) + \
               tophat([param[2], param[3], 0], x) + \
               gaussian(param[4:7], x) + param[7]
    ############################################################
    # Parse inputs
    ############################################################
    halfwid = int(halfwid)
    if isinstance(dat, np.ndarray):
        if itime is None:
            itime = 1.
    else:
        # `dat` is a filename: try to read the integration time from the
        # header, then load the pixel data.
        if itime is None:
            try:
                itime = pyfits.getval(dat, 'ITIME')
            except:
                itime = 1.
        dat = pyfits.getdata(dat)
    nx, ny = dat.shape
    scatter_model = np.zeros((nx, ny), dtype=float)
    chis, fits, nbad = [], [], []
    # Fit columns right-to-left within xlims.
    iivals = np.arange(xlims[1]-1, xlims[0], -1, dtype=int)
    position_offset = 850 - ylims[0]
    # Empirical quadratic for the scattered-light centre vs. column.
    est_coefs = np.array([ -5.02509772e-05, 2.97212397e-01, -7.65702234e+01])
    estimated_position = np.polyval(est_coefs, iivals) + position_offset
    estimated_error = 0.5
    # to hold scattered light position fixed, rather than fitting for
    # that position, uncomment the following line:
    #holdfixed = [0]
    holdfixed = None
    ############################################################
    # Start fitting
    ############################################################
    for jj, ii in enumerate(iivals):
        col = dat[ylims[0]:ylims[1], ii]
        ecol = np.ones(col.size, dtype=float)
        x = np.arange(col.size, dtype=float)
        # Seed the first column from hard-coded guesses (scaled by itime);
        # later columns reuse the previous fit with an updated position.
        if len(fits)==0:
            all_guess = [175 + position_offset, 7*itime, \
                             70 + position_offset, 7*itime, \
                             250*itime, 5, 89 + position_offset, 50]
        else:
            all_guess = fits[-1]
            all_guess[0] = estimated_position[jj]
        model_all = tophat2g(all_guess, x)
        res = (model_all - col)
        # Deweight outliers by inflating their uncertainties.
        badpix = np.abs(res) > (4*an.stdr(res, nsigma=4))
        ecol[badpix] += 1e9
        fit = an.fmin(pc.errfunc, all_guess, args=(tophat2g, x, col, 1./ecol**2), full_output=True, maxiter=1e4, maxfun=1e4, disp=False, kw=dict(testfinite=False), holdfixed=holdfixed)
        best_params = fit[0].copy()
        res = tophat2g(best_params, x) - col
        badpix = np.abs(res) > (4*an.stdr(res, nsigma=4))
        # Keep pixels near the tophat edges even if they look like outliers.
        badpix[((np.abs(np.abs(x - best_params[0]) - 48.)) < 2) + \
                   ((np.abs(np.abs(x - best_params[2]) - 48.)) < 2)] = False
        badpix += (np.abs(res) > (20*an.stdr(res, nsigma=4)))
        ecol = np.ones(col.size, dtype=float)
        ecol[badpix] += 1e9
        best_chisq = pc.errfunc(best_params, tophat2g, x, col, 1./ecol**2)
        # Make sure you didn't converge on the wrong model:
        # re-fit with the second tophat nudged by +/-2 pixels and keep the
        # best chi-squared result.
        for this_offset in ([-2, 0, 2]):
            this_guess = fit[0].copy()
            this_guess[2] += this_offset
            this_guess[0] = estimated_position[jj]
            this_fit = an.fmin(pc.errfunc, this_guess, args=(tophat2g, x, col, 1./ecol**2), full_output=True, maxiter=1e4, maxfun=1e4, disp=False, kw=dict(testfinite=False), holdfixed=holdfixed)
            if this_fit[1] < best_chisq:
                best_chisq = this_fit[1]
                best_params = this_fit[0].copy()
        fits.append(best_params)
        chis.append(best_chisq)
        nbad.append(badpix.sum())
        mod2 = tophat2g(best_params, x)
        # Only the FIRST tophat component is the scattered light model.
        scatter_model[ylims[0]:ylims[1], ii] = tophat(list(best_params[0:2])+[0], x)
    if full_output:
        return scatter_model, fits, chis, nbad
    else:
        return scatter_model
def test_ap_track_sta_no_auth_passive(dev, apdev):
    """AP rejecting authentication from dualband STA on 2.4 GHz (passive)"""
    try:
        _test_ap_track_sta_no_auth_passive(dev, apdev)
    finally:
        # Always restore the world ("00") regulatory domain, even if the
        # wrapped test fails.
        subprocess.call(['iw', 'reg', 'set', '00'])
def newff(minmax, size, transf=None):
    """
    Create multilayer perceptron
    :Parameters:
        minmax: list of list, the outer list is the number of input neurons,
            inner lists must contain 2 elements: min and max
            Range of input value
        size: the length of list equal to the number of layers except input layer,
            the element of the list is the neuron number for corresponding layer
            Contains the number of neurons for each layer
        transf: list (default TanSig)
            List of activation function for each layer
    :Returns:
        net: Net
    :Example:
        >>> # create neural net with 2 inputs
        >>> # input range for each input is [-0.5, 0.5]
        >>> # 3 neurons for hidden layer, 1 neuron for output
        >>> # 2 layers including hidden layer and output layer
        >>> net = newff([[-0.5, 0.5], [-0.5, 0.5]], [3, 1])
        >>> net.ci
        2
        >>> net.co
        1
        >>> len(net.layers)
        2
    """
    n_inputs = len(minmax)
    n_outputs = size[-1]
    # Default every layer to a TanSig activation.
    if transf is None:
        transf = [trans.TanSig()] * len(size)
    assert len(transf) == len(size)
    # Build layers; each layer's input count is the previous layer's size
    # (the network input count for the first layer).
    layers = []
    prev_size = n_inputs
    for layer_size, activation in zip(size, transf):
        perceptron = layer.Perceptron(prev_size, layer_size, activation)
        perceptron.initf = init.initnw
        layers.append(perceptron)
        prev_size = layer_size
    # Feed-forward chain: every layer is connected to the one before it.
    connect = [[idx - 1] for idx in range(len(layers) + 1)]
    return Net(minmax, n_outputs, layers, connect, train.train_bfgs, error.SSE())
def write(*args, package="gw", file_format="dat", **kwargs):
    """Write a results file via the requested package/format backend.
    Parameters
    ----------
    args: tuple
        all args are passed to the write function
    package: str
        the package you wish to use (default 'gw')
    file_format: str
        the file format you wish to use (default 'dat'); 'h5' is treated
        as an alias for 'hdf5'
    kwargs: dict
        all kwargs passed to the write function
    """
    if file_format == "h5":
        file_format = "hdf5"

    def _dispatch(pkg):
        """Import the backend module for `pkg` and run its writer."""
        module = importlib.import_module(
            "pesummary.{}.file.formats.{}".format(pkg, file_format)
        )
        writer = getattr(module, "write_{}".format(file_format))
        return writer(*args, **kwargs)

    try:
        return _dispatch(package)
    except (ImportError, AttributeError, ModuleNotFoundError):
        # Fall back to the core implementation of this format.
        return _dispatch("core")
def photo(el, dict_class, img_with_alt, base_url=''):
    """Find an implied photo property
    Args:
      el (bs4.element.Tag): a DOM element
      dict_class: a python class used as a dictionary (set by the Parser object)
      img_with_alt: a flag to enable experimental parsing of alt attribute with img (set by the Parser object)
      base_url (string): the base URL to use, to reconcile relative URLs
    Returns:
      string or dictionary: the implied photo value or implied photo as a
      dictionary with alt value; implicitly returns None when no candidate
      is found
    """
    def get_photo_child(children):
        "take a list of children and finds a valid child for photo property"
        # if element has one image child use source if exists and img is
        # not root class
        poss_imgs = [c for c in children if c.name == 'img']
        if len(poss_imgs) == 1:
            poss_img = poss_imgs[0]
            if not mf2_classes.root(poss_img.get('class', [])):
                return poss_img
        # if element has one object child use data if exists and object is
        # not root class
        poss_objs = [c for c in children if c.name == 'object']
        if len(poss_objs) == 1:
            poss_obj = poss_objs[0]
            if not mf2_classes.root(poss_obj.get('class', [])):
                return poss_obj
        # implicitly returns None when neither case matches
    # if element is an img use source if exists
    prop_value = get_img_src_alt(el, dict_class, img_with_alt, base_url)
    if prop_value is not None:
        return prop_value
    # if element is an object use data if exists
    prop_value = get_attr(el, "data", check_name="object")
    if prop_value is not None:
        return text_type(prop_value)
    # find candidate child or grandchild
    poss_child = None
    children = list(get_children(el))
    poss_child = get_photo_child(children)
    # if no possible child found then look for grandchild if only one child which is not mf2 root
    if poss_child is None and len(children) == 1 and not mf2_classes.root(children[0].get('class', [])):
        grandchildren = list(get_children(children[0]))
        poss_child = get_photo_child(grandchildren)
    # if a possible child was found parse
    if poss_child is not None:
        # img get src
        prop_value = get_img_src_alt(poss_child, dict_class, img_with_alt, base_url)
        if prop_value is not None:
            return prop_value
        # object get data
        prop_value = get_attr(poss_child, "data", check_name="object")
        if prop_value is not None:
            return text_type(prop_value)
def calc_TOF(t_pulse, t_signal):
    """Compute time-of-flight values for each signal time.

    For every signal time the preceding pulse time is located via binary
    search and the difference is reported in microseconds (inputs are
    assumed to be in picoseconds — TODO confirm with caller).

    Args:
        t_pulse: sorted sequence of pulse timestamps.
        t_signal: sequence of signal timestamps.

    Returns:
        tuple: ``(dbls, idxs, tof)`` where ``dbls`` flags signals that hit
        the same pulse as the previous signal, ``idxs`` holds the bisect
        indices (seeded with -1), and ``tof`` the flight times in us.
    """
    times_of_flight = []
    pulse_indices = [-1]
    duplicate_flags = []
    n_pulses = len(t_pulse)
    for signal_time in t_signal:
        pos = bisect_left(t_pulse, signal_time)
        # The reference pulse is the one just before the signal; a signal
        # past the last pulse simply uses the final pulse.
        reference = t_pulse[-1] if pos == n_pulses else t_pulse[pos - 1]
        if pos == pulse_indices[-1]:
            # Same pulse matched twice in a row: mark both signals.
            duplicate_flags[-1] = 1
            duplicate_flags.append(1)
        else:
            duplicate_flags.append(0)
            pulse_indices.append(pos)
        times_of_flight.append((signal_time - reference) / 1e6)  # ps -> us
    return duplicate_flags, pulse_indices, times_of_flight
def get_nodes_ips(node_subnets):
    """Return the IPs of the active trunk ports on the given subnets.

    Only ports that carry trunk details and whose first fixed IP belongs
    to one of ``node_subnets`` are considered part of the deployment.
    """
    os_net = clients.get_network_client()
    resource_tags = CONF.neutron_defaults.resource_tags
    if resource_tags:
        active_ports = os_net.ports(status='ACTIVE', tags=resource_tags)
    else:
        # NOTE(ltomasbo): if tags are not used, assume all the trunk ports
        # are part of the kuryr deployment.
        active_ports = os_net.ports(status='ACTIVE')
    trunk_ips = []
    for port in active_ports:
        fixed_ips = port.fixed_ips
        if (port.trunk_details and fixed_ips
                and fixed_ips[0]['subnet_id'] in node_subnets):
            trunk_ips.append(fixed_ips[0]['ip_address'])
    return trunk_ips
def get_visualizations_info(exp_id, state_name, interaction_id):
    """Returns a list of visualization info. Each item in the list is a dict
    with keys 'data' and 'options'.

    Args:
        exp_id: str. The ID of the exploration.
        state_name: str. Name of the state.
        interaction_id: str. The interaction type.

    Returns:
        list(dict). Each item in the list is a dict with keys representing
        - 'id': str. The visualization ID.
        - 'data': list(dict). A list of answer/frequency dicts.
        - 'options': dict. The visualization options.

        An example of the returned value may be:
        [{'options': {'y_axis_label': 'Count', 'x_axis_label': 'Answer'},
        'id': 'BarChart',
        'data': [{u'frequency': 1, u'answer': 0}]}]
    """
    if interaction_id is None:
        return []
    visualizations = interaction_registry.Registry.get_interaction_by_id(
        interaction_id).answer_visualizations
    outputs_by_calculation_id = {}
    for calculation_id in {v.calculation_id for v in visualizations}:
        # The top unresolved answers output is intentionally hidden from
        # exploration stats.
        if calculation_id == 'TopNUnresolvedAnswersByFrequency':
            continue
        # None means the calculation job has not been run for this state
        # yet, so the corresponding visualization is simply skipped.
        calc_output = _get_calc_output(exp_id, state_name, calculation_id)
        if calc_output is None:
            continue
        # The same calculation_id may be shared across interaction types;
        # outputs recorded for another interaction must be filtered out.
        if calc_output.interaction_id != interaction_id:
            continue
        outputs_by_calculation_id[calculation_id] = (
            calc_output.calculation_output.to_raw_type())
    return [{
        'id': visualization.id,
        'data': outputs_by_calculation_id[visualization.calculation_id],
        'options': visualization.options,
        'addressed_info_is_supported': (
            visualization.addressed_info_is_supported),
    } for visualization in visualizations
        if visualization.calculation_id in outputs_by_calculation_id]
def push(array, *items):
    """Push items onto the end of `array` and return modified `array`.

    Args:
        array (list): List to push to.
        items (mixed): Items to append.

    Returns:
        list: Modified `array`.

    Warning:
        `array` is modified in place.

    Example:

        >>> array = [1, 2, 3]
        >>> push(array, 4, 5, [6])
        [1, 2, 3, 4, 5, [6]]

    See Also:
        - :func:`push` (main definition)
        - :func:`append` (alias)

    .. versionadded:: 2.2.0
    """
    # list.extend appends each item in order, matching the previous
    # per-item append helper loop without the extra indirection.
    array.extend(items)
    return array
def compute_list(commandline_argument):
    """
    Return booking or revenue rows read from the CSV file named by the
    first command-line parameter (Shift-JIS encoded); the header row is
    discarded.
    """
    with open(commandline_argument, "r", encoding="shift_jis") as database:
        reader = csv.reader(database)
        next(reader)  # skip the header line
        return list(reader)
def write_xyz(file_path, points: np.ndarray, normals=None, colors=None):
    """
    Write a point cloud to an .xyz text file.

    :param file_path: output path (parent directories are created).
    :param points: (n, 3) or (n, 2) array; a single (3,) point and
        transposed (3, n) layouts are also accepted.
    :param normals: optional per-point normals, (n, 3) or (3, n).
    :param colors: optional per-point colors, (n, 3) or (3, n).
    :return: None
    """
    file_utils.make_dir_for_file(file_path)

    def _rows_first(arr):
        # Accept a (3, n) layout by transposing it to (n, 3).
        if arr.shape[0] == 3 and arr.shape[1] != 3:
            return arr.transpose([1, 0])
        return arr

    if points.shape == (3,):
        points = np.expand_dims(points, axis=0)
    points = _rows_first(points)
    if colors is not None:
        colors = _rows_first(colors)
    if normals is not None:
        normals = _rows_first(normals)

    # Promote 2D points to 3D with z = 0.
    if points.shape[1] == 2:
        padded = np.zeros((points.shape[0], 3))
        padded[:, :2] = points
        points = padded

    # meshlab doesn't like colors, only using normals. try cloud compare instead.
    with open(file_path, 'w') as fp:
        for row_id, point in enumerate(points):
            fields = [str(point[0]), str(point[1]), str(point[2])]
            if normals is not None:
                fields += [str(normals[row_id][k]) for k in range(3)]
            if colors is not None:
                fields += [str(colors[row_id][k]) for k in range(3)]
            # Keep the original trailing space before the newline.
            fp.write(" ".join(fields) + " \n")
def calculate_saving(deal, item_prices):
    """
    Parse the deal string and calculate how much money is saved
    when this deal gets applied. Also returns deal requirement.

    Args:
        deal (str): deal information
        item_prices (dict): {item: price}

    Returns:
        requirements (collections.Counter): items and quantity required to complete deal
            eg. {'F': 3}
        saving (int): total saving this deal gives
        cost (int): cost of deal
    """
    free_match = re.search(r'(\w+) get one ([^\n]+) free', deal)
    if free_match:
        # The saving is the value of the item given away for free.
        saving = item_prices[free_match.group(2)]
        requirements = aggregate_requirements(free_match.groups())
        quantity, item = parse_deal_code(free_match.group(1))
        cost = get_cost(item_prices, item, quantity)
        return requirements, saving, cost

    # Assuming for now that all other deals are just x-for: the saving is
    # the difference between quantity * base price and the deal price.
    [(deal_code_quantity, deal_price)] = re.findall(r'(\w+) for (\w+)', deal)
    deal_quantity, deal_item = parse_deal_code(deal_code_quantity)
    saving = (deal_quantity * item_prices[deal_item]) - int(deal_price)
    requirements = aggregate_requirements([deal_code_quantity])
    return requirements, saving, int(deal_price)
def assert_file_exists(file_path: str) -> None:
    """Assert filepath exists. Give verbose error messages.

    Args:
        file_path (str): file path e.g. abc/xyz.csv or xyz.csv

    Raises:
        AssertionError: if the parent directory or the file itself is
            missing.
    """
    # Renamed from `dir` to avoid shadowing the builtin.
    parent = dirname(file_path)
    # Bug fix: the message previously interpolated the `dirname` function
    # object instead of the directory path.
    assert parent == "" or isdir(parent), f"{parent} directory doesn't exist.."
    # Bug fix: listdir("") raised FileNotFoundError when file_path had no
    # directory component; fall back to the current directory.
    assert isfile(
        file_path
    ), f"{file_path} is invalid. \nContents of {parent or '.'} are {listdir(parent or '.')}"
def fetchone_from_table(database, table, values_dict, returning):
    """
    Constructs a generic fetchone database command from a generic table with provided table_column:value_dictionary mapping.
    Mostly used for other helper functions.

    :param database: Current active database connection.
    :param table: Table to insert mapping into.
    :param values_dict: A dictionary of table_column:value to ingest into the database.
    :param returning: A single column, or list of columns, you want returned.
    :return: The row in the database filtered on the column(s) defined.
    """
    columns = list(values_dict.keys())
    # Normalise a scalar column into a one-element sequence.
    if type(returning) is not list and type(returning) is not tuple:
        returning = [returning]
    first_col = columns[0]
    query = (database.select(*returning)
             .FROM(table)
             .WHERE(Eq(first_col, values_dict[first_col])))
    for column in columns[1:]:
        value = values_dict[column]
        # None values translate to IS NULL rather than an equality check.
        query = query.AND(IsNull(column)) if value is None else query.AND(Eq(column, value))
    return query.fetchone()
def reset_errformat(with_func = errformat):
    """
    Restore the original errformat function (or install `with_func` as the
    module-level error formatter).

    Note: the default binds the value of `errformat` at definition time,
    so calling with no argument restores the original formatter.
    """
    global errformat
    errformat = with_func
def read_and_search(treap, data):
    """ Search for each character in the given file (non-case-sensitive) in the given Treap.
    Repeats this operation TIMED_COUNT times and prints out the average time.

    Raises:
        RuntimeError: If a character in `data` is not found in the treap.
    """
    # define core search method
    def _search():
        for c in data:
            if not(treap.search(c)):
                raise RuntimeError("Treap couldn't find expected key: '{}'.".format(c))
    # Time the search TIMED_COUNT times and report the per-run average.
    print("Treap took {:.3f} seconds (average of {} tests) to search for {} characters.\n".format(
        timeit.timeit(_search, number=TIMED_COUNT)/TIMED_COUNT,
        TIMED_COUNT,
        len(data)))
def copy_datastore(src, dst):
    """
    Copy datastore so that the destination datastore is a replica of the source datastore

    'Hidden' keys starting with '_' will not be copied. This is important because keys like
    ``_kombu*`` created by Redis do not have a string type and thus cannot be moved between
    datastore backends.

    :param src: Datastore source URL
    :param dst: Datastore destination URL
    :return: None
    """
    source = make_datastore(src)
    destination = make_datastore(dst)
    destination.flushdb()
    # Re-open the destination after flushing it.
    destination = make_datastore(dst)
    all_keys = list(source.keys())
    total = len(all_keys)
    for position, key in enumerate(all_keys):
        print("Key %s (%d of %d)" % (key, position, total))
        if key.startswith('_'):
            print("Skipping %s" % (key))
            continue
        destination._set(key, source._get(key))
def capture_user_interruption():
    """
    Hide the Python traceback normally printed when the user presses
    Ctrl-C by exiting cleanly on SIGINT instead.
    """
    def _quit(signum, frame):
        sys.exit(0)

    signal.signal(signal.SIGINT, _quit)
def main() -> None:
    """Script entry point: print a summary of the Mnist network architecture."""
    torchinfo.summary(Mnist())
def test_tag_field_is_used_in_load_process():
    """
    Confirm that the `_TAG` field is used when de-serializing to a dataclass
    instance (even for nested dataclasses) when a value is set in the
    `Meta` config for a JSONWizard sub-class.
    """
    @dataclass
    class Data(ABC):
        """ base class for a Member """
        number: float

    class DataA(Data, JSONWizard):
        """ A type of Data"""
        class _(JSONWizard.Meta):
            """
            This defines a custom tag that uniquely identifies the dataclass.
            """
            tag = 'A'

    class DataB(Data, JSONWizard):
        """ Another type of Data """
        class _(JSONWizard.Meta):
            """
            This defines a custom tag that uniquely identifies the dataclass.
            """
            tag = 'B'

    class DataC(Data):
        """ A type of Data"""

    @dataclass
    class Container(JSONWizard):
        """ container holds a subclass of Data """
        class _(JSONWizard.Meta):
            tag = 'CONTAINER'
        data: Union[DataA, DataB, DataC]

    data = {
        'data': {
            TAG: 'A',
            'number': '1.0'
        }
    }

    # initialize container with DataA
    container = Container.from_dict(data)

    # Assert we de-serialize as a DataA object (note: the string '1.0' is
    # coerced to float during load).
    assert type(container.data) == DataA
    assert isinstance(container.data.number, float)
    assert container.data.number == 1.0

    data = {
        'data': {
            TAG: 'B',
            'number': 2.0
        }
    }

    # initialize container with DataB
    container = Container.from_dict(data)

    # Assert we de-serialize as a DataB object.
    assert type(container.data) == DataB
    assert isinstance(container.data.number, float)
    assert container.data.number == 2.0

    # Test we receive an error when we provide an invalid tag value
    # (DataC declares no tag, so 'C' is unknown).
    data = {
        'data': {
            TAG: 'C',
            'number': 2.0
        }
    }

    with pytest.raises(ParseError):
        _ = Container.from_dict(data)
def report(function, *args, **kwds):
    """Invoke ``function(*args, **kwds)``, printing (and swallowing) any
    exception's traceback instead of letting it propagate."""
    try:
        function(*args, **kwds)
    except Exception:
        traceback.print_exc()
def recover_buildpack(app_folder):
    """
    Given the path to an app folder where an app was just built, return a
    BuildPack object pointing to the dir for the buildpack used during the
    build.

    Relies on the builder.sh script storing the buildpack location in
    /.buildpack inside the container.
    """
    marker = os.path.join(app_folder, '.buildpack')
    with open(marker) as handle:
        recorded = handle.read()
    # Strip the container-absolute leading slash and trailing newline,
    # then anchor the path at the current working directory.
    relative = recorded.lstrip('/').rstrip('\n')
    return BuildPack(os.path.join(os.getcwd(), relative))
def ParseMultiCpuMask(cpu_mask):
    """Parse a multiple CPU mask definition and return the list of CPU IDs.

    CPU mask format: colon-separated list of comma-separated list of CPU IDs
    or dash-separated ID ranges, with optional "all" as CPU value

    Example: "0-2,5:all:1,5,6:2" -> [ [ 0,1,2,5 ], [ -1 ], [ 1, 5, 6 ], [ 2 ] ]

    @type cpu_mask: str
    @param cpu_mask: multiple CPU mask definition
    @rtype: list of lists of int
    @return: list of lists of CPU IDs

    """
    if not cpu_mask:
        return []

    def _parse_range(range_def):
        if range_def == constants.CPU_PINNING_ALL:
            return [constants.CPU_PINNING_ALL_VAL]
        # Uniquify and sort the parsed range.
        return sorted(set(ParseCpuMask(range_def)))

    return [_parse_range(part)
            for part in cpu_mask.split(constants.CPU_PINNING_SEP)]
def get_network_insights_analysis_output(network_insights_analysis_id: Optional[pulumi.Input[str]] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkInsightsAnalysisResult]:
    """
    Resource schema for AWS::EC2::NetworkInsightsAnalysis

    NOTE(review): presumably a generated Pulumi output-form stub; the body
    is intentionally ``...`` and the implementation is supplied by the
    Pulumi runtime — confirm against the generator.
    """
    ...
def _call(arg):
    """Shortcut for comparing call objects

    Wraps *arg* as ``_Call(((arg,),))`` — i.e. a call made with a single
    positional argument — for use in assertions.
    """
    return _Call(((arg, ), ))
def gather_directives(
    type_object: GraphQLNamedType,
) -> List[DirectiveNode]:
    """Collect every directive attached to a type, from both its extension
    AST nodes and its primary AST node."""
    collected: List[DirectiveNode] = []
    extension_nodes = getattr(type_object, "extension_ast_nodes", None)
    if extension_nodes:
        for node in extension_nodes:
            if node.directives:
                collected.extend(node.directives)
    ast_node = getattr(type_object, "ast_node", None)
    if ast_node and ast_node.directives:
        collected.extend(ast_node.directives)
    return collected
def start_moderating():
    """Main driver for the moderation action.

    Walks every extension folder under EXTENSIONS_FOLDER, matching each
    manifest's name against EXTENSION_NAME, and moderates the matches;
    exits with status 1 when nothing matches.

    NOTE(review): `success` only reflects the most recent
    handle_matched_extension call, so an early failure followed by a later
    success still prints the success message — confirm this is intended.
    """
    print("🙈 Attempting to moderate \"" + EXTENSION_NAME + "\" extension")
    success = False
    found_at_least_one_match = False

    # Iterate through all extension folders looking for the manifest.json
    # so we can find the extension name.
    for filename in Path(EXTENSIONS_FOLDER).rglob('manifest.json'):
        # Parse the messages looking for the `appname` attribute so we can match on it.
        with open(filename, 'r') as manifest_file:
            json_string = manifest_file.read().replace('\n', '')
            # Sometimes the string format contains unicode characters
            # (utf-8-sig also strips a leading BOM).
            parsed_json = json.loads(json_string.encode().decode('utf-8-sig'))
            if parsed_json.get('name'):
                name = str(parsed_json['name'])
                success = False
                # Often app name is just a key to a message, so we need to look it up in that case.
                if "__MSG_" in name:
                    if get_name_from_messages(name, Path(EXTENSIONS_FOLDER)):
                        found_at_least_one_match = True
                        success = handle_matched_extension(filename, parsed_json)
                elif name.lower() == EXTENSION_NAME.lower():
                    found_at_least_one_match = True
                    success = handle_matched_extension(filename, parsed_json)

    if not found_at_least_one_match:
        print("❗️ No matching extension found for \"" + EXTENSION_NAME + "\"")
        sys.exit(1)
    if not success:
        print("❗️ Moderation completed with one or more errors. " +
              "To see suggested resolutions, look for ⭐️'s in the output")
    else:
        print("✅ Moderation completed 🕺")
def test_fooof_load():
    """Test load into FOOOF. Note: loads files from test_core_io."""
    # Test loading just results
    tfm = FOOOF(verbose=False)
    file_name_res = 'test_fooof_res'
    tfm.load(file_name_res, TEST_DATA_PATH)
    # Check that result attributes get filled
    for result in OBJ_DESC['results']:
        assert not np.all(np.isnan(getattr(tfm, result)))
    # Test that settings and data are None
    #   Except for aperiodic mode, which can be inferred from the data
    for setting in OBJ_DESC['settings']:
        # Bug fix: was `is not 'aperiodic_mode'` — identity comparison with
        # a str literal (SyntaxWarning; relies on interning). Use `!=`.
        if setting != 'aperiodic_mode':
            assert getattr(tfm, setting) is None
    assert getattr(tfm, 'power_spectrum') is None
    # Test loading just settings
    tfm = FOOOF(verbose=False)
    file_name_set = 'test_fooof_set'
    tfm.load(file_name_set, TEST_DATA_PATH)
    for setting in OBJ_DESC['settings']:
        assert getattr(tfm, setting) is not None
    # Test that results and data are None
    for result in OBJ_DESC['results']:
        assert np.all(np.isnan(getattr(tfm, result)))
    assert tfm.power_spectrum is None
    # Test loading just data
    tfm = FOOOF(verbose=False)
    file_name_dat = 'test_fooof_dat'
    tfm.load(file_name_dat, TEST_DATA_PATH)
    assert tfm.power_spectrum is not None
    # Test that settings and results are None
    for setting in OBJ_DESC['settings']:
        assert getattr(tfm, setting) is None
    for result in OBJ_DESC['results']:
        assert np.all(np.isnan(getattr(tfm, result)))
    # Test loading all elements
    tfm = FOOOF(verbose=False)
    file_name_all = 'test_fooof_all'
    tfm.load(file_name_all, TEST_DATA_PATH)
    for result in OBJ_DESC['results']:
        assert not np.all(np.isnan(getattr(tfm, result)))
    for setting in OBJ_DESC['settings']:
        assert getattr(tfm, setting) is not None
    for data in OBJ_DESC['data']:
        assert getattr(tfm, data) is not None
    for meta_dat in OBJ_DESC['meta_data']:
        assert getattr(tfm, meta_dat) is not None
def test_multiple_geospatial_types() -> None:
    """Multiple photo_type: a collection whose items disagree on
    linz_geospatial_type must report an invalid geospatial type."""
    collection = Collection("fake_collection")
    item_a = Item("id_0")
    item_a.linz_geospatial_type = "black and white image"
    collection.add_item(item_a)
    item_b = Item("id_1")
    item_b.linz_geospatial_type = "black and white infrared image"
    collection.add_item(item_b)
    # Conflicting item types cannot be summarised to a single type.
    assert collection.get_linz_geospatial_type() == "invalid geospatial type"
def get_request(language=None):
    """
    Returns a Request instance populated with cms specific attributes.
    """
    request = RequestFactory().get("/")
    request.session = {}
    request.LANGUAGE_CODE = language if language else settings.LANGUAGE_CODE
    # No page resolved yet; anonymous user by default.
    request.current_page = None
    request.user = AnonymousUser()
    return request
def validate_hash(value: str) -> bool:
    """
    Validates a hash value.

    Returns True when *value* matches ``HASH_RE``, False otherwise. The
    previous implementation returned the ``re.Match`` object (merely cast
    for the type checker), so callers comparing against True/False could
    misbehave; truthiness-based callers are unaffected by this fix.
    """
    return HASH_RE.match(value) is not None
def pytorch_mnist(filename='pytorch_mnist.py'):
    """
    Write a self-contained PyTorch MNIST classifier template to *filename*.

    NOTE(review): the template text below is emitted verbatim; verify its
    internal indentation against the intended output file.
    """
    with open(filename, 'w') as f:
        f.write("""import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms, datasets
def load_mnist():
train = datasets.MNIST('../../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()]))
test = datasets.MNIST('../../data', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()]))
trainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
testset = torch.utils.data.DataLoader(test, batch_size=10, shuffle=False)
return trainset, testset
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(28*28, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return F.log_softmax(x, dim=1)
if __name__ == '__main__':
net = Net()
X = torch.rand((28, 28))
X = X.view(-1, 28*28)
output = net(X)
print(output)
""")
def string_to_digit(string, output):
    """Convert string to float/int if possible (designed to extract number
    from a price string, e.g. 250 EUR -> 250)

    :argument string: string to convert
    :type string: str
    :argument output: output type
    :type output: type

    :returns float/int or None
    """
    string = strip_space(string)
    # Guard short inputs: the previous version indexed string[1]
    # unconditionally, raising IndexError for 0/1-character strings.
    first_is_digit = bool(string) and string[0].isdigit()
    second_is_digit = len(string) > 1 and string[1].isdigit()
    if not first_is_digit and not second_is_digit:
        return None
    digits = []
    for index, char in enumerate(string):
        if char.isdigit():
            digits.append(char)
            continue
        # Treat end-of-string as "next char is not a digit" instead of
        # raising IndexError (the previous version crashed on inputs
        # ending in a separator right after the digits).
        next_is_digit = index + 1 < len(string) and string[index + 1].isdigit()
        if char == ',':
            # Comma acts as the decimal separator.
            digits.append('.')
        elif char == ' ' and next_is_digit:
            # Thousands separator: skip the space.
            pass
        elif not next_is_digit:
            # Two consecutive non-digits (or trailing non-digit) end the number.
            break
    if '.' in digits and output == int:
        return int(float(''.join(digits)))
    return output(''.join(digits))
def calc_B_phasors(current, xp, yp, cable_array):
    """Calculate the phasors of the x and y components of the magnetic
    induction field B at a given point for a given cable.

    The current phase is converted to a complex phasor, whose real and
    imaginary parts are multiplied by a geometric transfer function
    (depending on the cable position relative to the point of interest),
    yielding the B-field phasor components for a single cable.

    Parameters
    -------------------
    current : int
        Current (A) circulating inside the considered power line
        (composed of a triad of cables)
    xp, yp : float
        Abscissa (m) and ordinate (m) of the point of interest where
        the magnetic induction field B will be calculated at last
    cable_array : numpy.ndarray
        First column - Current phase belonging to the n-th cable under consideration
        Second and third columns - Abscissa and ordinate of the n-th cable under consideration

    Returns
    -------------------
    B_phasors_n : numpy.ndarray
        Respectively the real and imaginary part (columns) of the
        x and y components (rows) of the magnetic induction field B
        produced by a single cable in a given point

    Notes
    -------------------
    Implements the calculations of [1]_ "Norma Italiana CEI 106-11"
    formulas (5) and [2]_ "Norma Italiana CEI 211-4" formulas (16).

    References
    -------------------
    ..[1] Norma Italiana CEI 106-11, "Guide for the determination of
        the respect widths for power lines and substations according to
        DPCM 8 July 2003 (Clause 6) - Part 1: Overhead lines and cables",
        first edition, 2006-02.
    ..[2] Norma Italiana CEI 211-4, "Guide to calculation methods of
        electric and magnetic fields generated by power-lines and electrical
        substations", second edition, 2008-09.
    """
    phase_rad = radians(cable_array[0])
    current_phasor = rect(current, phase_rad)
    current_parts = np.array([current_phasor.real, current_phasor.imag])
    # Offsets from the cable to the point of interest.
    dx = xp - cable_array[1]
    dy = yp - cable_array[2]
    coef = (MU_ZERO / (2 * PI)) / (dx ** 2 + dy ** 2)
    # (cable_y - yp) == -dy, (xp - cable_x) == dx.
    transfer_fn = np.array([-dy * coef, dx * coef]).reshape(2, 1)
    return current_parts * transfer_fn
def get_shortest_text_value(entry):
    """Given a JSON-LD entry, returns the text attribute that has the
    shortest length.

    Parameters
    ----------
    entry: dict
        A JSON-LD entry parsed into a nested python dictionary via the json
        module

    Returns
    -------
    short_text: str or None
        The shortest of the text values, or None when the entry has no
        text-value attribute.
    """
    text_attr = 'http://www.ontologyrepository.com/CommonCoreOntologies/has_text_value'
    if text_attr not in entry:
        return None
    values = [item['@value'] for item in entry[text_attr]]
    return get_shortest_string(values)
def generate_token(user_id, expires_in=3600):
    """Generate a JWT token signed with the app's JWT_SECRET_KEY.

    :param user_id: the user that will own the token
    :param expires_in: expiration time in seconds
    """
    # NOTE(review): `.decode('utf-8')` assumes PyJWT < 2.0, where encode()
    # returns bytes; PyJWT >= 2.0 returns str and this would raise
    # AttributeError — confirm the pinned dependency version.
    secret_key = current_app.config['JWT_SECRET_KEY']
    return jwt.encode(
        {'user_id': user_id,
         'exp': datetime.utcnow() + timedelta(seconds=expires_in)},
        secret_key, algorithm='HS256').decode('utf-8')
def debom(s):
    """Remove BOM characters from a string or bytes value.

    BOM characters appear at the head of a file and corrupt e.g. a CSV
    header (possibly producing mojibake); stripping them avoids that.

    Bug fix: the previous Python-2 era code did
    ``s.replace(codecs.BOM*, '')`` — but the ``codecs.BOM*`` constants are
    ``bytes`` in Python 3, so the call raised TypeError for both str and
    bytes inputs. Handle each input type explicitly.
    """
    if isinstance(s, bytes):
        for name in dir(codecs):
            if name.startswith('BOM'):
                bom = getattr(codecs, name)
                if isinstance(bom, bytes):
                    s = s.replace(bom, b'')
    else:
        # Decoded text carries at most the single U+FEFF BOM code point.
        s = s.replace('\ufeff', '')
    return s
def rolling_window(array, window):
    """
    Create a sliding-window view over the last axis of a np.ndarray.

    :param array: (np.ndarray) the input array
    :param window: (int) length of the rolling window
    :return: (np.ndarray) view of shape ``array.shape[:-1] +
        (array.shape[-1] - window + 1, window)`` sharing memory with the
        input (no copy is made)
    """
    n_windows = array.shape[-1] - window + 1
    out_shape = array.shape[:-1] + (n_windows, window)
    # Repeat the last stride so consecutive windows overlap by window-1.
    out_strides = array.strides + (array.strides[-1],)
    return np.lib.stride_tricks.as_strided(
        array, shape=out_shape, strides=out_strides)
def calculate_timezone_distance_matrix(df):
    """
    Calculate the pairwise timezone-distance matrix for a users dataframe.

    Args:
        df: DataFrame with columns `idx`, `timezone`, `second_timezone`;
            each timezone string is reduced to its last whitespace-separated
            token after removing parenthesized text.

    Returns:
        (n_users, n_users) numpy array of timezone distances.
    """
    n_users = len(df)
    timezone_df = df[['idx', 'timezone', 'second_timezone']]
    # NOTE(review): `.loc` on a column-sliced frame may trigger pandas'
    # SettingWithCopyWarning; consider adding `.copy()` to the line above.
    timezone_df.loc[:, 'timezone'] = timezone_df.timezone.map(
        lambda t: remove_text_parentheses(t).split(' ')[-1]
    )
    # NOTE(review): the `.replace('me', ' ')` looks suspicious (it would
    # mangle any token containing "me") — verify the intent.
    timezone_df.loc[:, 'second_timezone'] = timezone_df.second_timezone.map(
        lambda t: remove_text_parentheses(t).split(' ')[-1].replace('me', ' ')
    )
    timezone_list = timezone_df.to_dict(orient='records')
    D_tz = np.zeros((n_users, n_users))
    # Fill every (i, j) pair, including i == j.
    for d1, d2 in product(timezone_list, timezone_list):
        idx1, idx2, tz_dist = compute_tz_distance_dict(d1, d2)
        D_tz[idx1, idx2] = tz_dist
    return D_tz
def pyxstyle_path(x, venv_dir="venv"):
    """Calculate the path to py{x}style in the venv directory relative to project root."""
    is_windows = platform.system() == "Windows"
    # Windows venvs put executables under Scripts/ with an .exe suffix.
    suffix = ".exe" if is_windows else ""
    scripts_dir = "Scripts" if is_windows else "bin"
    return [str(path_here / f"{venv_dir}/{scripts_dir}/py{x}style{suffix}")]
def show_mode(loop):
    """Show mode without genetic algorithm. Used only for displaying the results.

    Args:
        loop (int): how many times to run and display a generation.
    """
    # Removed the dead commented-out input() prompt block that shadowed
    # the `loop` parameter.
    for _ in range(loop):
        scores = run_generation()
        print('Best: {} Average: {}'.format(max(scores), st.mean(scores)))
def update_tc_junit_resultfile(tc_junit_obj, kw_junit_list, tc_timestamp):
    """loop through kw_junit object and attach keyword result to testcase

    Arguments:
    1. tc_junit_obj = target testcase
    2. kw_junit_list = list of keyword junit objects
    3. tc_timestamp = target testcase timestamp

    Returns the updated tc_junit_obj.
    """
    for master_tc in tc_junit_obj.root.iter('testcase'):
        # make sure we are modifying the correct testcase
        if master_tc.get('timestamp') == tc_timestamp:
            for kw_junit_obj in kw_junit_list:
                for tc_part in kw_junit_obj.root.iter('testcase'):
                    # make sure we are obtaining only the wanted keywords
                    if tc_part.get('timestamp') == tc_timestamp:
                        # add keyword element to testcase, add property result
                        # to properties, update count
                        for result in tc_part.find('properties').iter('property'):
                            if result.get('type') == "keyword":
                                master_tc.find('properties').append(result)
                        # Merge aggregate attributes (e.g. counts) from the
                        # keyword's testcase element into the master one.
                        master_tc.attrib = update_attribute(master_tc.attrib, tc_part.attrib)
    return tc_junit_obj
def checkFramebufferStatus():
    """Utility method to check framebuffer status and raise errors.

    Returns True when the framebuffer is complete; otherwise raises a
    GLError. The error's description is the stringified status when it is
    one of the known incomplete-framebuffer constants, else None.
    """
    status = glCheckFramebufferStatus( GL_FRAMEBUFFER )
    if status == GL_FRAMEBUFFER_COMPLETE:
        return True
    from OpenGL.error import GLError
    known_errors = (
        GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT,
        GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT,
        GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS,
        GL_FRAMEBUFFER_INCOMPLETE_FORMATS,
        GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER,
        GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER,
        GL_FRAMEBUFFER_UNSUPPORTED,
    )
    # Bug fix: the original loop reassigned `status` to itself; a simple
    # membership test expresses the intent (describe only known codes).
    description = str(status) if status in known_errors else None
    raise GLError(
        err=status,
        result=status,
        baseOperation=glCheckFramebufferStatus,
        description=description,
    )
def listen(host, port, datadir, postProcessing, iridiumHost, iridiumPort):
    """ Run server to listen for transmissions
    """
    logger = logging.getLogger('DirectIP')
    logger.info('Executing runserver.')
    if host is None:
        logger.critical('Invalid host: "%s"' % host)
    assert host is not None
    if datadir is None:
        # Fall back to the current working directory.
        datadir = os.getcwd()
        logger.warn('Missing --datadir. Will use current directory.')
    logger.debug('Calling server.')
    have_outbound = iridiumHost is not None and iridiumPort is not None
    if have_outbound:
        logger.debug('Iridium server at %s:%s' % (iridiumHost, iridiumPort))
        runserver(host, port, datadir, postProcessing,
                  outbound_address=(iridiumHost, iridiumPort))
    else:
        logger.warn('Missing Iridium address to forward outbound messages!')
        runserver(host, port, datadir, postProcessing)
def reset_xform_and_collapse(node_name, freeze=False):
    """
    Reset the xform and collapse the modifier stack of the given node,
    optionally freezing its transform afterwards.

    :param node_name: str
    :param freeze: bool
    """
    target = node_utils.get_pymxs_node(node_name)
    rt.ResetXForm(target)
    rt.CollapseStack(target)
    if freeze:
        freeze_transform(target)
def load_sentences(filename):
    """Return a list of sentences, each a list of tokens.

    Assumes the input file holds one pre-tokenized sentence per line.
    """
    with open(filename) as handle:
        return [line.strip().split() for line in handle]
def imageSequenceRepr(files, strFormat='{pre}[{firstNum}:{lastNum}]{post}', forceRepr=False):
    """ Takes a list of files and creates a string that represents the sequence.

    Args:
        files (list): A list of files in the image sequence.
        strFormat (str): Used to format the output. Uses str.format() command and requires the
            keys [pre, firstNum, lastNum, post]. Defaults to '{pre}[{firstNum}:{lastNum}]{post}'
        forceRepr (bool): If False and a single frame is provided, it will return just that frame.
            If True and a single frame is provided, it will return a repr with that frame as the
            firstNum and lastNum value. False by default.

    Returns:
        str: A string representation of the Image Sequence, the first file
            name when no sequence can be built, or '' for an empty list.
    """
    if len(files) > 1 or (forceRepr and files):
        match = imageSequenceInfo(files[0])
        if match:
            # Map frame number (int) -> original frame string, so the
            # zero-padded form of the lowest/highest frame is preserved.
            info = {}
            for f in files:
                frame = imageSequenceInfo(f)
                if frame and frame.group('frame'):
                    frame = frame.group('frame')
                    info.update({int(frame):frame})
            if info:
                keys = sorted(info.keys())
                low = info[keys[0]]
                high = info[keys[-1]]
                if forceRepr or low != high:
                    return strFormat.format(pre=match.group('pre'), firstNum=low, lastNum=high, post=match.group('post'))
    # Fall back to the first file (single frame) or '' for an empty list.
    if files:
        return files[0]
    return ''
def farthest_point_sampling(D, k, random_init=True):
    """
    Samples points using farthest point sampling

    Parameters
    -------------------------
    D : (n,n) distance matrix between points
    k : int - number of points to sample
    random_init : Whether to sample the first point randomly or to
                  take the one furthest away from all the others

    Output
    --------------------------
    fps : (k,) array of indices of sampled points
    """
    if random_init:
        chosen = [np.random.randint(D.shape[0])]
    else:
        chosen = [np.argmax(D.sum(1))]
    # Track, per point, the distance to its nearest chosen sample.
    min_dists = D[chosen]
    for _ in range(k - 1):
        farthest = np.argmax(min_dists)
        chosen.append(farthest)
        min_dists = np.minimum(min_dists, D[farthest])
    return np.asarray(chosen)
def part2(lines):
    """Solve part 2 by running the input lines with `max` as the reducer.

    >>> part2(load_example(__file__, '9'))
    982
    """
    return run(lines, max)
def use_in_cluster_config(token_file="/var/run/secrets/kubernetes.io/serviceaccount/token",  # nosec
                          ca_cert_file="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"):
    """
    Configure the client using the recommended configuration for accessing the API from within a Kubernetes cluster:
    https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod

    Side effects: rebinds the module globals `api_token_source` (to a
    FileTokenSource over *token_file*) and, when *ca_cert_file* exists,
    `verify_ssl` (to the CA bundle path).
    """
    global api_token_source
    global verify_ssl
    api_token_source = FileTokenSource(token_file)
    # Only trust the in-cluster CA bundle when it is actually mounted.
    if os.path.exists(ca_cert_file):
        verify_ssl = ca_cert_file
def load_description():
    """
    Reads pandas dataframe with name(description) and category for FG phenos.

    Downloads the `pheno_description` file from GCS via gsutil when the
    local copy is missing, then returns the parsed table transposed and
    indexed by `phenocode`.
    """
    description = "./description.txt"
    if not os.path.isfile(description):
        # Fetch the description file from the bucket on first use.
        cmd = f"gsutil cp {pheno_description} {description}"
        subprocess.call(shlex.split(cmd))
    d = pandas.read_csv(description,delimiter='\t',encoding= 'unicode_escape',index_col = "phenocode").T
    return d
def country_call_code():
    """Get information about the calling code of a country.

    NOTE(review): placeholder body — presumably a CLI command stub whose
    behavior is provided elsewhere (e.g. by a decorator); confirm.
    """
    pass
def get_utility_function(args):
    """
    Select the utility function.
    :param args: the arguments for the program.
    :return: the utility function (handler).
    """
    mode = args.mode
    if mode == 'entropy':
        return compute_utility_scores_entropy
    if mode == 'entropyrev':
        # Reverse entropy method.
        return compute_utility_scores_entropyrev
    if mode == 'maxprivacy':
        # Maximize the privacy cost.
        return compute_utility_scores_privacy2
    if mode == 'gap':
        return compute_utility_scores_gap
    if mode == 'greedy':
        return compute_utility_scores_greedy
    if mode == 'deepfool':
        return compute_utility_scores_deepfool
    if mode in ('random', 'knockoff', 'copycat', 'jacobian', 'jacobiantr',
                'inoutdist'):
        # Random querying: plain random, Knockoff Nets, CopyCat CNN,
        # JBDA / JBDA-TR, and the ID+OOD-data attack all use it.
        return compute_utility_scores_random
    if mode == 'worstcase':
        # Attacker knows the exact value of the privacy cost.
        return compute_utility_scores_privacy
    if mode == 'worstcasepate':
        # Attacker knows the exact value of the pate cost.
        return compute_utility_scores_pate
    raise Exception(f"Unknown query selection mode: {args.mode}.")
def convertRequestToStringWhichMayBeEmpty(paramName, source):
    """ Handle strings which may be empty or contain "None".
    Empty strings should be treated as "None". The "None" strings are from the timeSlicesValues
    div on the runPage.
    Args:
        paramName (str): Name of the parameter in which we are interested in.
        source (dict): Source of the information. Usually request.args or request.form.
    This function is fairly similar to `convertRequestToPythonBool`.
    """
    value = source.get(paramName, None, type=str)
    # An empty string or the literal string "None" both mean "no value":
    # if we see "None" we want to be certain it becomes None.
    if value in ("", "None"):
        value = None
    elif value == "nonSubsystemEmptyString":
        # A genuine empty string must be requested explicitly via this
        # contrived sentinel, because an empty group selection pattern is
        # a valid value distinct from "no hist selected" in a request.
        value = ""
    logger.info("{0}: {1}".format(paramName, value))
    return value
def find_am(list_tokens: list):
    """Scan a token stream for a match of the module-level ``am_pattern``.

    Parameters
    ----------
    list_tokens : list
        List of tokens to search.

    Returns
    -------
    tuple(list, list)
        The matched tokens and their 0-based indexes, or ``([], [])``
        when the pattern does not match.
    """
    # NOTE(review): "START" is glued to the first token with no separator;
    # presumably am_pattern anchors on this marker -- confirm against the
    # pattern definition.
    string = "START" + " ".join(list_tokens).lower()
    match = re.findall(am_pattern, string)
    # Removed unreachable trailing `return match`: both branches below return.
    if not match:
        return [], []
    matched_tokens = match[0].split()
    indexes = list(range(len(matched_tokens)))
    return matched_tokens, indexes
def fetch_seq_assembly(mygroup, assembly_final_df, keyargs):
    """
    Download the genomes for every taxid in ``mygroup`` (via ngs) and merge
    the resulting assembly summary into ``assembly_final_df``.
    """
    keyargs['taxids'] = [str(taxid) for taxid in mygroup.TaxId.tolist()]
    #ngd.download(**keyargs)
    get_cmdline_ndg(**keyargs)
    assembly_file = snakemake.output.assembly_output
    # No output file means nothing was downloaded: return the frame untouched.
    if not os.path.isfile(assembly_file):
        return assembly_final_df
    # Read the freshly written assembly info, drop the temp file, and
    # concatenate with what we already have.
    tmp_assembly = pd.read_table(assembly_file)
    os.remove(assembly_file)
    return pd.concat([assembly_final_df, tmp_assembly])
def calculate_min_cost_path(source_node: int, target_node: int, graph: nx.Graph) -> (list, int):
    """
    Calculates the minimal cost path with respect to node-weights from terminal1 to terminal2 on the given graph by
    converting the graph into a line graph (converting nodes to edges) and solving the respective shortest path problem
    on the edges.
    :param source_node: the source node from the given graph from which to calculate the min cost path to the target
    :param target_node: the target node from the given graph
    :param graph: the graph on which we want to find the min cost path from source to target with respect to
    the node weights that are labelled with 'cost'
    :return: the min cost path from source to target, and the cost of the path, with respect to the node costs
    """
    # A node-weighted shortest path is an edge-weighted shortest path on the
    # line graph, where every original node becomes an edge.
    lg = nx.line_graph(graph)
    lg, lg_source, lg_target = adjust_line_graph(source_node, target_node, lg, graph)
    lg_path = nx.shortest_path(lg, lg_source, lg_target, weight="cost")
    total_cost = calculate_path_cost_on_line_graph(lg_path, lg)
    # Map the line-graph path back onto nodes of the original graph.
    return convert_line_graph_path(lg_path), total_cost
def promote(name):
    """
    Promotes a clone file system to no longer be dependent on its "origin"
    snapshot.
    .. note::
        This makes it possible to destroy the file system that the clone was
        created from: the parent-child dependency relationship is reversed,
        so the origin file system becomes a clone of the specified one.
        The cloned snapshot, and any snapshots previous to it, become owned
        by the promoted clone and their space accounting moves there, so
        enough space must be available to accommodate them. No new space is
        consumed by this operation. The promoted clone must not have any
        conflicting snapshot names of its own; use the rename subcommand to
        resolve any conflicts first.
    name : string
        name of clone-filesystem
    .. versionadded:: 2016.3.0
    CLI Example:
    .. code-block:: bash
        salt '*' zfs.promote myzpool/myclone
    """
    ## Run `zfs promote <name>` and normalise the result for salt callers.
    zfs_cmd = __utils__["zfs.zfs_command"](command="promote", target=name)
    res = __salt__["cmd.run_all"](zfs_cmd, python_shell=False)
    return __utils__["zfs.parse_command_result"](res, "promoted")
def otp_verification(request):
    """Api view for verifying OTPs """
    # Non-POST requests fall through and return None, as before.
    if request.method != 'POST':
        return
    serializer = OTPVerifySerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    data = serializer.verify_otp(request)
    return Response(data, status=status.HTTP_200_OK)
def handler(event, context):
    """
    Collects the report for a URL given in the SQS ``message`` from
    VirusTotal's API.

    The SQS record body is JSON with a ``type`` key and, under that type,
    the domain or IP to look up. When no report exists yet, a scan is
    requested and the message is re-queued; otherwise the score is stored
    in DynamoDB.
    """
    # Only the first record is processed -- presumably the queue uses a
    # batch size of 1 (TODO confirm).
    message_json = json.loads(event['Records'][0]['body'])
    asset_type = message_json['type']
    domain_or_ip = message_json[asset_type]
    print(f'Domain or IP: {domain_or_ip}')
    # Throttle to stay inside the VirusTotal API rate limit.
    sleep(DELAY_SECONDS_BETWEEN_API_REQUESTS)
    response = requests.get(
        f'{VIRUSTOTAL_API_BASE_URL}/url/report',
        params={
            'apikey': VIRUSTOTAL_API_KEY,
            'resource': domain_or_ip,
            'allinfo': True
        }
    )
    # raise an exception if the status code was 4xx or 5xx:
    response.raise_for_status()
    print('Collected results')
    try:
        result = response.json()
        result['scans'] # check whether `scans` exists in the response
    except (KeyError, json.JSONDecodeError) as e:
        print(f'Could not parse scan results. Error: {e}')
        # no scan results found, request a scan for this URL:
        sleep(DELAY_SECONDS_BETWEEN_API_REQUESTS)
        requests.post(
            f'{VIRUSTOTAL_API_BASE_URL}/url/scan',
            data={
                'apikey': VIRUSTOTAL_API_KEY,
                'url': domain_or_ip,
            }
        )
        print(f'Requested scan')
        # Re-queue the original message so the report gets collected on a
        # later invocation, once the scan has completed.
        sqs.send_message(
            QueueUrl=os.environ['SQSQueueVirusTotalURL'],
            MessageBody=json.dumps(message_json),
        )
        return
    # save the score in DynamoDB:
    # NOTE(review): snake_case kwargs suggest `dynamodb` is a project
    # wrapper rather than the raw boto3 client -- confirm.
    dynamodb.update_item(
        table_name=DYNAMODB_TABLE_ASSETS,
        primary_key={
            'type': asset_type,
            'sk': domain_or_ip
        },
        update={
            'vt_score': f'{result["positives"]}/{result["total"]}'
        },
    ) | 5,334,379 |
def test_pod_status_parser(config, expected_status):
    """Test the Pod status parsing logic."""
    # `config` and `expected_status` are presumably supplied by a pytest
    # parametrize/fixture defined elsewhere in the test module -- confirm.
    actual_status = PodStatusParser(config).parse()
    assert actual_status == expected_status | 5,334,380 |
def AddGnuIncludeDirectivesToMakefile(infile, outfile):
  """Adds GNU-style include directives to a Makefile.

  Inserts GNU-style include directives for the top-level configure.mk file and
  directory-level .d files. Should be applied before makedepend output has
  been stripped. This effectively converts the Makefiles to be GNU-only.

  Ported from Python 2 `print` statements to the Python 3 `print()` function
  (the rest of this code base already uses Python-3-only f-strings).

  Args:
    infile: Makefile to read
    outfile: Makefile to write
  """
  # A top-level Makefile has no directory component in its name.
  is_top_level = not os.path.dirname(infile.name)
  TOP_CONFIGURE_INCLUDE = 'include configure.mk'
  CONFIGURE_INCLUDE = 'include $(TOP)/configure.mk'
  DEP_INCLUDE = '-include $(SRC:.c=.d)'
  emit_config_include_if_needed = False
  contains_dep_include = False
  for line in infile:
    if emit_config_include_if_needed:
      # The previous line was the anchor (VERSION=/TOP=); insert the
      # configure.mk include here unless it is already present.
      emit_config_include_if_needed = False
      if is_top_level:
        if not line.startswith(TOP_CONFIGURE_INCLUDE):
          print(TOP_CONFIGURE_INCLUDE, file=outfile)
          print('%s: output configure.mk include directive' % infile.name)
      elif not line.startswith(CONFIGURE_INCLUDE):
        print(CONFIGURE_INCLUDE, file=outfile)
        print('%s: output configure.mk include directive' % infile.name)
    elif is_top_level and line.startswith('VERSION='):
      emit_config_include_if_needed = True
    elif line.startswith('TOP=') and not is_top_level:
      emit_config_include_if_needed = True
    elif line.startswith(DEP_INCLUDE):
      contains_dep_include = True
    elif line == MAKE_DEPEND_LINE and not contains_dep_include:
      print(DEP_INCLUDE, file=outfile)
      print('%s: output dependency file include directive' % infile.name)
    # Copy every input line through unchanged; end='' because `line`
    # already carries its own newline (matches the old `print ... ,`).
    print(line, end='', file=outfile)
def resetThreeDViews():
    """Reset focal view around volumes
    """
    # Imported lazily so this helper can be defined in modules that are
    # loaded outside the Slicer application environment.
    import slicer
    slicer.app.layoutManager().resetThreeDViews() | 5,334,382 |
def parse_tag_file(doc: ET.ElementTree) -> dict:
    """
    Takes in an XML tree from a Doxygen tag file and returns a dictionary that looks something like:
    .. code-block:: python
        {'PolyVox': Entry(...),
        'PolyVox::Array': Entry(...),
        'PolyVox::Array1DDouble': Entry(...),
        'PolyVox::Array1DFloat': Entry(...),
        'PolyVox::Array1DInt16': Entry(...),
        'QScriptContext::throwError': FunctionList(...),
        'QScriptContext::toString': FunctionList(...)
        }
    Note the different form for functions. This is required to allow for 'overloading by argument type'.
    :Parameters:
        doc : xml.etree.ElementTree
            The XML DOM object
    :return: a dictionary mapping fully qualified symbols to files
    """
    mapping = {} # type: MutableMapping[str, Union[Entry, FunctionList]]
    function_list = [] # This is a list of function to be parsed and inserted into mapping at the end of the function.
    for compound in doc.findall('./compound'):
        compound_kind = compound.get('kind')
        # Only these Doxygen compound kinds are indexed; everything else is skipped.
        if compound_kind not in {'namespace', 'class', 'struct', 'file', 'define', 'group', 'page'}:
            continue
        compound_name = compound.findtext('name')
        compound_filename = compound.findtext('filename')
        # TODO The following is a hack bug fix I think
        # Doxygen doesn't seem to include the file extension to <compound kind="file"><filename> entries
        # If it's a 'file' type, check if it _does_ have an extension, if not append '.html'
        if compound_kind in ('file', 'page') and not os.path.splitext(compound_filename)[1]:
            compound_filename = compound_filename + '.html'
        # If it's a compound we can simply add it
        mapping[compound_name] = Entry(kind=compound_kind, file=compound_filename)
        for member in compound.findall('member'):
            # If the member doesn't have an <anchorfile> element, use the parent compounds <filename> instead
            # This is the way it is in the qt.tag and is perhaps an artefact of old Doxygen
            anchorfile = member.findtext('anchorfile') or compound_filename
            member_symbol = compound_name + '::' + member.findtext('name')
            member_kind = member.get('kind')
            arglist_text = member.findtext('./arglist') # If it has an <arglist> then we assume it's a function. Empty <arglist> returns '', not None. Things like typedefs and enums can have empty arglists
            if arglist_text and member_kind not in {'variable', 'typedef', 'enumeration'}:
                # Defer functions: overloads must be grouped under one symbol below.
                function_list.append((member_symbol, arglist_text, member_kind, join(anchorfile, '#', member.findtext('anchor'))))
            else:
                mapping[member_symbol] = Entry(kind=member.get('kind'), file=join(anchorfile, '#', member.findtext('anchor')))
    # Second pass: normalise each function signature and collect overloads
    # into a FunctionList keyed by the bare symbol name.
    for member_symbol, arglist, kind, anchor_link in function_list:
        try:
            normalised_arglist = normalise(member_symbol + arglist)[1]
        except ParseException as e:
            print('Skipping %s %s%s. Error reported from parser was: %s' % (kind, member_symbol, arglist, e))
        else:
            if mapping.get(member_symbol) and isinstance(mapping[member_symbol], FunctionList):
                mapping[member_symbol].add_overload(normalised_arglist, anchor_link)
            else:
                # First overload seen (or the symbol was a plain Entry): start a fresh FunctionList.
                mapping[member_symbol] = FunctionList()
                mapping[member_symbol].add_overload(normalised_arglist, anchor_link)
    return mapping | 5,334,383 |
def hpd_grid(sample, alpha=0.05, roundto=2):
    """Calculate highest posterior density (HPD) of array for given alpha.
    The HPD is the minimum width Bayesian credible interval (BCI).
    The function works for multimodal distributions, returning more than one mode
    Parameters
    ----------
    sample : Numpy array or python list
        An array containing MCMC samples
    alpha : float
        Desired probability of type I error (defaults to 0.05)
    roundto: integer
        Number of digits after the decimal point for the results
    Returns
    ----------
    hpd: list of (low, high) interval tuples, plus the KDE grid (x, y)
    and one mode per interval
    """
    sample = np.asarray(sample)
    sample = sample[~np.isnan(sample)]
    # Evaluate a Gaussian KDE on a fine grid spanning the sample range.
    lower = np.min(sample)
    upper = np.max(sample)
    density = kde.gaussian_kde(sample)
    x = np.linspace(lower, upper, 2000)
    y = density.evaluate(x)
    # Accumulate grid points in order of decreasing normalised density
    # until their total mass reaches 1 - alpha: these points form the HPD.
    weights = y / np.sum(y)
    ranked = sorted(zip(x, weights), key=lambda pair: pair[1], reverse=True)
    mass = 0.0
    hdv = []
    for grid_point, weight in ranked:
        mass += weight
        hdv.append(grid_point)
        if mass >= (1 - alpha):
            break
    hdv.sort()
    # Split the HPD points into disjoint intervals wherever consecutive
    # points are separated by more than 5% of the sample range.
    gap = (upper - lower) / 20
    bounds = [round(min(hdv), roundto)]
    for prev, cur in zip(hdv[:-1], hdv[1:]):
        if cur - prev >= gap:
            bounds.append(round(prev, roundto))
            bounds.append(round(cur, roundto))
    bounds.append(round(max(hdv), roundto))
    # Pair consecutive bounds into (low, high) intervals.
    hpd = list(zip(bounds[::2], bounds[1::2]))
    # One mode per interval: the grid point of maximal density inside it.
    modes = []
    for low, high in hpd:
        inside = (x > low) & (x < high)
        x_hpd = x[inside]
        y_hpd = y[inside]
        modes.append(round(x_hpd[np.argmax(y_hpd)], roundto))
    return hpd, x, y, modes
def ecdf_numerical(data, cols_num, col_target=None, grid_c=3, w=15, h_factor=3.5):
    """
    Empirical Cumulative Distribution Function plot. Useful for KS-test.
    Parameters
    ----------
    data : pandas.DataFrame
        dataframe without infinite values. will drop null values while plotting.
    cols_num : list of str
        interval or ratio columns in data
    col_target : str, optional
        the target variable by which to split each cols_num distribution
    grid_c : int, default=3
        number of grid columns
    w : int, default=15
        figsize width argument
    h_factor : float, default=3.5
        height of each subplot row
    """
    n = math.ceil(len(cols_num) / grid_c)
    fig, ax = plt.subplots(n, grid_c, figsize=(w, h_factor*n))
    sorted_cols_num = sorted(cols_num)  # sorted for easier visual search
    if col_target is None:
        for col, a in zip(sorted_cols_num, ax.reshape(-1)):
            # Bug fix: plot on the individual Axes `a`, not the Axes array
            # `ax`; also drop nulls here for consistency with the other
            # branches and the documented behavior.
            ecdf = sm.distributions.ECDF(data[col].dropna())
            a.plot(ecdf.x, ecdf.y)
            a.set_xlabel(col)
    else:
        sorted_cols_target = sorted(data[col_target].unique())
        if len(sorted_cols_target) > 1 and len(sorted_cols_target) <= 5:  # > 5 will be too crowded
            # Classification-style target: one ECDF curve per target level.
            for col, a in zip(sorted_cols_num, ax.reshape(-1)):
                for t in sorted_cols_target:
                    ecdf = sm.distributions.ECDF(data[data[col_target] == t][col].dropna())
                    a.plot(ecdf.x, ecdf.y)
                a.legend(sorted_cols_target)
                a.set_xlabel(col)
        else:  # most probably regression analysis
            for col, a in zip(sorted_cols_num, ax.reshape(-1)):
                ecdf = sm.distributions.ECDF(data[col].dropna())
                a.plot(ecdf.x, ecdf.y)
                a.set_xlabel(col)
def install_compressed_xml(corpus, xmlfile, out, export_path, host):
    """Install xml file on remote server."""
    if not host:
        raise Exception("No host provided! Export not installed.")
    # The remote name is derived from the corpus, not the local file name.
    remote_file_path = os.path.join(export_path, corpus + ".xml.bz2")
    util.install_file(host, xmlfile, remote_file_path)
    # Signal completion by writing an empty marker.
    out.write("")
def get_internal_modules(key='exa'):
    """
    Get a list of modules belonging to the given package.
    Args:
        key (str): Package or library name (e.g. "exa")
    """
    # Match submodules only ("exa.foo"), not the package root itself.
    prefix = key + '.'
    return [mod for name, mod in sys.modules.items() if name.startswith(prefix)]
def emph_rule(phrase: str) -> Rule:
    """
    Check if the phrase only ever appears with or without a surrounding \\emph{...}.
    For example, "et al." can be spelled like "\\emph{et al.}" or "et al.", but it should be
    consistent.
    """
    # Both the opening \emph{ and the closing } are optional so the rule
    # matches the phrase in either spelling.
    pattern = r"(?:\\emph\{)?" + r"(?:" + re.escape(phrase) + r")" + r"(?:\})?"
    return Rule(name=phrase, regex=re.compile(pattern))
def parse_amwg_obs(file):
    """Parse catalog metadata from an AMWG atmospheric observational data file.

    The source and temporal coverage are derived from the file name
    (underscore-separated; second-to-last field is the period), and the
    variable list from the dataset's attributes. On any failure a dict
    flagging the invalid asset with a traceback is returned instead.
    """
    file = pathlib.Path(file)
    info = {}
    try:
        stem = file.stem
        split = stem.split('_')
        source = split[0]
        temporal = split[-2]
        # Two-character period field means a month number ("01".."12").
        if len(temporal) == 2:
            month_number = int(temporal)
            time_period = 'monthly'
            # NOTE(review): %b is locale-dependent; assumes a C/English
            # locale so this yields JAN..DEC -- confirm.
            temporal = datetime(2020, month_number, 1).strftime('%b').upper()
        elif temporal == 'ANN':
            time_period = 'annual'
        else:
            time_period = 'seasonal'
        # Only variables carrying a long_name attribute are catalogued.
        with xr.open_dataset(file, chunks={}, decode_times=False) as ds:
            variable_list = [var for var in ds if 'long_name' in ds[var].attrs]
        info = {
            'source': source,
            'temporal': temporal,
            'time_period': time_period,
            'variable': variable_list,
            'path': str(file),
        }
        return info
    except Exception:
        # Best-effort parser: any failure marks the asset invalid rather
        # than aborting the whole cataloguing run.
        return {INVALID_ASSET: file, TRACEBACK: traceback.format_exc()} | 5,334,389 |
def taylor(x,f,i,n):
    """taylor(x,f,i,n):
    This function approximates the function f over the domain x,
    using a taylor expansion centered at x[i]
    with n+1 terms (starts counting from 0).
    Args:
        x: The domain of the function
        f: The function that will be expanded/approximated
        i: The ith term in the domain x that the expansion is centered around
        n: The number of terms in the expansion (n+1 terms)
    Returns:
        (x,fapprox): A pair of numpy arrays where x is the original domain array and
        f approx is the approximation of f over all of the domain points x using the
        taylor expansion.
    """
    a = x[i]
    N = np.size(x)
    # Zeroth-order term: f(a) replicated across the whole domain.
    fa = f[i]*np.ones_like(x)
    # NOTE(review): `ac.derivative` presumably builds an NxN differentiation
    # matrix over [x[0], x[N-1]] -- confirm its semantics.
    D = ac.derivative(x[0],x[N-1],N)
    fact = 1
    fapprox = fa
    Dk = np.eye(N)
    for k in range(1,n+1):
        fact = fact*k
        Dk = np.matmul(Dk,D)
        # NOTE(review): Dk is applied to the *constant* vector fa rather than
        # the sampled function f, and the result is contracted with (x-a)**k
        # via matmul (a dot product yielding a scalar). Differentiating a
        # constant should give ~0, making higher-order terms vanish -- verify
        # whether `f` was intended here instead of `fa`.
        #fapprox += (np.matmul(np.matmul(Dk,fa),((x-a)**k)))/fact
        fapprox = np.add(fapprox, (np.matmul(np.matmul(Dk,fa),((x-a)**k)))/fact, out=fapprox, casting="unsafe")
    return (x,fapprox) | 5,334,390 |
def precision_and_recall_at_k(ground_truth, prediction, k=-1):
    """
    Compute precision@k and recall@k for a ranked prediction list.
    :param ground_truth: collection of relevant items
    :param prediction: ranked list of predicted items
    :param k: how far down the ranked list we look, set to -1 (default) for all of the predictions
    :return: (precision, recall) tuple
    """
    cutoff = len(prediction) if k == -1 else k
    # Number of relevant items retrieved within the cutoff.
    hits = len(set(ground_truth) & set(prediction[:cutoff]))
    return hits / cutoff, hits / len(ground_truth)
def bubble_sort(array: list, key_func=lambda x: x) -> list:
    """Sort ``array`` in place (ascending) with bubble sort and return it.

    best:O(N) avg:O(N^2) worst:O(N^2)

    Args:
        array: list to sort; mutated in place and also returned.
        key_func: one-argument key extractor, as for ``sorted``. ``None``
            is treated as the identity, mirroring ``sorted(key=None)``.

    Returns:
        The same list object, sorted ascending by ``key_func``.

    Raises:
        TypeError: if ``key_func`` is neither ``None`` nor callable.
    """
    if key_func is None:
        key_func = lambda x: x
    elif not callable(key_func):
        # Accept any callable (built-ins, partials, bound methods), not just
        # `inspect.isfunction` targets; raise instead of assert so the check
        # survives `python -O`.
        raise TypeError("key_func must be callable or None")
    for pos in range(0, len(array)):
        swapped = False
        for idx in range(0, len(array) - pos - 1):
            if key_func(array[idx]) > key_func(array[idx + 1]):
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
        if not swapped:
            # No swaps in a full pass: already sorted. This early exit is
            # what makes the documented O(N) best case true.
            break
    return array
def limit_paulis(mat, n=5, sparsity=None):
    """
    Limits the number of Pauli basis matrices of a hermitian matrix to the n
    highest magnitude ones.
    Args:
        mat (np.ndarray): Input matrix
        n (int): number of surviving Pauli matrices (default=5)
        sparsity (float): sparsity of matrix < 1; when given, terms are added
            until this sparsity is reached instead of keeping exactly n
    Returns:
        scipy.sparse.csr_matrix: matrix
    """
    # pylint: disable=import-outside-toplevel
    # NOTE(review): qiskit.aqua is deprecated and removed in modern Qiskit;
    # these imports pin the code to old qiskit releases.
    from qiskit.aqua.operators import MatrixOperator
    from qiskit.aqua.operators.legacy.op_converter import to_weighted_pauli_operator
    # Bringing matrix into form 2**Nx2**N (Pauli decomposition needs a
    # power-of-two dimension); pad the extension with the identity.
    __l = mat.shape[0]
    if np.log2(__l) % 1 != 0:
        k = int(2 ** np.ceil(np.log2(__l)))
        m = np.zeros([k, k], dtype=np.complex128)
        m[:__l, :__l] = mat
        m[__l:, __l:] = np.identity(k - __l)
        mat = m
    # Getting Pauli matrices
    # pylint: disable=invalid-name
    op = MatrixOperator(matrix=mat)
    op = to_weighted_pauli_operator(op)
    # Sort Pauli terms by coefficient magnitude, largest first.
    paulis = sorted(op.paulis, key=lambda x: abs(x[0]), reverse=True)
    g = 2**op.num_qubits
    mat = scipy.sparse.csr_matrix(([], ([], [])), shape=(g, g),
                                  dtype=np.complex128)
    # Truncation
    if sparsity is None:
        # Keep exactly the n largest-magnitude terms.
        for pa in paulis[:n]:
            mat += pa[0] * pa[1].to_spmatrix()
    else:
        # Keep adding terms until the requested sparsity is reached.
        idx = 0
        while mat[:__l, :__l].nnz / __l ** 2 < sparsity:
            mat += paulis[idx][0] * paulis[idx][1].to_spmatrix()
            idx += 1
        n = idx
    mat = mat.toarray()
    # Strip the identity padding before returning.
    return mat[:__l, :__l] | 5,334,393 |
def list_clusters(configuration: Configuration = None,
                  secrets: Secrets = None) -> AWSResponse:
    """
    List EKS clusters available to the authenticated account.
    """
    eks = aws_client("eks", configuration, secrets)
    logger.debug("Listing EKS clusters")
    return eks.list_clusters()
def tar_cat(tar, path):
    """
    Return the contents of ``path`` inside the open tarfile ``tar`` as bytes.
    """
    member = tar.getmember(path)
    # extractfile yields a file-like object; close it via the context manager.
    with tar.extractfile(member) as fileobj:
        return fileobj.read()
def __get_base_name(input_path):
    """ /foo/bar/test/folder/image_label.ext --> test/folder/image_label.ext """
    # Keep only the last three path components.
    parts = input_path.split('/')
    return '/'.join(parts[-3:])
def or_ipf28(xpath):
    """change xpath to match ipf <2.8 or >2.9 (for noise range)"""
    # Old IPF versions name the elements plain 'noise'.
    legacy = xpath.replace('noiseRange', 'noise').replace('noiseAzimuth', 'noise')
    if legacy == xpath:
        return xpath
    # Match either spelling by OR-ing the two xpaths together.
    return xpath + " | %s" % legacy
def make_form(x, current_dict, publication_dict):
    """Create or update a Taxon of rank Form.
    Some forms have no known names between species and form.
    These keep the form name in the ``infra_name`` field.
    e.g.
    Caulerpa brachypus forma parvifolia
    Others have a known subspecies/variety/subvariety name in the
    ``infra_name`` field, and keep the form name in ``infra_name2``:
    e.g.
    Caulerpa cupressoides var. lycopodium forma elegans
    Arguments
    x An instance of HbvSpecies with rank Form
    current_dict A lookup dict for is_current
    publication_dict A lookup dict for publication_status
    Return The created or updated instance of Taxon, or None when the
    corresponding HbvParent record is missing.
    """
    # Without a parent record we cannot attach the Taxon anywhere.
    # NOTE(review): the "[make_species]" log tag looks like a stale
    # copy-paste for make_form -- string left as-is.
    if not tax_models.HbvParent.objects.filter(name_id=x.name_id).exists():
        logger.warn("[make_species] missing HbvParent with name_id {0}".format(x.name_id))
        return None
    parent_nid = tax_models.HbvParent.objects.get(name_id=x.name_id).parent_nid
    dd = dict(
        # When infra_rank is 'forma' the form name sits in infra_name;
        # otherwise it sits in infra_name2 (see docstring examples).
        name=force_text(x.infra_name) if force_text(
            x.infra_rank) == 'forma' else force_text(x.infra_name2),
        rank=tax_models.Taxon.RANK_FORMA,
        current=current_dict[x.is_current],
        parent=tax_models.Taxon.objects.get(name_id=parent_nid),
        # Strip parentheses and surrounding whitespace from the author string.
        author=x.author.replace("(", "").replace(")", "").strip() if x.author else "",
        field_code=x.species_code
    )
    if x.informal is not None:
        dd['publication_status'] = publication_dict[x.informal]
    obj, created = tax_models.Taxon.objects.update_or_create(name_id=x.name_id, defaults=dd)
    action = "Created" if created else "Updated"
    logger.info("[make_form] {0} {1}.".format(action, obj))
    return obj | 5,334,398 |
def render_view(func):
    """
    Render this view endpoint's specified template with the provided context, with additional context parameters
    as specified by context_config().
    @app.route('/', methods=['GET'])
    @render_view
    def view_function():
        return 'template_name.html', {'context': 'details'}
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        template, context = func(*args, **kwargs)
        # Merge the global config into any view-provided one; otherwise
        # install it wholesale.
        if 'config' in context:
            context['config'].update(context_config())
        else:
            context['config'] = context_config()
        return render_template(template, **context)
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.