| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
try:
        optlist, args = getopt.getopt(
            argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-f", "--format"):
param_format = a
elif o in ("-i", "--max-intron"):
param_max_intron = int(a)
elif o in ("-d", "--max-difference"):
param_max_difference = int(a)
elif o in ("-o", "--max-overlap"):
param_max_overlap = int(a)
elif o in ("-c", "--contigs-tsv-file"):
param_filename_contigs = a
elif o in ("-g", "--genome-file"):
param_genome_file = a
elif o in ("-j", "--join-pattern"):
param_filename_join_pattern = a
elif o in ("-s", "--filename-sizes"):
param_filename_sizes = a
if len(args) > 0:
print USAGE, "no arguments required."
sys.exit(2)
print E.GetHeader()
print E.GetParams()
ninput = 0
max_id = 0
contig_sizes = Genomics.ReadContigSizes(open(param_filename_contigs, "r"))
##########################################################################
# reading predictions
contig = {}
tmp_predictions, filename_tmp_predictions = tempfile.mkstemp()
os.close(tmp_predictions)
tmp_predictions = PredictionFile.PredictionFile()
tmp_predictions.open(filename_tmp_predictions, "w")
if param_format == "predictions":
last_entry = None
entries = []
for line in sys.stdin:
if line[0] == "#":
continue
entry = PredictionParser.PredictionParserEntry(expand=1)
try:
entry.Read(line)
except ValueError:
print "# warning: parsing error in line %s" % line[:-1]
continue
ninput += 1
max_id = max(entry.mPredictionId, max_id)
if entry.mSbjctToken not in contig:
contig[entry.mSbjctToken] = BoundaryPredictions()
contig[entry.mSbjctToken].update(entry)
tmp_predictions.append(entry)
if param_loglevel >= 4:
for c in contig.keys():
print "######start of %s #####################################################" % c
print "#", str(contig[c])
print "######end of %s #####################################################" % c
tmp_predictions.close()
max_id += 1
first_pseudo_id = max_id
cc = contig.keys()
##########################################################################
# get pairs of colinear predictions on different contigs
results = []
if param_loglevel >= 1:
print "# finished parsing %i contigs" % len(cc)
sys.stdout.flush()
for c1 in range(len(cc) - 1):
if param_loglevel >= 1:
print "# processing: %i/%i" % (c1 + 1, len(cc))
sys.stdout.flush()
for c2 in range(c1 + 1, len(cc)):
r = CheckCollinearity(contig[cc[c1]], contig[cc[c2]])
if r and param_loglevel >= 3:
print "# --------------------------------------------------------"
print "# %s and %s are collinear" % (cc[c1], cc[c2])
for r1, r2 in r:
print "# ----------------------"
print "#", str(r1)
print "#", str(r2)
results += r
##########################################################################
# cluster co-linear predictions on different contigs by sbjct_token
queries = {}
for r1, r2 in results:
if r1.mQueryToken not in queries:
queries[r1.mQueryToken] = {}
queries[r1.mQueryToken][r1.mPredictionId] = r1
queries[r1.mQueryToken][r2.mPredictionId] = r2
nnew = 0
ncancelled = 0
# set of contigs joined
map_contig2new = {}
# names of new contigs
new_contigs = {}
# remove old contig file, if it already exists.
if param_filename_join_pattern and "%s" not in param_filename_join_pattern:
if os.path.exists(param_filename_join_pattern):
os.remove(param_filename_join_pattern)
if param_filename_sizes:
outfile_sizes = open(param_filename_sizes, "w")
else:
outfile_sizes = None
##########################################################################
# join contigs
for q in queries.keys():
s = queries[q].values()
s.sort(
lambda x, y: cmp((x.mQueryFrom, x.mQueryTo), (y.mQueryFrom, y.mQueryTo)))
if param_loglevel >= 2:
print "# -----------------------------------------------"
print "# predictions to be joined for query=", q
for p in s:
print "#", str(p)
print "# -----------------------------------------------"
new_prediction = s[0].GetCopy()
last_contig_size = contig_sizes[new_prediction.mSbjctToken]
do_cancel = False
contigs = []
if param_filename_join_pattern:
contigs.append(GetContig(new_prediction))
for p in s[1:]:
overlap = new_prediction.mQueryTo - p.mQueryFrom + 1
if overlap > 0:
if overlap > param_max_overlap or \
100 * (p.mQueryTo - p.mQueryFrom + 1) / overlap > param_max_relative_overlap:
print "# dodgy prediction sequence (overlap = %i), joining of contigs cancelled." % overlap
sys.stdout.flush()
do_cancel = True
break
if param_filename_join_pattern:
contigs.append(GetContig(p))
new_prediction.Add(p,
combine_contig=True,
allow_overlap=True,
contig_size=last_contig_size)
last_contig_size += contig_sizes[p.mSbjctToken]
if do_cancel:
ncancelled += 1
continue
nnew += 1
new_prediction.mPredictionId = max_id
new_prediction.mSbjctStrand = "+"
max_id += 1
print "# joining\t" + string.join(map(lambda x: x.mSbjctToken + x.mSbjctStrand, s), "\t")
if param_filename_join_pattern and len(contigs) > 0:
new_contig = string.join(
map(lambda x: x[0], contigs), param_separator_contigs)
# do not write the same contig twice
if new_contig not in new_contigs:
new_contigs[new_contig] = 1
lcontig = len(string.join(map(lambda x: x[1], contigs), ""))
# check if contig already part of a different joined contig
l = 0
for id, sequence, switch in contigs:
if id in map_contig2new:
print "# WARNING: contig %s already joined" % id
map_contig2new[id] = (
new_contig, switch, l, lcontig - l - len(sequence))
l += len(sequence)
# write new contig
if "%s" in param_filename_join_pattern:
filename_genome = param_filename_join_pattern % new_contig
outfile = open(filename_genome, "w")
else:
filename_genome = param_filename_join_pattern
outfile = open(filename_genome, "a")
if outfile_sizes:
outfile_sizes.write("%s\t%i\t0\n" % (new_contig, lcontig))
outfile.write(
">" + new_contig + "\n" + string.join(map(lambda x: x[1], contigs), "") + "\n")
outfile.close()
print str(new_prediction)
if outfile_sizes:
outfile_sizes.close()
##########################################################################
# move other predictions into the new contigs by translating their
# coordinates
tmp_predictions.open(mode="r")
noutput = 0
ntranslated = 0
for p in tmp_predictions:
if p.mSbjctToken in map_contig2new:
p.mSbjctToken, switch, offset_pos, offset_neg = map_contig2new[
p.mSbjctToken]
if (switch and p.mSbjctStrand == "+") or \
(not switch and p.mSbjctStrand == "-"):
offset = offset_neg
else:
offset = offset_pos
# change strand for inverted contigs
if switch:
if p.mSbjctStrand == "+":
p.mSbjctStrand = "-"
else:
p.mSbjctStrand = "+"
p.mSbjctGenomeFrom += offset
p.mSbjctGenomeTo += offset
ntranslated += 1
noutput += 1
print str(p)
if param_loglevel >= 1:
print "## nread=%i, nnew=%i, noutput=%i, ntranslated=%i, first_id=%i" %\
(ninput, nnew, noutput, ntranslated, first_pseudo_id)
print "# ncontigs=%i, npairs=%i, nqueries=%i, nnew=%i, njoined=%i, ncancelled=%i" %\
(len(contig), len(results), len(queries),
len(new_contigs), len(map_contig2new), ncancelled)
os.remove(filename_tmp_predictions)
print E.GetFooter()
| 23,600
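A minimal sketch (simplified, assumed inputs) of the coordinate translation the script applies when moving predictions onto a joined contig: positions shift by the offset recorded in map_contig2new, and the strand flips when the member contig was inverted.
def translate_prediction(start, end, strand, offset_pos, offset_neg, switch):
    # Same offset/strand logic as above, reduced to plain integers.
    offset = offset_neg if (switch and strand == "+") or (not switch and strand == "-") else offset_pos
    if switch:
        strand = "-" if strand == "+" else "+"
    return start + offset, end + offset, strand

# A prediction at 100-200 on a contig appended after a 5,000 bp contig, no inversion:
print(translate_prediction(100, 200, "+", 5000, 0, False))  # (5100, 5200, '+')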
|
def run_fn(fn_args: TrainerFnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
# get transform component output
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
# read input data
train_dataset = fn_args.data_accessor.tf_dataset_factory(
fn_args.train_files,
dataset_options.TensorFlowDatasetOptions(
batch_size=fn_args.custom_config["batch_size"],
),
tf_transform_output.transformed_metadata.schema,
)
eval_dataset = fn_args.data_accessor.tf_dataset_factory(
fn_args.eval_files,
dataset_options.TensorFlowDatasetOptions(
batch_size=fn_args.custom_config["batch_size"],
),
tf_transform_output.transformed_metadata.schema,
)
# instantiate model
model = build_model(
fn_args.custom_config["input_features"],
fn_args.custom_config["window_size"],
fn_args.custom_config["outer_units"],
fn_args.custom_config["inner_units"],
)
# tf callbacks for tensorboard
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir,
update_freq="batch",
)
# validation_data = list(eval_dataset.as_numpy_iterator())
# train model
model.fit(
train_dataset,
# train_dataset.as_numpy_iterator(),
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback],
)
# Build signatures
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def _serve_tf_examples_fn(**input_features):
# """Returns the output to be used in the serving signature."""
preprocessed_features = model.tft_layer(input_features)
autoencoded_features = model(preprocessed_features)
return {
**{
f"input_features::{f}": input_features[f] for f in input_features.keys()
},
**{
f"preprocessed_features::{f}": preprocessed_features[f]
for f in preprocessed_features.keys()
},
# Output tensor names are of the form:
# lstm_autoencoder_model/decoder/{feature_name}/Reshape_1:0
**{
f"output_features::{f.name.split('/')[2]}": f
for f in autoencoded_features
},
}
_input_tf_specs = {
f: tf.TensorSpec(
shape=[None, fn_args.custom_config["window_size"]], dtype=tf.float32, name=f
)
for f in fn_args.custom_config["input_features"]
}
signatures = {
"serving_default": _serve_tf_examples_fn.get_concrete_function(
**_input_tf_specs
)
}
# Save model (this is the effective output of this function)
model.save(fn_args.serving_model_dir, save_format="tf", signatures=signatures)
| 23,601
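A hedged sketch of consuming the exported SavedModel; the directory, feature names and window size below are placeholders for the values that run_fn receives via fn_args.
import tensorflow as tf

serving_model_dir = "/tmp/serving_model"   # placeholder for fn_args.serving_model_dir
input_features = ["sensor_a", "sensor_b"]  # placeholder for custom_config["input_features"]
window_size = 16                           # placeholder for custom_config["window_size"]

loaded = tf.saved_model.load(serving_model_dir)
infer = loaded.signatures["serving_default"]
batch = {f: tf.zeros([1, window_size], tf.float32) for f in input_features}
outputs = infer(**batch)
# output keys look like input_features::*, preprocessed_features::*, output_features::*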
|
def log_likelihood(X, Y, Z, data, boolean=True, **kwargs):
"""
Log likelihood ratio test for conditional independence. Also commonly known
as G-test, G-squared test or maximum likelihood statistical significance
test. Tests the null hypothesis that X is independent of Y given Zs.
Parameters
----------
X: int, string, hashable object
A variable name contained in the data set
Y: int, string, hashable object
A variable name contained in the data set, different from X
Z: list (array-like)
A list of variable names contained in the data set, different from X and Y.
This is the separating set that (potentially) makes X and Y independent.
Default: []
data: pandas.DataFrame
The dataset on which to test the independence condition.
boolean: bool
If boolean=True, an additional argument `significance_level` must
be specified. If p_value of the test is greater than equal to
`significance_level`, returns True. Otherwise returns False.
If boolean=False, returns the chi2 and p_value of the test.
Returns
-------
If boolean = False, Returns 3 values:
chi: float
The chi-squre test statistic.
p_value: float
The p_value, i.e. the probability of observing the computed chi-square
statistic (or an even higher value), given the null hypothesis
that X \u27C2 Y | Zs.
dof: int
The degrees of freedom of the test.
If boolean = True, returns:
independent: boolean
If the p_value of the test is greater than significance_level, returns True.
Else returns False.
References
----------
[1] https://en.wikipedia.org/wiki/G-test
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 4)), columns=list('ABCD'))
>>> data['E'] = data['A'] + data['B'] + data['C']
>>> log_likelihood(X='A', Y='C', Z=[], data=data, boolean=True, significance_level=0.05)
True
>>> log_likelihood(X='A', Y='B', Z=['D'], data=data, boolean=True, significance_level=0.05)
True
>>> log_likelihood(X='A', Y='B', Z=['D', 'E'], data=data, boolean=True, significance_level=0.05)
False
"""
return power_divergence(
X=X, Y=Y, Z=Z, data=data, boolean=boolean, lambda_="log-likelihood", **kwargs
)
| 23,602
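For the unconditional case (Z=[]) the same G statistic can be computed directly from a contingency table with scipy; a small sketch:
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency

data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 2)), columns=["A", "C"])
observed = pd.crosstab(data["A"], data["C"])
g_stat, p_value, dof, _ = chi2_contingency(observed, lambda_="log-likelihood")
print(g_stat, p_value, dof)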
|
def conjugada_matriz_vec(mat:list):
"""
Funcion que realiza la conjugada de una matriz o vector complejo.
:param mat: Lista que representa la matriz o vector complejo.
:return: lista que representa la matriz o vector resultante.
"""
fila = len(mat)
columnas = len(mat[0])
resul = []
for i in range(fila):
resul.append([])
for j in range(columnas):
resul[i].append(conjugado_complejos(mat[i][j]))
return resul
| 23,603
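A usage sketch, assuming complex numbers are stored as [real, imaginary] pairs; conjugado_complejos is not shown above, so a minimal stand-in is defined here.
def conjugado_complejos(c):
    # Stand-in conjugate for a [real, imaginary] pair (assumption about the representation).
    return [c[0], -c[1]]

m = [[[1, 2], [3, -4]],
     [[0, 1], [5, 0]]]
print(conjugada_matriz_vec(m))  # [[[1, -2], [3, 4]], [[0, -1], [5, 0]]]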
|
def features2matrix(feature_list):
"""
Args:
feature_list (list of Feature):
Returns:
(np.ndarray, list of str): matrix and list of key of features
"""
matrix = np.array([feature.values for feature in feature_list], dtype=float)
key_lst = [feature.key for feature in feature_list]
return matrix, key_lst
| 23,604
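A quick usage sketch with a hypothetical Feature container exposing the key and values attributes the function expects.
from collections import namedtuple
import numpy as np

Feature = namedtuple("Feature", ["key", "values"])  # hypothetical container
features = [Feature("height", [1.0, 2.0]), Feature("weight", [3.5, 4.5])]
matrix, keys = features2matrix(features)
print(matrix.shape, keys)  # (2, 2) ['height', 'weight']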
|
def GenDataFrameFromPath(path, pattern='*.png', fs=False):
"""
generate a dataframe for all file in a dir with the specific pattern of file name.
use: GenDataFrameFromPath(path, pattern='*.png')
"""
fnpaths = list(path.glob(pattern))
df = pd.DataFrame(dict(zip(['fnpath'], [fnpaths])))
df['dir'] = df['fnpath'].apply(lambda x: x.parent)
df['fn'] = df['fnpath'].apply(lambda x: x.name)
if fs:
df['size'] = df['fnpath'].apply(lambda x: os.path.getsize(x))
return df
| 23,605
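Usage sketch; the path must be a pathlib.Path (the function calls .glob() on it) and './images' is a placeholder directory.
from pathlib import Path

df = GenDataFrameFromPath(Path("./images"), pattern="*.png", fs=True)
print(df[["fn", "size"]].head())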
|
def is_heading(line):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
    has_cattle = re.search(r'steers?|hfrs?|calves|cows?|bulls?', line, re.IGNORECASE)
has_price = re.search(r'\$[0-9]+\.[0-9]{2}', line)
return bool(has_cattle) and not bool(has_price)
| 23,606
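A few illustrative calls (made-up report lines):
print(is_heading("STEERS Medium and Large 1-2"))     # True: cattle term, no price
print(is_heading("400-500 lbs 182.00 $185.50 avg"))  # False: contains a $-price
print(is_heading("Feeder pigs sold steady"))         # False: no cattle term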
|
def get_channel_messages(channel_id):
""" Holt fuer einen bestimmten Kanal die Nachrichten aus der Datenbank"""
session = get_cassandra_session()
future = session.execute_async("SELECT * FROM messages WHERE channel_id=%s", (channel_id,))
try:
rows = future.result()
    except Exception:
        log.exception("Failed to fetch messages for channel %s", channel_id)
        # Bail out early; otherwise `rows` below would be undefined.
        return jsonify({'error': 'could not fetch messages'}), 500
messages = []
for row in rows:
messages.append({
'channel_id': row.channel_id,
'message_id': row.message_id,
'author_id': row.author_id,
'message': row.message
})
return jsonify({'messages': messages}), 200
| 23,607
|
def train(traj,
pol, targ_pol, qf, targ_qf,
optim_pol, optim_qf,
epoch, batch_size, # optimization hypers
tau, gamma, # advantage estimation
sampling,
):
"""
Train function for deep deterministic policy gradient
Parameters
----------
traj : Traj
Off policy trajectory.
pol : Pol
Policy.
targ_pol : Pol
Target Policy.
qf : SAVfunction
Q function.
targ_qf : SAVfunction
Target Q function.
optim_pol : torch.optim.Optimizer
Optimizer for Policy.
optim_qf : torch.optim.Optimizer
Optimizer for Q function.
epoch : int
Number of iteration.
batch_size : int
Number of batches.
tau : float
Target updating rate.
gamma : float
Discounting rate.
sampling : int
Number of samping in calculating expectation.
Returns
-------
result_dict : dict
Dictionary which contains losses information.
"""
pol_losses = []
qf_losses = []
logger.log("Optimizing...")
for batch in traj.iterate(batch_size, epoch):
qf_bellman_loss = lf.bellman(
qf, targ_qf, targ_pol, batch, gamma, sampling=sampling)
optim_qf.zero_grad()
qf_bellman_loss.backward()
optim_qf.step()
pol_loss = lf.ag(pol, qf, batch, sampling)
optim_pol.zero_grad()
pol_loss.backward()
optim_pol.step()
for q, targ_q, p, targ_p in zip(qf.parameters(), targ_qf.parameters(), pol.parameters(), targ_pol.parameters()):
targ_p.detach().copy_((1 - tau) * targ_p.detach() + tau * p.detach())
targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
qf_losses.append(qf_bellman_loss.detach().cpu().numpy())
pol_losses.append(pol_loss.detach().cpu().numpy())
logger.log("Optimization finished!")
return dict(PolLoss=pol_losses,
QfLoss=qf_losses,
)
| 23,608
|
def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, capfd):
"""
Test : Create secure wireless connection w/failure
"""
with pytest.raises(SystemExit):
nmcli.main()
assert nmcli.Nmcli.execute_command.call_count == 2
arg_list = nmcli.Nmcli.execute_command.call_args_list
get_available_options_args, get_available_options_kw = arg_list[0]
assert get_available_options_args[0][0] == '/usr/bin/nmcli'
assert get_available_options_args[0][1] == 'con'
assert get_available_options_args[0][2] == 'edit'
assert get_available_options_args[0][3] == 'type'
assert get_available_options_args[0][4] == 'wifi'
get_available_options_data = get_available_options_kw['data'].split()
for param in ['print', '802-11-wireless-security',
'quit', 'yes']:
assert param in get_available_options_data
add_args, add_kw = arg_list[1]
assert add_args[0][0] == '/usr/bin/nmcli'
assert add_args[0][1] == 'con'
assert add_args[0][2] == 'add'
assert add_args[0][3] == 'type'
assert add_args[0][4] == 'wifi'
assert add_args[0][5] == 'con-name'
assert add_args[0][6] == 'non_existent_nw_device'
add_args_text = list(map(to_text, add_args[0]))
for param in ['connection.interface-name', 'wireless_non_existant',
'ipv4.addresses', '10.10.10.10/24',
'802-11-wireless.ssid', 'Brittany',
'802-11-wireless-security.key-mgmt', 'wpa-psk']:
assert param in add_args_text
out, err = capfd.readouterr()
results = json.loads(out)
assert results.get('failed')
assert 'changed' not in results
| 23,609
|
async def bot_start(message: types.Message):
"""
The function is designed to welcome a new bot user.
"""
await types.ChatActions.typing()
first_name = message.from_user.first_name
devs_id = await mention_html("no_n1ce", "Nikita")
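    # Translation of the Russian message below: "Hi, {first_name}! I'm the creator of this
    # conversational chat bot 🤔. For any questions, you can write to me {devs_id}!"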
await message.answer(
text=f"Привет, {first_name}! Я создатель этого разговорного чат-бота 🤔."
f" По всем вопросам, можешь написать мне {devs_id}!",
parse_mode=types.ParseMode.HTML)
| 23,610
|
def upsample_gtiff(files: list, scale: float) -> list:
"""
Performs array math to artificially increase the resolution of a geotiff. No interpolation of values. A scale
factor of X means that the length of a horizontal and vertical grid cell decreases by X. Be careful, increasing the
resolution by X increases the file size by ~X^2
Args:
files: A list of absolute paths to the appropriate type of files (even if len==1)
scale: A positive integer used as the multiplying factor to increase the resolution.
Returns:
list of paths to the geotiff files created
"""
# Read raster dimensions
raster_dim = rasterio.open(files[0])
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each resampled raster (east, south, west, north, width, height)
affine_resampled = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * scale, height * scale)
# keep track of the new files
new_files = []
# Resample each GeoTIFF
for file in files:
rio_obj = rasterio.open(file)
data = rio_obj.read(
out_shape=(int(rio_obj.height * scale), int(rio_obj.width * scale)),
resampling=Resampling.nearest
)
# Convert new resampled array from 3D to 2D
data = np.squeeze(data, axis=0)
# Specify the filepath of the resampled raster
new_filepath = os.path.splitext(file)[0] + '_upsampled.tiff'
new_files.append(new_filepath)
# Save the GeoTIFF
with rasterio.open(
new_filepath,
'w',
driver='GTiff',
height=data.shape[0],
width=data.shape[1],
count=1,
dtype=data.dtype,
nodata=np.nan,
crs=rio_obj.crs,
transform=affine_resampled,
) as dst:
dst.write(data, 1)
return new_files
| 23,611
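Usage sketch with a placeholder input path; each file gains a '<name>_upsampled.tiff' sibling, and a scale of 2 roughly quadruples the file size.
tiffs = ["/data/rasters/flow_2021.tif"]   # placeholder path
new_paths = upsample_gtiff(tiffs, scale=2)
print(new_paths)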
|
def merge_dicts(dict1, dict2):
""" _merge_dicts
Merges two dictionaries into one.
INPUTS
@dict1 [dict]: First dictionary to merge.
@dict2 [dict]: Second dictionary to merge.
RETURNS
@merged [dict]: Merged dictionary
"""
merged = {**dict1, **dict2}
return merged
| 23,612
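Note that values from dict2 win on shared keys:
a = {"x": 1, "y": 2}
b = {"y": 20, "z": 30}
print(merge_dicts(a, b))  # {'x': 1, 'y': 20, 'z': 30}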
|
def check_api_acls(acls, optional=False):
"""Checks if the user provided an API token with its request and if
this token allows the user to access the endpoint desired.
:arg acls: A list of access control
:arg optional: Only check the API token is valid. Skip the ACL validation.
"""
import pagure.api
import pagure.lib.query
if authenticated():
return
flask.g.token = None
flask.g.fas_user = None
token = None
token_str = None
if "Authorization" in flask.request.headers:
authorization = flask.request.headers["Authorization"]
if "token" in authorization:
token_str = authorization.split("token", 1)[1].strip()
token_auth = False
error_msg = None
if token_str:
token = pagure.lib.query.get_api_token(flask.g.session, token_str)
if token:
if token.expired:
error_msg = "Expired token"
else:
flask.g.authenticated = True
# Some ACLs are required
if acls:
token_acls_set = set(token.acls_list)
needed_acls_set = set(acls or [])
overlap = token_acls_set.intersection(needed_acls_set)
# Our token has some of the required ACLs: auth successful
if overlap:
token_auth = True
flask.g.fas_user = token.user
# To get a token, in the `fas` auth user must have
# signed the CLA, so just set it to True
flask.g.fas_user.cla_done = True
flask.g.token = token
flask.g.authenticated = True
# Our token has none of the required ACLs -> auth fail
else:
error_msg = "Missing ACLs: %s" % ", ".join(
sorted(set(acls) - set(token.acls_list))
)
# No ACL required
else:
if optional:
token_auth = True
flask.g.fas_user = token.user
# To get a token, in the `fas` auth user must have
# signed the CLA, so just set it to True
flask.g.fas_user.cla_done = True
flask.g.token = token
flask.g.authenticated = True
else:
error_msg = "Invalid token"
elif optional:
return
else:
error_msg = "Invalid token"
if not token_auth:
output = {
"error_code": pagure.api.APIERROR.EINVALIDTOK.name,
"error": pagure.api.APIERROR.EINVALIDTOK.value,
"errors": error_msg,
}
jsonout = flask.jsonify(output)
jsonout.status_code = 401
return jsonout
| 23,613
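A hedged sketch of an endpoint using the helper above; the route, the APP object and the 'issue_create' ACL name are assumptions, and any non-None return value is the prepared 401 response.
@APP.route("/api/0/<repo>/new_issue", methods=["POST"])
def api_new_issue(repo):
    output = check_api_acls(["issue_create"])   # hypothetical ACL name
    if output is not None:
        return output
    return flask.jsonify({"message": "token accepted for %s" % repo})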
|
def get_data():
"""
Return data files
:return:
"""
data = {}
for df in get_manifest():
d, f = os.path.split(df)
if d not in data:
data[d] = [df]
else:
data[d].append(df)
return list(data.items())
| 23,614
|
def china_province_head_fifteen():
"""
各省前15数据
:return:
"""
return db_request_service.get_china_province_head_fifteen(ChinaTotal, ChinaProvince)
| 23,615
|
def exec_sedml_docs_in_archive(sed_doc_executer, archive_filename, out_dir, apply_xml_model_changes=False,
sed_doc_executer_supported_features=(Task, Report, DataSet, Plot2D, Curve, Plot3D, Surface),
report_formats=None, plot_formats=None,
bundle_outputs=None, keep_individual_outputs=None,
sed_doc_executer_logged_features=(Task, Report, DataSet, Plot2D, Curve, Plot3D, Surface)):
""" Execute the SED-ML files in a COMBINE/OMEX archive (execute tasks and save outputs)
Args:
sed_doc_executer (:obj:`types.FunctionType`): function to execute each SED document in the archive.
The function must implement the following interface::
def sed_doc_executer(doc, working_dir, base_out_path, rel_out_path=None,
apply_xml_model_changes=False, report_formats=None, plot_formats=None,
log=None, indent=0):
''' Execute the tasks specified in a SED document and generate the specified outputs
Args:
doc (:obj:`SedDocument` of :obj:`str`): SED document or a path to SED-ML file which defines a SED document
working_dir (:obj:`str`): working directory of the SED document (path relative to which models are located)
out_path (:obj:`str`): path to store the outputs
* CSV: directory in which to save outputs to files
``{out_path}/{rel_out_path}/{report.id}.csv``
* HDF5: directory in which to save a single HDF5 file (``{out_path}/reports.h5``),
with reports at keys ``{rel_out_path}/{report.id}`` within the HDF5 file
rel_out_path (:obj:`str`, optional): path relative to :obj:`out_path` to store the outputs
apply_xml_model_changes (:obj:`bool`, optional): if :obj:`True`, apply any model changes specified in the SED-ML file
report_formats (:obj:`list` of :obj:`ReportFormat`, optional): report format (e.g., csv or h5)
plot_formats (:obj:`list` of :obj:`PlotFormat`, optional): plot format (e.g., pdf)
log (:obj:`SedDocumentLog`, optional): execution status of document
indent (:obj:`int`, optional): degree to indent status messages
'''
archive_filename (:obj:`str`): path to COMBINE/OMEX archive
out_dir (:obj:`str`): path to store the outputs of the archive
* CSV: directory in which to save outputs to files
``{ out_dir }/{ relative-path-to-SED-ML-file-within-archive }/{ report.id }.csv``
* HDF5: directory in which to save a single HDF5 file (``{ out_dir }/reports.h5``),
with reports at keys ``{ relative-path-to-SED-ML-file-within-archive }/{ report.id }`` within the HDF5 file
apply_xml_model_changes (:obj:`bool`): if :obj:`True`, apply any model changes specified in the SED-ML files before
calling :obj:`task_executer`.
sed_doc_executer_supported_features (:obj:`list` of :obj:`type`, optional): list of the types of elements that the
SED document executer supports. Default: tasks, reports, plots, data sets, curves, and surfaces.
report_formats (:obj:`list` of :obj:`ReportFormat`, optional): report format (e.g., csv or h5)
plot_formats (:obj:`list` of :obj:`PlotFormat`, optional): report format (e.g., pdf)
bundle_outputs (:obj:`bool`, optional): if :obj:`True`, bundle outputs into archives for reports and plots
keep_individual_outputs (:obj:`bool`, optional): if :obj:`True`, keep individual output files
sed_doc_executer_logged_features (:obj:`list` of :obj:`type`, optional): list of the types fo elements which that
the SED document executer logs. Default: tasks, reports, plots, data sets, curves, and surfaces.
Returns:
:obj:`CombineArchiveLog`: log
"""
config = get_config()
# process arguments
if report_formats is None:
report_formats = [ReportFormat(format_value) for format_value in config.REPORT_FORMATS]
if plot_formats is None:
plot_formats = [PlotFormat(format_value) for format_value in config.PLOT_FORMATS]
if bundle_outputs is None:
bundle_outputs = config.BUNDLE_OUTPUTS
if keep_individual_outputs is None:
keep_individual_outputs = config.KEEP_INDIVIDUAL_OUTPUTS
verbose = config.VERBOSE
# create temporary directory to unpack archive
archive_tmp_dir = tempfile.mkdtemp()
# unpack archive and read metadata
archive = CombineArchiveReader.run(archive_filename, archive_tmp_dir)
# determine files to execute
sedml_contents = get_sedml_contents(archive)
if not sedml_contents:
warn("COMBINE/OMEX archive '{}' does not contain any executing SED-ML files".format(archive_filename), NoSedmlWarning)
# print summary of SED documents
print(get_summary_sedml_contents(archive, archive_tmp_dir))
# create output directory
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
# initialize status and output
supported_features = sed_doc_executer_supported_features
logged_features = sed_doc_executer_logged_features
if SedDocument not in supported_features:
supported_features = tuple(list(supported_features) + [SedDocument])
if SedDocument not in logged_features:
logged_features = tuple(list(logged_features) + [SedDocument])
log = init_combine_archive_log(archive, archive_tmp_dir,
supported_features=supported_features,
logged_features=logged_features)
log.status = Status.RUNNING
log.out_dir = out_dir
log.export()
start_time = datetime.datetime.now()
# execute SED-ML files: execute tasks and save output
exceptions = []
for i_content, content in enumerate(sedml_contents):
content_filename = os.path.join(archive_tmp_dir, content.location)
content_id = os.path.relpath(content_filename, archive_tmp_dir)
print('Executing SED-ML file {}: {} ...'.format(i_content, content_id))
doc_log = log.sed_documents[content_id]
doc_log.status = Status.RUNNING
doc_log.export()
with capturer.CaptureOutput(merged=True, relay=verbose) as captured:
doc_start_time = datetime.datetime.now()
try:
working_dir = os.path.dirname(content_filename)
sed_doc_executer(content_filename,
working_dir,
out_dir,
os.path.relpath(content_filename, archive_tmp_dir),
apply_xml_model_changes=apply_xml_model_changes,
report_formats=report_formats,
plot_formats=plot_formats,
log=doc_log,
indent=1)
doc_log.status = Status.SUCCEEDED
except Exception as exception:
exceptions.append(exception)
doc_log.status = Status.FAILED
doc_log.exception = exception
# update status
doc_log.output = captured.get_bytes().decode()
doc_log.duration = (datetime.datetime.now() - doc_start_time).total_seconds()
doc_log.export()
print('')
if bundle_outputs:
print('Bundling outputs ...')
# bundle CSV files of reports into zip archive
archive_paths = [os.path.join(out_dir, '**', '*.' + format.value) for format in report_formats if format != ReportFormat.h5]
archive = build_archive_from_paths(archive_paths, out_dir)
if archive.files:
ArchiveWriter().run(archive, os.path.join(out_dir, config.REPORTS_PATH))
# bundle PDF files of plots into zip archive
archive_paths = [os.path.join(out_dir, '**', '*.' + format.value) for format in plot_formats]
archive = build_archive_from_paths(archive_paths, out_dir)
if archive.files:
ArchiveWriter().run(archive, os.path.join(out_dir, config.PLOTS_PATH))
# cleanup temporary files
print('Cleaning up ...')
if not keep_individual_outputs:
path_patterns = (
[os.path.join(out_dir, '**', '*.' + format.value) for format in report_formats if format != ReportFormat.h5]
+ [os.path.join(out_dir, '**', '*.' + format.value) for format in plot_formats]
)
for path_pattern in path_patterns:
for path in glob.glob(path_pattern, recursive=True):
os.remove(path)
for dir_path, dir_names, file_names in os.walk(out_dir, topdown=False):
for dir_name in list(dir_names):
full_dir_name = os.path.join(dir_path, dir_name)
if not os.path.isdir(full_dir_name):
dir_names.remove(dir_name)
elif not os.listdir(full_dir_name):
# not reachable because directory would
# have already been removed by the iteration for the directory
shutil.rmtree(full_dir_name) # pragma: no cover
dir_names.remove(dir_name) # pragma: no cover
if not dir_names and not file_names:
shutil.rmtree(dir_path)
shutil.rmtree(archive_tmp_dir)
# update status
log.status = Status.FAILED if exceptions else Status.SUCCEEDED
log.duration = (datetime.datetime.now() - start_time).total_seconds()
log.finalize()
log.export()
# summarize execution
print('')
print('============= SUMMARY =============')
print(get_summary_combine_archive_log(log))
# raise exceptions
if exceptions:
msg = 'The COMBINE/OMEX did not execute successfully:\n\n {}'.format(
'\n\n '.join(str(exceptions).replace('\n', '\n ') for exceptions in exceptions))
raise CombineArchiveExecutionError(msg)
# return log
return log
| 23,616
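A minimal stub (sketch only) that satisfies the sed_doc_executer interface documented above; a real executer would run the SED tasks and write the reports and plots.
def noop_sed_doc_executer(doc, working_dir, base_out_path, rel_out_path=None,
                          apply_xml_model_changes=False, report_formats=None,
                          plot_formats=None, log=None, indent=0):
    # Placeholder body: only reports what would be executed.
    print(" " * 2 * indent + "would execute SED document {}".format(doc))

# log = exec_sedml_docs_in_archive(noop_sed_doc_executer, "archive.omex", "out/")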
|
def pipeline(x_train,
y_train,
x_test,
y_test,
param_dict=None,
problem='classification'):
"""Trains and evaluates a DNN classifier.
Args:
x_train: np.array or scipy.sparse.*matrix array of features of training data
y_train: np.array 1-D array of class labels of training data
x_test: np.array or scipy.sparse.*matrix array of features of test data
y_test: np.array 1-D array of class labels of the test data
param_dict: {string: ?} dictionary of parameters of their values
problem: string type of learning problem; values = 'classification',
'regression'
Returns:
model: Keras.models.Model
trained Keras model
metrics: {str: float}
dictionary of metric scores
"""
assert problem in ['classification', 'regression']
if param_dict is None:
param_dict = {'epochs': 10, 'batch_size': 256}
num_feature = x_train.shape[1]
is_sparse = sparse.issparse(x_train)
param_dict = param_dict.copy()
num_epoch = param_dict.pop('epochs')
batch_size = param_dict.pop('batch_size')
if problem == 'regression':
num_output = 1
loss = 'mean_squared_error'
model_init = KerasRegressor
else:
num_output = len(set(y_train))
loss = 'categorical_crossentropy'
model_init = FunctionalKerasClassifier
build_fn = pseudo_partial(
keras_build_fn,
num_feature=num_feature,
num_output=num_output,
is_sparse=is_sparse,
loss=loss,
**param_dict)
model = model_init(
build_fn=build_fn,
epochs=num_epoch,
batch_size=batch_size,
shuffle=True,
verbose=False)
return generic_pipeline(
model, x_train, y_train, x_test, y_test, problem=problem)
| 23,617
|
def test_convolve_lti(u, h):
"""Test whether :func:`acoustics.signal.convolve` behaves properly when
performing a convolution with a time-invariant system.
"""
H = np.tile(h, (len(u), 1)).T
np.testing.assert_array_almost_equal(convolveLTV(u,H), convolveLTI(u,h))
np.testing.assert_array_almost_equal(convolveLTV(u,H,mode='full'), convolveLTI(u,h,mode='full'))
np.testing.assert_array_almost_equal(convolveLTV(u,H,mode='valid'), convolveLTI(u,h,mode='valid'))
np.testing.assert_array_almost_equal(convolveLTV(u,H,mode='same'), convolveLTI(u,h,mode='same'))
| 23,618
|
def p_command_statement_input_2(t):
"""
command_statement_input : object_list
"""
pass
| 23,619
|
def model_selection(modelname, num_out_classes,
dropout=None):
"""
:param modelname:
:return: model, image size, pretraining<yes/no>, input_list
"""
if modelname == 'xception':
return TransferModel(modelchoice='xception',
num_out_classes=num_out_classes)
    # previously also returned: 299, True, ['image'], None
elif modelname == 'resnet18':
return TransferModel(modelchoice='resnet18', dropout=dropout,
num_out_classes=num_out_classes)
    # previously also returned: 224, True, ['image'], None
elif modelname == 'xception_concat':
return TransferModel(modelchoice='xception_concat',
num_out_classes=num_out_classes)
else:
raise NotImplementedError(modelname)
| 23,620
|
def insert_into_solr():
""" Inserts records into an empty solr index which has already been created. It inserts
frequencies of each noun phrase per file along with the arxiv identifier (from the file
name) and the published date (obtained from the arxiv_metadata Solr index)."""
solr = pysolr.Solr('http://localhost:8983/solr/nounphrases')
folderpath = '/home/ashwath/Files/NPFiles'
# Create an empty counter and update counts for phrases in each file inside the for loop.
phrase_counter = Counter()
for filepath in iglob(os.path.join(folderpath, '*.nps.txt')):
# Insert all the phrases in a file into Solr in a list
list_for_solr = []
with open(filepath, "r") as file:
# Get the filename without extension (only 1st 2 parts
# of filename after splitting)
filename= os.path.basename(filepath)
filename = '.'.join(filename.split('.')[0:2])
# published date is a default dict with lists as values.
published_date = search_solr(filename, 'arxiv_metadata', 'arxiv_identifier')
# Line is tab-separated (phrase, start, end). We want only phrase
# Don't add useless phrases to list 'phrases'. Use a generator
# expression instead of a list comprehension
phrases = (line.split("\t")[0].lower().strip() for line in file
if line.split("\t")[0].lower().strip() != "")
temp_phrase_counter = Counter(phrases)
for phrase, frequency in temp_phrase_counter.items():
solr_content = {}
solr_content['phrase'] = phrase
solr_content['num_occurrences'] = frequency
solr_content['published_date'] = published_date
solr_content['arxiv_identifier'] = filename
list_for_solr.append(solr_content)
# Upload to Solr file by file
solr.add(list_for_solr)
| 23,621
|
def make_binary_kernel(kernel_shape, sparsity):
"""
Create a random binary kernel
"""
filter_1d_sz = kernel_shape[0]
filter_2d_sz = filter_1d_sz * kernel_shape[1]
filter_3d_sz = filter_2d_sz * kernel_shape[2]
filter_4d_sz = filter_3d_sz * kernel_shape[3]
sparsity_int = np.ceil(sparsity * filter_4d_sz).astype(int)
pos_cutoff = sparsity_int // 2
binary_kernel = np.zeros(kernel_shape, dtype=np.int8)
# We need to pick randomly elements that wont be 0s
one_d_ind = np.random.choice(range(filter_4d_sz),
sparsity_int,
replace=False)
# Pick elements to be 1s
ind = (one_d_ind[:pos_cutoff] % filter_3d_sz
% filter_2d_sz
% filter_1d_sz,
one_d_ind[:pos_cutoff] % filter_3d_sz
% filter_2d_sz
// filter_1d_sz,
one_d_ind[:pos_cutoff] % filter_3d_sz
// filter_2d_sz,
one_d_ind[:pos_cutoff] // filter_3d_sz)
binary_kernel[ind] = 1
# Pick elements to be -1s
ind = (one_d_ind[pos_cutoff:] % filter_3d_sz
% filter_2d_sz
% filter_1d_sz,
one_d_ind[pos_cutoff:] % filter_3d_sz
% filter_2d_sz
// filter_1d_sz,
one_d_ind[pos_cutoff:] % filter_3d_sz
// filter_2d_sz,
one_d_ind[pos_cutoff:] // filter_3d_sz)
    binary_kernel[ind] = -1
    return binary_kernel
| 23,622
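A quick sanity check of the generator above (assumes numpy is imported as np, as in the function body):
kernel = make_binary_kernel((3, 3, 3, 8), sparsity=0.5)
print(kernel.shape)                            # (3, 3, 3, 8)
print(np.count_nonzero(kernel) / kernel.size)  # close to 0.5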
|
def Packet_computeBinaryPacketLength(startOfPossibleBinaryPacket):
"""Packet_computeBinaryPacketLength(char const * startOfPossibleBinaryPacket) -> size_t"""
return _libvncxx.Packet_computeBinaryPacketLength(startOfPossibleBinaryPacket)
| 23,623
|
def build_check_query(check_action: Action) -> str:
"""Builds check query from action item
Parameters
----------
check_action : action
check action to build query from
Returns
-------
str
query to execute
"""
return f"""
UPDATE todos
SET completed = 1
WHERE name LIKE '{check_action.pattern}';
"""
| 23,624
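Because check_action.pattern is interpolated straight into the SQL text, a parameterized variant (sketch, assuming a driver that accepts '?' placeholders and the same todos table) sidesteps quoting issues:
def build_check_statement(check_action: Action) -> tuple:
    # Returns (sql, params) instead of a fully rendered string.
    return "UPDATE todos SET completed = 1 WHERE name LIKE ?;", (check_action.pattern,)

# sql, params = build_check_statement(action)
# cursor.execute(sql, params)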
|
def gen_positions(n, n_boulders):
"""Generates state codes for boulders. Includes empty rows
Parameters:
n: number of rows/columns
n_boulders: number of boulders per row
return value:
Possible boulder and alien states
"""
boulder_positions=[]; b_p=[]
alien_positions_with_0=["{}1{}".format('0'*(n-i-1),'0'*(i)) for i in range(n)]+['0'*n]
if n_boulders==1:
return alien_positions_with_0, alien_positions_with_0[0:n]
else:
positions=[]
position_index=list(itertools.combinations(range(n), n_boulders))
for tup in position_index:
pos=''
for i in range(n):
if i in tup:
pos+='1'
else:
pos+='0'
positions.append(pos)
        if '0'*n not in positions:
positions.append('0'*n)
return positions, alien_positions_with_0[0:n]
| 23,625
|
def analysis_linear_correlation(data1:np.array,
data2:np.array,
alpha:float = .05,
return_corr:bool = True,
verbose:bool = False)->bool:
"""
## Linear correlation analysis to test independence for numerical / ordinal variables.
data1, date2 -- 1D data to be tested.
alpha -- Significance level (default, 0.05).
return_corr -- If is True, return correlation value and his p-value (default, False).
verbose -- Display extra information (default, False).
return -- boolean according test result.
"""
# get types
type1 = data1.dtype
type2 = data2.dtype
# get size
n = len(data1)
# ord - ord
if type1 == "int64" and type2 == "int64":
# number of categories
ncat1 = len(np.unique(data1))
ncat2 = len(np.unique(data2))
# analysis
if ncat1 >= 5 and ncat2 >= 5:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_kendalltau(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# num - num
if type1 == "float64" and type2 == "float64":
# test if variables are gaussian
if n >= 5000:
is_normal1 = test_anderson(data1, alpha = alpha)
is_normal2 = test_anderson(data2, alpha = alpha)
else:
is_normal1 = test_shapiro(data1, alpha = alpha)
is_normal2 = test_shapiro(data2, alpha = alpha)
# analysis
if n >= 100:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
if is_normal1 and is_normal2:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# num - ord
if (type1 == "float64" and type2 == "int64") or (type1 == "int64" and type2 == "float64"):
# number of categories
if type1 == "int64":
ncat = len(np.unique(data1))
else:
ncat = len(np.unique(data2))
# analysis
if ncat < 5:
result = correlation_kendalltau(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
if n >= 100:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# return
return result
| 23,626
|
def sha256(buffer=None):
"""Secure Hash Algorithm 2 (SHA-2) with 256 bits hash value."""
return Hash("sha256", buffer)
| 23,627
|
def go():
"""
Simple message queuing broker
Same as request-reply broker but using QUEUE device
"""
# Create ROUTER socket. Socket facing clients
frontend = yield from aiozmq.create_zmq_stream(zmq.ROUTER, bind='tcp://*:5559')
# create DEALER socket. Socket facing services
backend = yield from aiozmq.create_zmq_stream(zmq.DEALER, bind='tcp://*:5560')
# create QUEUE device
#TODO: not sure that this is the best way to do it
zmq.device(zmq.QUEUE, frontend.transport._zmq_sock, backend.transport._zmq_sock)
| 23,628
|
def nmc_eig(model, design, observation_labels, target_labels=None,
N=100, M=10, M_prime=None, independent_priors=False):
"""
Nested Monte Carlo estimate of the expected information
gain (EIG). The estimate is, when there are not any random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log p(y_n | \\theta_n, d) -
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^M p(y_n | \\theta_m, d)\\right)
The estimate is, in the presence of random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M'}\\sum_{m=1}^{M'}
p(y_n | \\theta_n, \\widetilde{\\theta}_{nm}, d)\\right)-
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^{M}
p(y_n | \\theta_m, \\widetilde{\\theta}_{m}, d)\\right)
The latter form is used when `M_prime != None`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int N: Number of outer expectation samples.
:param int M: Number of inner expectation samples for `p(y|d)`.
:param int M_prime: Number of samples for `p(y | theta, d)` if required.
:param bool independent_priors: Only used when `M_prime` is not `None`. Indicates whether the prior distributions
for the target variables and the nuisance variables are independent. In this case, it is not necessary to
sample the targets conditional on the nuisance variables.
:return: EIG estimate
:rtype: `torch.Tensor`
"""
    if isinstance(observation_labels, str):  # allow a single string instead of a list of strings
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
# Take N samples of the model
expanded_design = lexpand(design, N) # N copies of the model
trace = poutine.trace(model).get_trace(expanded_design)
trace.compute_log_prob()
if M_prime is not None:
y_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in observation_labels}
theta_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in target_labels}
theta_dict.update(y_dict)
# Resample M values of u and compute conditional probabilities
# WARNING: currently the use of condition does not actually sample
# the conditional distribution!
# We need to use some importance weighting
conditional_model = pyro.condition(model, data=theta_dict)
if independent_priors:
reexpanded_design = lexpand(design, M_prime, 1)
else:
# Not acceptable to use (M_prime, 1) here - other variables may occur after
# theta, so need to be sampled conditional upon it
reexpanded_design = lexpand(design, M_prime, N)
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
conditional_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M_prime)
else:
# This assumes that y are independent conditional on theta
# Furthermore assume that there are no other variables besides theta
conditional_lp = sum(trace.nodes[l]["log_prob"] for l in observation_labels)
y_dict = {l: lexpand(trace.nodes[l]["value"], M) for l in observation_labels}
# Resample M values of theta and compute conditional probabilities
conditional_model = pyro.condition(model, data=y_dict)
# Using (M, 1) instead of (M, N) - acceptable to re-use thetas between ys because
# theta comes before y in graphical model
reexpanded_design = lexpand(design, M, 1) # sample M theta
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
marginal_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M)
terms = conditional_lp - marginal_lp
nonnan = (~torch.isnan(terms)).sum(0).type_as(terms)
terms[torch.isnan(terms)] = 0.
return terms.sum(0)/nonnan
| 23,629
|
def _orient_eigs(eigvecs, phasing_track, corr_metric=None):
"""
Orient each eigenvector deterministically according to the orientation
that correlates better with the phasing track.
Parameters
----------
eigvecs : 2D array (n, k)
`k` eigenvectors (as columns).
phasing_track : 1D array (n,)
Reference track for determining orientation.
corr_metric: spearmanr, pearsonr, var_explained, MAD_explained
Correlation metric to use for selecting orientations.
Returns
-------
2D array (n, k)
Reoriented `k` eigenvectors.
Notes
-----
This function does NOT change the order of the eigenvectors.
"""
for i in range(eigvecs.shape[1]):
mask = np.isfinite(eigvecs[:, i]) & np.isfinite(phasing_track)
if corr_metric is None or corr_metric == "spearmanr":
corr = scipy.stats.spearmanr(phasing_track[mask], eigvecs[mask, i])[0]
elif corr_metric == "pearsonr":
corr = scipy.stats.pearsonr(phasing_track[mask], eigvecs[mask, i])[0]
elif corr_metric == "var_explained":
corr = scipy.stats.pearsonr(phasing_track[mask], eigvecs[mask, i])[0]
# multiply by the sign to keep the phasing information
corr = np.sign(corr) * corr * corr * np.var(eigvecs[mask, i])
elif corr_metric == "MAD_explained":
corr = (
numutils.COMED(phasing_track[mask], eigvecs[mask, i]) *
numutils.MAD(eigvecs[mask, i])
)
else:
raise ValueError("Unknown correlation metric: {}".format(corr_metric))
eigvecs[:, i] = np.sign(corr) * eigvecs[:, i]
return eigvecs
| 23,630
|
def test_atomic_positive_integer_max_inclusive_4_nistxml_sv_iv_atomic_positive_integer_max_inclusive_5_5(mode, save_output, output_format):
"""
Type atomic/positiveInteger is restricted by facet maxInclusive with
value 999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/positiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-positiveInteger-maxInclusive-5.xsd",
instance="nistData/atomic/positiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-positiveInteger-maxInclusive-5-5.xml",
class_name="NistschemaSvIvAtomicPositiveIntegerMaxInclusive5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 23,631
|
def test_ihex_cut_1(datadir, capsys):
"""測試切割頁面函式
NOTE: 此函式輸入資料須為經過padding之codeblock
大小須符合 pgsz * N
"""
input = [
{
'address': 0,
'data': b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F'
},
{
'address': 0xABCD0010,
'data': b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F'
}
]
space_data = 0xFF
page_size = 4
space_padding = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
predict = [
{
'address': 0,
'data': b'\x00\x01\x02\x03'
},
{
'address': 4,
'data': b'\x04\x05\x06\x07'
},
{
'address': 8,
'data': b'\x08\x09\x0A\x0B'
},
{
'address': 12,
'data': b'\x0C\x0D\x0E\x0F'
},
{
'address': 0xABCD0010,
'data': b'\x01\x01\x02\x03'
},
{
'address': 0xABCD0014,
'data': b'\x04\x05\x06\x07'
},
{
'address': 0xABCD0018,
'data': b'\x08\x09\x0A\x0B'
},
{
'address': 0xABCD001C,
'data': b'\x0C\x0D\x0E\x0F'
}
]
real = ihex.cut_to_pages(input, page_size)
assert(real == predict)
| 23,632
|
def transform(item_paths, output_dir, compresslevel=0):
"""Transform the bmeg input to gen3 output directory."""
projects_emitter = emitter('project', output_dir=output_dir)
experiments_emitter = emitter('experiment', output_dir=output_dir)
cases_emitter = emitter('case', output_dir=output_dir)
demographics_emitter = emitter('demographic', output_dir=output_dir)
cases = {}
projects = {}
experiments = {}
for p in ['source/ccle/InProject.Edge.json.gz', 'source/ccle/maf.InProject.Edge.json.gz']:
for line in reader(p):
# # ['type', 'project_id', '*submitter_id', '*cases.submitter_id', 'ethnicity', 'gender', 'race', 'year_of_birth', 'year_of_death']
project_submitter_id = line['to']
project_name = project_submitter_id.replace('Project:', '')
project_name = 'ccle'
project = {'type': 'project', "code": project_name, "name": project_name, "state": "open", "availability_type": "Open", "dbgap_accession_number": project_name}
projects[project_name] = project
experiment_submitter_id = "experiment-{}".format(project_submitter_id.replace('Project:', ''))
experiment = {"type": "experiment", "projects": [{"code": project_name}], "submitter_id": experiment_submitter_id}
experiment["experimental_description"] = project_submitter_id.replace('Project:', '')
experiments[experiment_submitter_id] = experiment
case = {'type': 'case', '*experiments': {'submitter_id': experiment_submitter_id}}
case_submitter_id = line['from']
case['submitter_id'] = case_submitter_id
cases[case_submitter_id] = case
for project in projects:
projects_emitter.write(projects[project])
for experiment in experiments:
experiments_emitter.write(experiments[experiment])
projects_emitter.close()
experiments_emitter.close()
for p in item_paths:
# ['MRN', 'OPTR', 'Date Of Initial Diagnosis', 'Sequence Number', 'Cancer Status', 'cEarliest Chemo Date', 'cEarliest Chemo Date Source', 'cErrorList', 'cEventCount', 'cNeoadjuvant Treatment', 'Count', 'cParent Specimen Count', 'Date of Most Definitive Surgical Resection', 'Tumor Size', 'Type Of First Recurrence', 'Case_ICD::Transformation', 'Case_Patient::Sex']
for line in reader(p):
# {"_id": "Individual:CCLE:ACH-001665", "gid": "Individual:CCLE:ACH-001665", "label": "Individual", "data": {"individual_id": "CCLE:ACH-001665", "ccle_attributes": {"gender": "Male"}}}
case_submitter_id = line['gid']
# # ['type', 'project_id', '*submitter_id', '*cases.submitter_id', 'ethnicity', 'gender', 'race', 'year_of_birth', 'year_of_death']
case = cases[case_submitter_id]
cases_emitter.write(case)
#
# # type project_id *submitter_id *cases.submitter_id ethnicity gender race year_of_birth year_of_death
demographic = {'type': 'demographic', '*submitter_id': 'demographic-{}'.format(case_submitter_id), '*cases': {'submitter_id': case_submitter_id}}
data = line['data']
demographic['gender'] = data.get('gender', 'unknown').lower()
if demographic['gender'] not in ['male', 'female']:
demographic['gender'] = 'unknown'
demographics_emitter.write(demographic)
#
# # ['type', 'project_id', 'submitter_id', 'cases.submitter_id',
# # '*age_at_diagnosis', '*classification_of_tumor', '*days_to_last_follow_up', '*days_to_last_known_disease_status', '*days_to_recurrence', '*last_known_disease_status', '*morphology', '*primary_diagnosis', '*progression_or_recurrence', '*site_of_resection_or_biopsy', '*tissue_or_organ_of_origin', '*tumor_grade', '*tumor_stage', '*vital_status', # 'ajcc_clinical_m', 'ajcc_clinical_n', 'ajcc_clinical_stage', 'ajcc_clinical_t',
# # 'ajcc_pathologic_m', 'ajcc_pathologic_n', 'ajcc_pathologic_stage', 'ajcc_pathologic_t', 'ann_arbor_b_symptoms', 'ann_arbor_clinical_stage', 'ann_arbor_extranodal_involvement', 'ann_arbor_pathologic_stage', 'burkitt_lymphoma_clinical_variant', 'cause_of_death', 'circumferential_resection_margin', 'colon_polyps_history', 'days_to_birth', 'days_to_death', 'days_to_hiv_diagnosis', 'days_to_new_event', 'figo_stage', 'hiv_positive', 'hpv_positive_type', 'hpv_status', 'laterality',
# # 'ldh_level_at_diagnosis', 'ldh_normal_range_upper', 'lymph_nodes_positive', 'lymphatic_invasion_present', 'method_of_diagnosis', 'new_event_anatomic_site', 'new_event_type', 'perineural_invasion_present', 'prior_malignancy', 'prior_treatment', 'residual_disease', 'vascular_invasion_present', 'year_of_diagnosis']
# diagnosis = {'type': 'diagnosis', '*submitter_id': 'diagnosis-{}'.format(case_submitter_id), '*cases': {'submitter_id': case_submitter_id}}
# diagnosis['*age_at_diagnosis'] = None
# diagnosis['*classification_of_tumor'] = 'Unknown' # ['primary', 'metastasis', 'recurrence', 'other', 'Unknown', 'not reported', 'Not Allowed To Collect']
# diagnosis['*days_to_last_follow_up'] = None
# diagnosis['*days_to_last_known_disease_status'] = None
# diagnosis['*days_to_recurrence'] = None
# # [ 'Distant met recurrence/progression',
# # 'Loco-regional recurrence/progression',
# # 'Biochemical evidence of disease without structural correlate',
# # 'Tumor free',
# # 'Unknown tumor status',
# # 'With tumor',
# # 'not reported',
# # 'Not Allowed To Collect']
# disease_status = {
# 'Evidence of this tumor': 'With tumor',
# 'No evidence of this tumor': 'Tumor free',
# 'Unknown, indeterminate whether this tumor is present; not stated': 'Unknown tumor status'
# }
#
# diagnosis['*last_known_disease_status'] = disease_status.get(line['Cancer Status'], 'Unknown tumor status')
# diagnosis['*morphology'] = 'tumor_size={}'.format(line['Tumor Size']) # "None is not of type 'string'")
# diagnosis['*primary_diagnosis'] = line['Case_ICD::Transformation']
# diagnosis['*progression_or_recurrence'] = 'unknown' # ['yes', 'no', 'unknown', 'not reported', 'Not Allowed To Collect']
# diagnosis['*site_of_resection_or_biopsy'] = 'unknown'
# diagnosis['*tissue_or_organ_of_origin'] = 'pancrease'
# diagnosis['*tumor_grade'] = 'unknown' # "None is not of type 'string'")
# diagnosis['*tumor_stage'] = 'unknown' # "None is not of type 'string'")
# diagnosis['*vital_status'] = 'unknown'
#
# diagnosis_emitter.write(diagnosis)
cases_emitter.close()
demographics_emitter.close()
| 23,633
|
def test_CreativeProject_auto_multivariate_functional(max_iter, max_response, error_lim, model_type):
"""
test that auto method works for a particular multivariate (bivariate) function
"""
# define data
covars = [(0.5, 0, 1), (0.5, 0, 1)] # covariates come as a list of tuples (one per covariate: (<initial_guess>, <min>, <max>))
# define response function
def f(x):
return (-(6 * x['covar0'].iloc[0] - 2) ** 2 * np.sin(12 * x['covar0'].iloc[0] - 4)) * (-(6 * x['covar1'].iloc[0] - 2) ** 2 * np.sin(12 * x['covar1'].iloc[0] - 4))
# initialize class instance
cc = TuneSession(covars=covars, model=model_type)
# run the auto-method
cc.auto(response_samp_func=f, max_iter=max_iter)
# assert that max_iter steps taken by optimizer
assert cc.model["covars_sampled_iter"] == max_iter
assert cc.model["covars_proposed_iter"] == max_iter
assert cc.model["response_sampled_iter"] == max_iter
# assert that training and test data is stored
assert cc.train_X.shape[0] == max_iter
assert cc.proposed_X.shape[0] == max_iter
assert cc.train_X.shape[0] == max_iter
assert cc.train_X.shape[1] == 2 # check that it's bivariate train_X
# assert that best response is stored at each step
assert cc.covars_best_response_value.shape[0] == max_iter
assert cc.best_response_value.shape[0] == max_iter
# assert that the correct maximum and covariate values for that spot are identified
THEORETICAL_MAX_COVAR = 1.0
for it in range(len(covars)):
assert abs(cc.covars_best_response_value[-1, it].item() - THEORETICAL_MAX_COVAR)/THEORETICAL_MAX_COVAR \
< error_lim
assert abs(cc.best_response_value[-1].item() - max_response)/max_response < error_lim
| 23,634
|
def applyCustomTitles(titles):
"""
Creates 'chapters-new.xml' which has the user created chapter names.
"""
# Read 'chapters.xml' into a string.
with open('chapters.xml', 'rb') as f:
xmlstr = f.read()
# Since ElementTree doesn't have support for reading/writing the
# DOCTYPE line from xml we need to do it manually.
for x in xmlstr.split(b'\n'):
if b'DOCTYPE' in x:
doctype = x
break
# Parse xml data that we previously read.
root = ET.fromstring(xmlstr)
# Modify chapter names.
index = 0
for chapStr in root.iter('ChapterString'):
chapStr.text = titles[index]
index += 1
# Creates a list from the new xml, so that we can easily write the DOCTYPE
# line in the correct place.
newxmllist = ET.tostring(root, encoding='utf8', method='xml').split(b'\n')
with open('chapters-new.xml', 'wb') as f:
f.write(newxmllist[0] + b'\n')
f.write(doctype + b'\n')
for line in newxmllist[1:]:
line += b'\n'
f.write(line)
| 23,635
|
def test_remove_field():
"""Tests whether removing a field properly
removes the index."""
test = migrations.remove_field(
HStoreField(uniqueness=["beer"]), ["CREATE UNIQUE", "DROP INDEX"]
)
with test as calls:
assert len(calls.get("CREATE UNIQUE", [])) == 0
assert len(calls.get("DROP INDEX", [])) == 1
| 23,636
|
def configure_logging(verbose: int = 0) -> None:
"""Set loglevel.
If ``-v`` flag passed to commandline decrease runtime loglevel for
every repeat occurrence.
``-vvvv`` will always set logging to ``DEBUG``.
Default loglevel is set in the toml config and overridden by
environment variable if there is one.
:param verbose: Level to raise log verbosity by.
"""
levels = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
config = toml["logging"]
# create logging dir and it's parents if they do not exist already
_Path(config["handlers"]["default"]["filename"]).expanduser().parent.mkdir(
exist_ok=True, parents=True
)
# tweak loglevel if commandline argument is provided
config["root"]["level"] = levels[
max(0, levels.index(config["root"]["level"]) - verbose)
]
# load values to ``logging``
_logging_config.dictConfig(toml["logging"])
| 23,637
|
def crossValidate(x, y, cv=5, K=None):
"""
:param y: N*L ranking vectors
:return:
"""
results = {"perf": []}
## cross validation ##
np.random.seed(1100)
kf = KFold(n_splits=cv, shuffle=True, random_state=0)
for train, test in kf.split(x):
x_train = x[train, :]
y_train = y[train, :]
x_test = x[test, :]
y_test = y[test, :]
# y_pred = KNN(K=K).fit(x_train, y_train).predict(x_test)
y_pred = multithreadPredict(x_test, KNN(K=K).fit(x_train, y_train))
        print(y_pred)
results["perf"].append(perfMeasure(y_pred, y_test, rankopt=True))
# print results["perf"][-1]
for key in results.keys():
item = np.array(results[key])
mean = np.nanmean(item, axis=0)
std = np.nanstd(item, axis=0)
results[key] = [mean, std]
return results
| 23,638
|
def make_word_groups(vocab_words):
"""
:param vocab_words: list of vocabulary words with a prefix.
:return: str of prefix followed by vocabulary words with
prefix applied, separated by ' :: '.
This function takes a `vocab_words` list and returns a string
with the prefix and the words with prefix applied, separated
by ' :: '.
"""
vocab_words.reverse()
prefix = vocab_words.pop()
new_list = [prefix]
vocab_words.reverse()
for i in range(len(vocab_words)):
new_list.append(prefix + vocab_words[i])
# print(new_list)
return " :: ".join(new_list)
| 23,639
|
def sync(input, output, all, overwrite):
"""Synchronise all your files between two places"""
print("Syncing")
| 23,640
|
def gpi_g10s40(rescale=False):
"""
Multiply by the 'rescale' factor to adjust hole sizes and centers in entrance pupil (PM)
(Magnify the physical mask coordinates up to the primary mirror size)
"""
demag = gpi_mag_asdesigned()
if rescale:
demag = demag/rescale # rescale 1.1 gives a bigger mask in PM pupil space
print ("gpi_g10s4...")
hdia, ctrs = gpi_g10s40_asmanufactured(1.0/demag) # meters
return hdia, ctrs
""" From GPI FPRD 2008 http://dms.hia.nrc.ca/view.php?fDocumentId=1398
Filter 1/2 pwr bandwidth
name wavelen/um %
Y 0.95-1.14 18
J 1.12-1.35 19
H 1.50-1.80 18
K1 1.9-2.19 14
K2 2.13-2.4 12
Spectral Resolution 34-36 35-39 44-49 62-70 75-83
# spectral pixels 12-13 13-15 16-18 18-20
18-20
pixels 14mas are nyquist at 1.1
"""
| 23,641
|
def test_close_sections():
"""Parse sections without blank lines in between."""
def f(x, y, z):
"""
Parameters
----------
x :
X
y :
Y
z :
Z
Raises
------
Error2
error.
Error1
error.
Returns
-------
str
value
"""
return x + y + z
sections, errors = parse(inspect.getdoc(f), inspect.signature(f))
assert len(sections) == 3
assert not errors
| 23,642
|
def get_good_contours(proc_image, image, bb, savedir, max_num_add=None):
"""
Adapted from `click_and_crop_v3.py`, except that we have to make the contours.
Here, we're going to inspect and check that the contours are reasonable.
Returns a list of processed contours that I'll then use for later.
"""
cv2.imshow("Now detecting contours for this image.", proc_image)
key = cv2.waitKey(0)
if key in utils.ESC_KEYS:
sys.exit()
(cnts, _) = cv2.findContours(proc_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
processed = []
for c in cnts:
try:
# Find the centroids of the contours in _pixel_space_. :)
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
if utils.filter_point(cX,cY,xlower=bb[0],xupper=bb[0]+bb[2],ylower=bb[1],yupper=bb[1]+bb[3]):
continue
# Now fit an ellipse!
ellipse = cv2.fitEllipse(c)
cv2.ellipse(image, ellipse, (0,255,0), 2)
name = "Is this ellipse good? ESC to skip it, else add it."
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(name, 2000, 4000)
cv2.imshow(name, image)
firstkey = cv2.waitKey(0)
if firstkey not in utils.ESC_KEYS:
angle = ellipse[2]
yaw = utils.opencv_ellipse_angle_to_robot_yaw(angle)
processed.append( (cX,cY,angle,yaw) )
cv2.circle(img=image, center=(cX,cY), radius=5, color=(0,0,255), thickness=-1)
cv2.putText(img=image,
text="{},{:.1f}".format(len(processed), angle),
org=(cX,cY),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(255,0,0),
thickness=2)
if (max_num_add is not None) and (len(processed) == max_num_add):
break
except:
pass
assert len(processed) >= 1
cv2.destroyAllWindows()
# Save images for debugging. Then return the processed list.
cv2.imshow("FINAL IMAGE before saving (PRESS ESC IF BAD).", image)
key = cv2.waitKey(0)
if key in utils.ESC_KEYS:
sys.exit()
cv2.imwrite(savedir, image)
return processed
| 23,643
|
def _int_converter(value):
"""Convert string value to int.
    We do not use the default int converter exception since we want to
    control the exact HTTP response code.
Raises: exception_handler.BadRequest if value can not be parsed to int.
Examples:
/<request_path>?count=10 parsed to {'count': '10'} and it should be
converted to {'count': 10}.
"""
try:
return int(value)
except Exception:
raise exception_handler.BadRequest(
'%r type is not int' % value
)
| 23,644
|
def plot_iminuit_contours():
"""plot the confidence contours obtained from the sherpa fit
"""
log.info("plotting parameters contours obtained from iminuit")
# where to take the results, configurations for the individual butterflies
instruments = ["fermi", "magic", "veritas", "fact", "hess", "joint"]
labels = ["Fermi-LAT", "MAGIC", "VERITAS", "FACT", "H.E.S.S.", "joint fit"]
colors = COLORS
lss = ["--", "--", "--", "--", "--", "-"]
fig, axarr = plt.subplots(1, 3, figsize=(16, 5))
# with one loop we realize all the contour plots
for instrument, label, color, ls in zip(instruments, labels, colors, lss):
path = config.repo_path / f"results/fit/gammapy/{instrument}"
contours_path = path / "fit_1.0_sigma_contours_logparabola.npy"
results_path = path / "fit_results_logparabola.yaml"
if not path.exists():
log.warning(f"Missing: {path} . Skipping.")
continue
# load the contours and the results of the fit
contours = np.load(contours_path).tolist()
results = load_yaml(results_path)
# true values to be plotted
amplitude = float(results["parameters"][0]["value"])
alpha = float(results["parameters"][2]["value"])
beta = float(results["parameters"][3]["value"])
# amplitude vs alpha
amplitude_alpha = contours["contour_amplitude_alpha"]
axarr[0].plot(
amplitude_alpha["amplitude"] * 10,
amplitude_alpha["alpha"],
marker="",
ls="-",
lw=2.5,
color=color,
)
# plot actual value
axarr[0].plot(
amplitude * 1e11, alpha, marker="X", markersize=7, color=color, lw=2.5
)
axarr[0].set_xlabel(
r"$\phi_0 \,/\,(10^{-11}\,{\rm TeV} \, {\rm cm}^{-2} {\rm s}^{-1})$",
size=FONTSIZE_CONTOURS,
)
axarr[0].set_ylabel(r"$\Gamma$", size=FONTSIZE_CONTOURS)
# make axis thicker
for axis in ["top", "bottom", "left", "right"]:
axarr[0].spines[axis].set_linewidth(2.5)
axarr[0].set_yticks([2.2, 2.4, 2.6, 2.8])
axarr[0].set_ylim([2.1, 2.9])
axarr[0].set_xticks([3, 4, 5])
axarr[0].set_xlim([2.8, 5.2])
axarr[0].tick_params(
"both", length=7, width=1.6, which="major", labelsize=FONTSIZE_CONTOURS
)
axarr[0].tick_params(
"both", length=4, width=1.6, which="minor", labelsize=FONTSIZE_CONTOURS
)
# amplitude vs beta
amplitude_beta = contours["contour_amplitude_beta"]
axarr[1].plot(
amplitude_beta["amplitude"] * 10,
# contour have a scale factor of 1e-10, parameters are in units of 1e-11
amplitude_beta["beta"],
marker="",
ls="-",
lw=2.5,
color=color,
)
# plot actual value
axarr[1].plot(amplitude * 1e11, beta, marker="X", markersize=7, color=color)
axarr[1].set_xlabel(
r"$\phi_0 \,/\,(10^{-11}\,{\rm TeV} \, {\rm cm}^{-2} {\rm s}^{-1})$",
size=FONTSIZE_CONTOURS,
)
axarr[1].set_ylabel(r"$\beta$", size=FONTSIZE_CONTOURS)
axarr[1].set_xticks([3, 4, 5])
axarr[1].set_xlim([2.8, 5.2])
axarr[1].set_yticks([0.2, 0.4, 0.6])
axarr[1].set_ylim([0.0, 0.8])
# make axis thicker
for axis in ["top", "bottom", "left", "right"]:
axarr[1].spines[axis].set_linewidth(2.5)
axarr[1].tick_params(
"both", length=7, width=1.6, which="major", labelsize=FONTSIZE_CONTOURS
)
axarr[1].tick_params(
"both", length=4, width=1.6, which="minor", labelsize=FONTSIZE_CONTOURS
)
# alpha vs beta
alpha_beta = contours["contour_alpha_beta"]
axarr[2].plot(
alpha_beta["alpha"],
alpha_beta["beta"],
marker="",
ls="-",
lw=2.5,
color=color,
)
# plot actual value
axarr[2].plot(alpha, beta, marker="X", markersize=7, color=color)
axarr[2].set_xlabel(r"$\Gamma$", size=FONTSIZE_CONTOURS)
axarr[2].set_ylabel(r"$\beta$", size=FONTSIZE_CONTOURS)
axarr[2].set_xticks([2.2, 2.4, 2.6, 2.8])
axarr[2].set_xlim([2.1, 2.9])
axarr[2].set_yticks([0.2, 0.4, 0.6])
axarr[2].set_ylim([0.0, 0.8])
# make axis thicker
for axis in ["top", "bottom", "left", "right"]:
axarr[2].spines[axis].set_linewidth(2.5)
axarr[2].tick_params(
"both", length=7, width=1.6, which="major", labelsize=FONTSIZE_CONTOURS
)
axarr[2].tick_params(
"both", length=4, width=1.6, which="minor", labelsize=FONTSIZE_CONTOURS
)
# legend
import matplotlib.lines as mlines
fermi = mlines.Line2D(
[], [], color=COLORS[0], marker="", ls="-", lw=2.5, label="Fermi-LAT"
)
magic = mlines.Line2D(
[], [], color=COLORS[1], marker="", ls="-", lw=2.5, label="MAGIC"
)
veritas = mlines.Line2D(
[], [], color=COLORS[2], marker="", ls="-", lw=2.5, label="VERITAS"
)
fact = mlines.Line2D(
[], [], color=COLORS[3], marker="", ls="-", lw=2.5, label="FACT"
)
hess = mlines.Line2D(
[], [], color=COLORS[4], marker="", ls="-", lw=2.5, label="H.E.S.S."
)
joint = mlines.Line2D(
[], [], color=COLORS[5], marker="", ls="-", lw=2.5, label="joint fit"
)
box = axarr[2].get_position()
axarr[2].set_position([box.x0, box.y0, box.width * 0.97, box.height])
# plot the legend on top of the central plot
axarr[2].legend(
handles=[fermi, magic, veritas, fact, hess, joint],
loc="center left",
fontsize=FONTSIZE_CONTOURS,
bbox_to_anchor=(1., 0.5),
)
plt.tight_layout()
filename = "results/figures/iminuit_logparabola_contour.png"
filename_pdf = "results/figures/iminuit_logparabola_contour.pdf"
    log.info(f"Writing {filename}")
    fig.savefig(filename)
    log.info(f"Writing {filename_pdf}")
    fig.savefig(filename_pdf)
| 23,645
|
def split_rule(rules, rule_name, symbols_to_extract: List[str], subrule_name: str):
"""
    Keep only the options that start with symbols from symbols_to_extract.
    Move the rest into a subrule.
"""
r = rule_by_name(rules, rule_name)
assert isinstance(r.body, Antlr4Selection), r
sub_options = Antlr4Selection([])
for o in r.body:
start_symbols = set()
_direct_left_corner(o, start_symbols, allow_eps_in_sel=True)
if not start_symbols.intersection(symbols_to_extract):
sub_options.append(o)
r.body = Antlr4Selection([o for o in r.body if not (o in sub_options)])
r.body.insert(0, Antlr4Symbol(subrule_name, False))
if len(r.body) == 1:
r.body = r.body[0]
assert len(sub_options) > 0
if len(sub_options) == 1:
sub_options = sub_options[0]
else:
sub_options = Antlr4Selection(sub_options)
sub_r = Antlr4Rule(subrule_name, sub_options)
rules.insert(rules.index(r), sub_r)
return sub_r
| 23,646
|
def ESS(works_prev, works_incremental):
"""
compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.
Parameters
----------
works_prev: np.array
np.array of floats representing the accumulated works at t-1 (unnormalized)
works_incremental: np.array
np.array of floats representing the incremental works at t (unnormalized)
Returns
-------
normalized_ESS: float
effective sample size
"""
prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
incremental_weights_unnormalized = np.exp(-works_incremental)
ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
normalized_ESS = ESS / len(prev_weights_normalized)
    assert normalized_ESS >= 0.0 - DISTRIBUTED_ERROR_TOLERANCE and normalized_ESS <= 1.0 + DISTRIBUTED_ERROR_TOLERANCE, f"the normalized ESS ({normalized_ESS}) is not between 0 and 1"
return normalized_ESS
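# Hedged usage sketch (not part of the original source): small made-up work values; assumes
# numpy, scipy's logsumexp and DISTRIBUTED_ERROR_TOLERANCE are available at module level, as
# the function above already requires.
def _ess_example():
    works_prev = np.array([1.0, 1.2, 0.8, 1.5])
    works_incremental = np.array([0.1, 0.3, 0.2, 0.4])
    print(ESS(works_prev, works_incremental))  # a value between 0 and 1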
| 23,647
|
def new_line_over():
"""Creates a new line over the cursor.
The cursor is also moved to the beginning of the new line. It is
not possible to create more than one new line over the cursor
at a time for now.
Usage:
`In a config file:`
.. code-block:: yaml
- new_line_over:
`Using the API:`
.. code-block:: python
ezvi.tools.new_line_over()
:rtype: str
:return: Characters that would be used in ``Vi`` to add a new line
over the cursor.
"""
to_write = "O" + ESCAPE
return to_write
| 23,648
|
def lambda_handler(event, context):
"""Calls custom job waiter developed by user
Arguments:
event {dict} -- Dictionary with details on previous processing step
context {dict} -- Dictionary with details on Lambda context
Returns:
{dict} -- Dictionary with Processed Bucket, Key(s) and Job Details
"""
try:
logger.info("Lambda event is [{}]".format(event))
logger.info(event["body"])
source_bucket = event["body"]["bucket"]
job_name = event["body"]["targetJob"]
ddb_table = event["body"]["targetDDBTable"]
token = event["body"]["token"]
s3_prefix_key_proc = event["body"]["keysRawProc"]
logger.info(
"[{}] [{}] [{}] [{}]".format(
source_bucket,
s3_prefix_key_proc,
job_name,
ddb_table,
)
)
# Submitting a new Glue Job
job_response = client.start_job_run(
JobName=job_name,
Arguments={
# Specify any arguments needed based on bucket and keys (e.g. input/output S3 locations)
"--job-bookmark-option": "job-bookmark-enable",
"--additional-python-modules": "pyarrow==2,awswrangler==2.9.0",
# Custom arguments below
"--TARGET_DDB_TABLE": ddb_table,
"--S3_BUCKET": source_bucket,
"--S3_PREFIX_PROCESSED": s3_prefix_key_proc[0]
#
},
MaxCapacity=2.0,
)
logger.info("Response is [{}]".format(job_response))
# Collecting details about Glue Job after submission (e.g. jobRunId for Glue)
json_data = json.loads(json.dumps(job_response, default=datetimeconverter))
job_details = {
"jobName": job_name,
"jobRunId": json_data.get("JobRunId"),
"jobStatus": "STARTED",
"token": token,
}
response = {"jobDetails": job_details}
except Exception as e:
logger.error("Fatal error", exc_info=True)
sagemaker.send_pipeline_execution_step_failure(
CallbackToken=token, FailureReason="error"
)
raise e
return response
| 23,649
|
def put_path(components, value):
"""Recursive function to put value in component"""
if len(components) > 1:
new = components.pop(0)
value = put_path(components, value)
else:
new = components[0]
return {new: value}
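# Hedged usage sketch (not part of the original source): builds a nested dict from a key path.
# Note that put_path consumes (mutates) the components list it is given.
def _put_path_example():
    nested = put_path(["a", "b", "c"], 42)
    assert nested == {"a": {"b": {"c": 42}}}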
| 23,650
|
def con_orthogonal_checkboard(X,c_v1,c_v2,c_v3,c_v4,num,N):
"""for principal / isothermic / developable mesh / aux_diamond / aux_cmc
(v1-v3)*(v2-v4)=0
"""
col = np.r_[c_v1,c_v2,c_v3,c_v4]
row = np.tile(np.arange(num),12)
d1 = X[c_v2]-X[c_v4]
d2 = X[c_v1]-X[c_v3]
d3 = X[c_v4]-X[c_v2]
d4 = X[c_v3]-X[c_v1]
data = np.r_[d1,d2,d3,d4]
H = sparse.coo_matrix((data,(row,col)), shape=(num, N))
r = np.einsum('ij,ij->i',d1.reshape(-1,3, order='F'),d2.reshape(-1,3, order='F'))
return H,r
| 23,651
|
def _do_callback(s, begin, end, taglist, cont_handler, attrlookup):
"""internal function to convert the tagtable into ContentHandler events
's' is the input text
'begin' is the current position in the text
'end' is 1 past the last position of the text allowed to be parsed
'taglist' is the tag list from mxTextTools.parse
'cont_handler' is the SAX ContentHandler
'attrlookup' is a dict mapping the encoded tag name to the element info
"""
# bind functions to local names for a slight speedup
characters = cont_handler.characters
startElement = cont_handler.startElement
endElement = cont_handler.endElement
for tag, l, r, subtags in taglist:
# If the tag's beginning is after the current position, then
# the text from here to the tag's beginning are characters()
assert begin <= l, "begin = %d and l = %d" % (begin, l)
if begin < l:
characters(s[begin:l])
if tag.startswith(">"):
            # Named groups don't create ">ignore" tags, so pass them on
# to the ContentHandler. Unnamed groups still need a name so
# mxTextTools can create subtags for them. I named them
# ">ignore" - don't create events for them.
if not tag == ">ignore":
assert tag.startswith(">G"),"Unknown special tag %s" % repr(tag)
# This needs a lookup to get the full attrs
realtag, attrs = attrlookup[tag]
startElement(realtag, attrs)
else:
# Normal tags
startElement(tag, _attribute_list)
# Recurse if it has any children
if subtags:
_do_callback(s, l, r, subtags, cont_handler, attrlookup)
else:
characters(s[l:r])
begin = r
if tag.startswith(">"):
if tag.startswith(">G"):
realtag, attrs = attrlookup[tag]
endElement(realtag)
else:
endElement(tag)
# anything after the last tag and before the end of the current
# range are characters
if begin < end:
characters(s[begin:end])
| 23,652
|
def _PropertyGridInterface_GetPropertyValues(self, dict_=None, as_strings=False, inc_attributes=False):
"""
Returns all property values in the grid.
    :param `dict_`: A dictionary to fill with the property values. If not given,
        then a new one is created. The dict_ can be an object as well,
        in which case its __dict__ is used.
:param `as_strings`: if True, then string representations of values
are fetched instead of native types. Useful for config and such.
:param `inc_attributes`: if True, then property attributes are added
in the form of "@<propname>@<attr>".
:returns: A dictionary with values. It is always a dictionary,
        so if dict_ was an object with a __dict__ attribute, then that
attribute is returned.
"""
if dict_ is None:
dict_ = {}
elif hasattr(dict_,'__dict__'):
dict_ = dict_.__dict__
getter = self.GetPropertyValue if not as_strings else self.GetPropertyValueAsString
it = self.GetVIterator(PG_ITERATE_PROPERTIES)
while not it.AtEnd():
p = it.GetProperty()
name = p.GetName()
dict_[name] = getter(p)
if inc_attributes:
attrs = p.GetAttributes()
if attrs and len(attrs):
dict_['@%s@attr'%name] = attrs
it.Next()
return dict_
| 23,653
|
def get_pipelines(exp_type, cal_ver=None, context=None):
"""Given `exp_type` and `cal_ver` and `context`, locate the appropriate SYSTEM CRDSCFG
reference file and determine the sequence of pipeline .cfgs required to process that
exp_type.
"""
context = _get_missing_context(context)
cal_ver = _get_missing_calver(cal_ver)
with log.augment_exception("Failed determining required pipeline .cfgs for",
"EXP_TYPE", srepr(exp_type), "CAL_VER", srepr(cal_ver)):
config_manager = _get_config_manager(context, cal_ver)
return config_manager.exptype_to_pipelines(exp_type)
| 23,654
|
def distance_to_line(p,a,b):
"""
Computes the perpendicular distance from a point to an infinite line.
Parameters
----------
p : (x,y)
Coordinates of a point.
a : (x,y)
Coordinates of a point on a line.
b : (x,y)
Coordinates of another point on a line.
Returns
----------
float
The Euclidean distance from p to the infinite line through a & b.
"""
# code by BJK
# area of triangle formed between point and line segment
trianglearea=abs(area([a,b,p]))
# length of line segment
line_length=distance(a,b)
# make sure line segment has a length
if line_length==0:
# a & b are the same, so just calculate distance between points
return distance(p,a)
else:
# the distance we want is the height of the triangle
# area is 1/2 base x height so height is 2*area/base
return 2*trianglearea/line_length
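# Hedged usage sketch (not part of the original source): assumes the module-level `area`
# (signed triangle area) and `distance` (Euclidean distance) helpers referenced above exist.
def _distance_to_line_example():
    # perpendicular distance from (5, 3) to the x-axis through (0, 0) and (10, 0) is 3
    print(distance_to_line((5, 3), (0, 0), (10, 0)))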
| 23,655
|
def list():
"""
List current config settings
"""
downloader = create_downloader()
click.echo(f"""Download german if available:\t{downloader.config.german or False}
Download japanese if available:\t{downloader.config.japanese or False}
Highest download quality:\t{downloader.config.quality.name if downloader.config.quality else "-"}""")
| 23,656
|
def rate_limited_imap(f, l):
"""A threaded imap that does not produce elements faster than they
are consumed"""
pool = ThreadPool(1)
res = None
for i in l:
res_next = pool.apply_async(f, (i, ))
if res:
yield res.get()
res = res_next
yield res.get()
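# Hedged usage sketch (not part of the original source): assumes ThreadPool is imported from
# multiprocessing.pool at module level, as the generator above already requires.
def _rate_limited_imap_example():
    import time
    def slow_square(n):
        time.sleep(0.1)
        return n * n
    # each result is computed at most one step ahead of the consumer
    for value in rate_limited_imap(slow_square, range(5)):
        print(value)  # 0, 1, 4, 9, 16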
| 23,657
|
def test(model, dataloader, criterion, params):
"""Test the model using the testloader."""
model.eval()
test_loss = 0
test_correct = 0
test_inputs = 0
bar = tqdm(enumerate(dataloader), total=len(dataloader), desc='Test: ')
with torch.no_grad():
for _, (inputs, labels) in bar:
inputs = inputs.to(params['device'])
labels = labels.to(params['device'])
outputs = model(inputs)
loss = criterion(outputs, labels)
probabilities_task = F.softmax(outputs, dim=1)
_, predicted_task = torch.max(probabilities_task, 1)
test_loss += loss.sum().item()
test_correct += (predicted_task == labels).sum().item()
test_inputs += len(labels)
bar.set_postfix_str(f'Loss Test: {test_loss / test_inputs:.4f}, '
f'Acc Test: {test_correct / test_inputs:.4f}')
| 23,658
|
def pearsonr(A, B):
"""
A broadcasting method to compute pearson r and p
-----------------------------------------------
Parameters:
A: matrix A, (i*k)
B: matrix B, (j*k)
Return:
rcorr: matrix correlation, (i*j)
pcorr: matrix correlation p, (i*j)
Example:
>>> rcorr, pcorr = pearsonr(A, B)
"""
if isinstance(A,list):
A = np.array(A)
if isinstance(B,list):
B = np.array(B)
if np.ndim(A) == 1:
A = A[None,:]
if np.ndim(B) == 1:
B = B[None,:]
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
rcorr = np.dot(A_mA, B_mB.T)/np.sqrt(np.dot(ssA[:,None], ssB[None]))
    df = A.shape[1] - 2  # degrees of freedom: number of samples minus 2
r_forp = rcorr*1.0
r_forp[r_forp==1.0] = 0.0
t_squared = rcorr.T**2*(df/((1.0-rcorr.T)*(1.0+rcorr.T)))
pcorr = special.betainc(0.5*df, 0.5, df/(df+t_squared))
return rcorr, pcorr
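# Hedged usage sketch (not part of the original source): random matrices just to show the
# broadcasting shape contract; assumes the module-level numpy/scipy imports the function
# relies on. Note that pcorr is built from rcorr.T, so it comes back with shape (j, i).
def _pearsonr_example():
    A = np.random.rand(3, 10)  # i=3 variables, k=10 samples
    B = np.random.rand(4, 10)  # j=4 variables, same k
    rcorr, pcorr = pearsonr(A, B)
    print(rcorr.shape, pcorr.shape)  # (3, 4) (4, 3)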
| 23,659
|
def test_get_team_member(client, jwt, session):
"""Assert that the endpoint returns the success status."""
team = create_team(client, jwt, member_role=ProjectRoles.Manager)
response = _get_team_member_(client, jwt, str(team['projectId']), str(team['id']))
assert response.status_code == HTTPStatus.OK
| 23,660
|
def test_lovasz(S):
"""
>>> ha = qudit('a', 3)
>>> np.random.seed(1)
>>> S = TensorSubspace.create_random_hermitian(ha, 5, tracefree=True).perp()
>>> test_lovasz(S)
t: 3.8069736
total err: 0.0000000
total err: 0.0000000
duality gap: 0.0000000
"""
cvxopt.solvers.options['show_progress'] = False
cvxopt.solvers.options['abstol'] = float(1e-8)
cvxopt.solvers.options['reltol'] = float(1e-8)
info = lovasz_theta(S, True)
assert info['sdp_stats']['status'] == 'optimal'
print('t: %.7f' % info['t'])
(tp, errp) = check_lovasz_primal(S, *[ info[x] for x in 'rho,T'.split(',') ])
(td, errd) = check_lovasz_dual(S, *[ info[x] for x in 'Y'.split(',') ])
print('duality gap: %.7f' % (td-tp))
| 23,661
|
def inst_bench(dt, gt, bOpts, tp=None, fp=None, score=None, numInst=None):
"""
    ap, rec, prec, npos, details = inst_bench(dt, gt, bOpts, tp=None, fp=None, score=None, numInst=None)
dt - a list with a dict for each image and with following fields
    .boxInfo - info that will be used to compute the overlap with ground truths, a list
.sc - score
gt
.boxInfo - info used to compute the overlap, a list
.diff - a logical array of size nGtx1, saying if the instance is hard or not
    bOpts
.minoverlap - the minimum overlap to call it a true positive
[tp], [fp], [sc], [numInst]
Optional arguments, in case the inst_bench_image is being called outside of this function
"""
details = None
if tp is None:
# We do not have the tp, fp, sc, and numInst, so compute them from the structures gt, and out
tp = []
fp = []
numInst = []
score = []
dupDet = []
instId = []
ov = []
for i in range(len(gt)):
# Sort dt by the score
sc = dt[i]["sc"]
bb = dt[i]["boxInfo"]
ind = np.argsort(sc, axis=0)
ind = ind[::-1]
if len(ind) > 0:
                sc = np.vstack([sc[i, :] for i in ind])
                bb = np.vstack([bb[i, :] for i in ind])
            else:
                sc = np.zeros((0, 1)).astype(float)
                bb = np.zeros((0, 4)).astype(float)
dtI = dict({"boxInfo": bb, "sc": sc})
tp_i, fp_i, sc_i, numInst_i, dupDet_i, instId_i, ov_i = inst_bench_image(
dtI, gt[i], bOpts
)
tp.append(tp_i)
fp.append(fp_i)
score.append(sc_i)
numInst.append(numInst_i)
dupDet.append(dupDet_i)
instId.append(instId_i)
ov.append(ov_i)
details = {
"tp": list(tp),
"fp": list(fp),
"score": list(score),
"dupDet": list(dupDet),
"numInst": list(numInst),
"instId": list(instId),
"ov": list(ov),
}
tp = np.vstack(tp[:])
fp = np.vstack(fp[:])
sc = np.vstack(score[:])
cat_all = np.hstack((tp, fp, sc))
ind = np.argsort(cat_all[:, 2])
cat_all = cat_all[ind[::-1], :]
tp = np.cumsum(cat_all[:, 0], axis=0)
fp = np.cumsum(cat_all[:, 1], axis=0)
thresh = cat_all[:, 2]
npos = np.sum(numInst, axis=0)
# Compute precision/recall
rec = tp / npos
prec = np.divide(tp, (fp + tp))
ap = VOCap(rec, prec)
return ap, rec, prec, npos, details
| 23,662
|
def getpid(): # real signature unknown; restored from __doc__
"""
getpid() -> pid
Return the current process id
"""
pass
| 23,663
|
def _tee(
cmd: str, executable: str, abort_on_error: bool
) -> Tuple[int, List[str]]:
"""
Execute command "cmd", capturing its output and removing empty lines.
:return: list of strings
"""
_LOG.debug("cmd=%s executable=%s", cmd, executable)
rc, output = hsysinte.system_to_string(cmd, abort_on_error=abort_on_error)
hdbg.dassert_isinstance(output, str)
output1 = output.split("\n")
_LOG.debug("output1= (%d)\n'%s'", len(output1), "\n".join(output1))
#
output2 = hprint.remove_empty_lines_from_string_list(output1)
_LOG.debug("output2= (%d)\n'%s'", len(output2), "\n".join(output2))
_dassert_list_of_strings(output2)
return rc, output2
| 23,664
|
def normalized_str(token):
"""
Return as-is text for tokens that are proper nouns or acronyms, lemmatized
text for everything else.
Args:
token (``spacy.Token`` or ``spacy.Span``)
Returns:
str
"""
if isinstance(token, SpacyToken):
return token.text if preserve_case(token) else token.lemma_
elif isinstance(token, SpacySpan):
return ' '.join(subtok.text if preserve_case(subtok) else subtok.lemma_
for subtok in token)
else:
msg = 'Input must be a spacy Token or Span, not {}.'.format(type(token))
raise TypeError(msg)
| 23,665
|
def scatter_nd(*args, **kwargs):
""" See https://www.tensorflow.org/api_docs/python/tf/scatter_nd .
"""
return tensorflow.scatter_nd(*args, **kwargs)
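# Hedged usage sketch (not part of the original source): a minimal tf.scatter_nd call through
# the wrapper above, assuming the module-level `tensorflow` import it relies on.
def _scatter_nd_example():
    result = scatter_nd(indices=[[0], [2]], updates=[5, 10], shape=[4])
    print(result)  # tensor with values [5, 0, 10, 0]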
| 23,666
|
def validate_element(element, validator, csv_schema=SCHEMA):
"""Raise ValidationError if element does not match schema"""
if validator.validate(element, csv_schema) is not True:
field, errors = next(iter(validator.errors.items()))
message_string = '''\nElement of type '{0}' has the following
errors:\n{1}'''
error_string = pprint.pformat(errors)
raise Exception(message_string.format(field, error_string))
| 23,667
|
def format_errors(errors, indent=0, prefix='', suffix=''):
"""
string: "example"
"example"
dict:
"example":
-
"""
if is_single_item_iterable(errors):
errors = errors[0]
if isinstance(errors, SINGULAR_TYPES):
yield indent_message(repr(errors), indent, prefix=prefix, suffix=suffix)
elif isinstance(errors, collections.Mapping):
for key, value in errors.items():
assert isinstance(key, SINGULAR_TYPES), type(key)
if isinstance(value, SINGULAR_TYPES):
message = "{0}: {1}".format(repr(key), repr(value))
yield indent_message(message, indent, prefix=prefix, suffix=suffix)
else:
yield indent_message(repr(key), indent, prefix=prefix, suffix=':')
for message in format_errors(value, indent + 4, prefix='- '):
yield message
elif is_non_string_iterable(errors):
# for making the rhs of the numbers line up
extra_indent = int(math.ceil(math.log10(len(errors)))) + 2
for index, value in enumerate(errors):
list_prefix = "{0}. ".format(index)
messages = format_errors(
value,
indent=indent + extra_indent - len(list_prefix),
prefix=list_prefix,
)
for message in messages:
yield message
else:
assert False, "should not be possible"
| 23,668
|
def concatenate_sequences(X: Union[list, np.ndarray], y: Union[list, np.ndarray],
sequence_to_value: bool = False) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Concatenate multiple sequences to scikit-learn compatible numpy arrays.
    Parameters
    ----------
    X : Union[list, np.ndarray] of shape=(n_sequences, )
        All input sequences. Note that all elements in ```X```
        must have at least one equal dimension.
    y : Union[list, np.ndarray] of shape=(n_sequences, )
        All target sequences. Note that all elements in ```y```
        must have at least one equal dimension.
sequence_to_value : bool, default=False
If true, expand each element of y to the sequence length
Returns
-------
X : np.ndarray of shape=(n_samples, n_features)
Input data where n_samples is the accumulated length of all sequences
y : np.ndarray of shape=(n_samples, n_features) or shape=(n_samples, )
Target data where n_samples is the accumulated length of all sequences
sequence_ranges : Union[None, np.ndarray] of shape=(n_sequences, 2)
Sequence border indicator matrix
"""
if isinstance(X, list):
X = np.asarray(X)
if isinstance(y, list):
y = np.asarray(y)
X = np.array(X)
y = np.array(y)
if sequence_to_value:
for k, _ in enumerate(y):
y[k] = np.repeat(y[k], X[k].shape[0])
check_consistent_length(X, y)
sequence_ranges: np.ndarray = np.ndarray([])
if X.ndim == 1:
sequence_ranges = np.zeros((X.shape[0], 2), dtype=int)
sequence_ranges[:, 1] = np.cumsum([X[k].shape[0] for k, _ in enumerate(X)])
sequence_ranges[1:, 0] = sequence_ranges[:-1, 1]
for k, _ in enumerate(X):
X[k], y[k] = check_X_y(X[k], y[k], multi_output=True)
return np.concatenate(X), np.concatenate(y), sequence_ranges
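# Hedged usage sketch (not part of the original source): two sequences of different lengths
# but the same feature dimension. Assumes a NumPy version that still builds ragged object
# arrays implicitly, which the function above already relies on.
def _concatenate_sequences_example():
    X = [np.ones((3, 2)), np.zeros((5, 2))]
    y = [np.arange(3), np.arange(5)]
    X_flat, y_flat, ranges = concatenate_sequences(X, y)
    print(X_flat.shape, y_flat.shape, ranges)  # (8, 2) (8,) [[0 3] [3 8]]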
| 23,669
|
def print_help_page(bot, file=sys.stdout):
"""print help page"""
def p(text):
print(text, file=file)
plugin = bot.get_plugin(Commands)
title = "Available Commands for {nick} at {host}".format(**bot.config)
p("=" * len(title))
p(title)
p("=" * len(title))
p('')
p('.. contents::')
p('')
modules = {}
for name, (predicates, callback) in plugin.items():
commands = modules.setdefault(callback.__module__, [])
commands.append((name, callback, predicates))
for module in sorted(modules):
p(module)
p('=' * len(module))
p('')
for name, callback, predicates in sorted(modules[module]):
p(name)
p('-' * len(name))
p('')
doc = callback.__doc__
doc = doc.replace('%%', bot.config.cmd)
for line in doc.split('\n'):
line = line.strip()
if line.startswith(bot.config.cmd):
line = ' ``{}``'.format(line)
p(line)
if 'permission' in predicates:
p('*Require {0[permission]} permission.*'.format(predicates))
if predicates.get('public', True) is False:
p('*Only available in private.*')
p('')
| 23,670
|
def get_export_table_operator(table_name, dag=None):
"""Get templated BigQueryToCloudStorageOperator.
Args:
table_name (string): Name of the table to export.
dag (airflow.models.DAG): DAG used by context_manager. e.g. `with get_dag() as dag: get_export_table_operator(..., dag=dag)`. Defaults to None.
Returns:
airflow.contrib.operators.bigquery_operator.BigQueryOperator
"""
if dag is None:
logger.warning('No DAG context was found. The operator may not be associated to any DAG nor appeared in Web UI')
date_descriptor = '{{ ds_nodash }}'
table_name_with_date_descriptor = \
'{table_name}{date_descriptor}'.format(
table_name=table_name,
date_descriptor=date_descriptor)
return BigQueryToCloudStorageOperator(
dag=dag or models._CONTEXT_MANAGER_DAG,
task_id='{experiment_name}.{table_name}.export'
.format(
experiment_name=get_config('experiment_name'),
table_name=table_name),
source_project_dataset_table='{gcp_project_name}.{database_name}.{table_name}'
.format(
gcp_project_name=get_config('gcp_project_name'),
database_name='%s_database' % get_config('experiment_name'),
table_name=table_name_with_date_descriptor),
        # TODO: support exports larger than 1 GB
# https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
destination_cloud_storage_uris=[
'gs://{bucket_name}/{experiment_name}/exported_tables/'
'{table_name}/{date_descriptor}/'
'out.csv.gzip'.format(
bucket_name=get_config('bucket_name'),
experiment_name=get_config('experiment_name'),
date_descriptor=date_descriptor,
table_name=table_name)],
compression="GZIP")
| 23,671
|
def test_winner_solver_after_run():
"""Assert that the solver is the winning model after run."""
atom = ATOMClassifier(X_class, y_class, random_state=1)
atom.run("LR")
atom.branch = "fs_branch"
atom.feature_selection(strategy="sfm", solver=None, n_features=8)
assert atom.pipeline[0].sfm.estimator_ is atom.winner.estimator
| 23,672
|
def run_command_with_code(cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
    Returns the output of that command. Runs in the current working directory.
"""
if redirect_output:
stdout = sp.PIPE
else:
stdout = None
proc = sp.Popen(cmd, stdout=stdout, stderr=sp.PIPE)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
log.error('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output, proc.returncode
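# Hedged usage sketch (not part of the original source): assumes `sp` is the subprocess module
# and `log` is a module-level logger, as the function above already requires.
def _run_command_with_code_example():
    output, returncode = run_command_with_code(["echo", "hello"])
    print(returncode, output)  # 0 b'hello\n'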
| 23,673
|
async def delete_project(
delete_project_request: DeleteProject, token: str = Depends(oauth2_scheme)
):
"""[API router to delete project on AWS Rekognition]
Args:
        delete_project_request (DeleteProject): [AWS Rekognition delete project request]
token (str, optional): [Bearer token for authentication]. Defaults to Depends(oauth2_scheme).
Raises:
HTTPException: [Unauthorized exception when invalid token is passed]
error: [Exception in underlying controller]
Returns:
[DeleteProjectResponse]: [AWS Rekognition delete project response]
"""
try:
logging.info("Calling /aws/rekog/delete_project endpoint")
logging.debug(f"Request: {delete_project_request}")
if decodeJWT(token=token):
response = ProjectController().delete_project(
request=delete_project_request
)
return DeleteProjectResponse(**response)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid access token",
headers={"WWW-Authenticate": "Bearer"},
)
except Exception as error:
logging.error(f"Error in /aws/rekog/delete_project endpoint: {error}")
raise error
| 23,674
|
def _create_element_invocation(span_: span.Span, callee: Union[ast.NameRef,
ast.ModRef],
arg_array: ast.Expr) -> ast.Invocation:
"""Creates a function invocation on the first element of the given array.
We need to create a fake invocation to deduce the type of a function
in the case where map is called with a builtin as the map function. Normally,
map functions (including parametric ones) have their types deduced when their
ast.Function nodes are encountered (where a similar fake ast.Invocation node
is created).
Builtins don't have ast.Function nodes, so that inference can't occur, so we
  essentially perform that synthesis and deduction here.
Args:
span_: The location in the code where analysis is occurring.
callee: The function to be invoked.
arg_array: The array of arguments (at least one) to the function.
Returns:
An invocation node for the given function when called with an element in the
argument array.
"""
annotation = ast.TypeAnnotation(
span_, scanner.Token(scanner.TokenKind.KEYWORD, span_,
scanner.Keyword.U32), ())
index_number = ast.Number(
scanner.Token(scanner.TokenKind.KEYWORD, span_, '32'), annotation)
index = ast.Index(span_, arg_array, index_number)
return ast.Invocation(span_, callee, (index,))
| 23,675
|
def evaluate(feature_dir, prefix, settings, total_runs=10):
"""to evaluate and rank results for SYSU_MM01 dataset
Arguments:
feature_dir {str} -- a dir where features are saved
prefix {str} -- prefix of file names
"""
gallery_cams, probe_cams = gen_utils.get_cam_settings(settings)
all_cams = list(set(gallery_cams + probe_cams)) # get unique cams
features = {}
# get permutation indices
cam_permutations = gen_utils.get_cam_permutation_indices(all_cams)
# get test ids
test_ids = gen_utils.get_test_ids()
for cam_index in all_cams:
# read features
cam_feature_file = osp.join(feature_dir, (prefix + "_cam{}").format(cam_index))
features["cam" + str(cam_index)] = gen_utils.load_feature_file(cam_feature_file)
# perform testing
print(list(features.keys()))
cam_id_locations = [1, 2, 2, 4, 5, 6]
# camera 2 and 3 are in the same location
mAPs = []
cmcs = []
for run_index in range(total_runs):
print("trial #{}".format(run_index))
X_gallery, Y_gallery, cam_gallery, X_probe, Y_probe, cam_probe = gen_utils.get_testing_set(
features,
cam_permutations,
test_ids,
run_index,
cam_id_locations,
gallery_cams,
probe_cams,
settings,
)
# print(X_gallery.shape, Y_gallery.shape, cam_gallery.shape, X_probe.shape, Y_probe.shape, cam_probe.shape)
dist = gen_utils.euclidean_dist(X_probe, X_gallery)
cmc = gen_utils.get_cmc_multi_cam(
Y_gallery, cam_gallery, Y_probe, cam_probe, dist
)
mAP = gen_utils.get_mAP_multi_cam(
Y_gallery, cam_gallery, Y_probe, cam_probe, dist
)
print("rank 1 5 10 20", cmc[[0, 4, 9, 19]])
print("mAP", mAP)
cmcs.append(cmc)
mAPs.append(mAP)
# find mean mAP and cmc
cmcs = np.array(cmcs) # 10 x #gallery
mAPs = np.array(mAPs) # 10
mean_cmc = np.mean(cmcs, axis=0)
mean_mAP = np.mean(mAPs)
print("mean rank 1 5 10 20", mean_cmc[[0, 4, 9, 19]])
print("mean mAP", mean_mAP)
| 23,676
|
def test_door_pause_protocol(enable_door_safety_switch):
"""
Test that when the door safety switch is enabled, pause cannot
be resumed until the door is closed
"""
pause_mgr = PauseManager(door_state=DoorState.CLOSED)
assert pause_mgr.queue == []
pause_mgr.set_door(door_state=DoorState.OPEN)
pause_mgr.pause(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.PAUSE]
with pytest.raises(PauseResumeError):
pause_mgr.resume(PauseType.PAUSE)
assert pause_mgr.queue == [PauseType.PAUSE]
pause_mgr.set_door(door_state=DoorState.CLOSED)
assert pause_mgr.queue == [PauseType.PAUSE]
pause_mgr.resume(PauseType.PAUSE)
assert pause_mgr.queue == []
| 23,677
|
def _CreateClassToFileNameDict(test_apk):
"""Creates a dict mapping classes to file names from size-info apk."""
constants.CheckOutputDirectory()
test_apk_size_info = os.path.join(constants.GetOutDirectory(), 'size-info',
os.path.basename(test_apk) + '.jar.info')
class_to_file_dict = {}
# Some tests such as webview_cts_tests use a separately downloaded apk to run
# tests. This means the apk may not have been built by the system and hence
# no size info file exists.
if not os.path.exists(test_apk_size_info):
logging.debug('Apk size file not found. %s', test_apk_size_info)
return class_to_file_dict
with open(test_apk_size_info, 'r') as f:
for line in f:
file_class, file_name = line.rstrip().split(',', 1)
# Only want files that are not prebuilt.
if file_name.startswith('../../'):
class_to_file_dict[file_class] = str(
file_name.replace('../../', '//', 1))
return class_to_file_dict
| 23,678
|
def main():
"""main function
"""
bcl2fastq_qc_script = os.path.abspath(os.path.join(
os.path.dirname(sys.argv[0]), "bcl2fastq_qc.py"))
assert os.path.exists(bcl2fastq_qc_script)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-t', "--testing", action='store_true',
help="Use MongoDB test server")
default = 14
parser.add_argument('-w', '--win', type=int, default=default,
help="Number of days to look back (default {})".format(default))
parser.add_argument('-n', "--dry-run", action='store_true',
help="Dry run")
parser.add_argument('--no-mail', action='store_true',
help="Don't send email on detected failures")
parser.add_argument('-v', '--verbose', action='count', default=0,
help="Increase verbosity")
parser.add_argument('-q', '--quiet', action='count', default=0,
help="Decrease verbosity")
args = parser.parse_args()
# Repeateable -v and -q for setting logging level.
# See https://www.reddit.com/r/Python/comments/3nctlm/what_python_tools_should_i_be_using_on_every/
# and https://gist.github.com/andreas-wilm/b6031a84a33e652680d4
# script -vv -> DEBUG
# script -v -> INFO
# script -> WARNING
# script -q -> ERROR
# script -qq -> CRITICAL
# script -qqq -> no logging at all
logger.setLevel(logging.WARN + 10*args.quiet - 10*args.verbose)
if not is_production_user():
logger.warning("Not a production user. Skipping DB update")
sys.exit(1)
connection = mongodb_conn(args.testing)
if connection is None:
sys.exit(1)
db = connection.gisds.runcomplete
epoch_present, epoch_back = generate_window(args.win)
results = db.find({"analysis.Status": "SUCCESS", "analysis.QC_status" : {"$exists": 0},
"timestamp": {"$gt": epoch_back, "$lt": epoch_present}})
logger.info("Found %s runs", results.count())
for record in results:
run_number = record['run']
for (analysis_count, analysis) in enumerate(record['analysis']):
out_dir = analysis["out_dir"]
analysis_id = analysis['analysis_id']
status = analysis['Status']
#Check if bcl2Fastq is completed successfully
if analysis['Status'] != "SUCCESS":
logger.info("Analysis is not completed successfully under %s", out_dir)
continue
if not os.path.exists(out_dir):
logger.critical("Following directory listed in DB doesn't exist: %s", out_dir)
continue
if args.testing:
bcl2fastq_qc_out = os.path.join(out_dir, "bcl2fastq_qc.test.txt")
else:
bcl2fastq_qc_out = os.path.join(out_dir, "bcl2fastq_qc.txt")
if os.path.exists(bcl2fastq_qc_out):
logger.critical("Refusing to overwrite existing file %s. Skipping QC check", bcl2fastq_qc_out)
continue
bcl2fastq_qc_cmd = [bcl2fastq_qc_script, '-d', out_dir]
if args.no_mail:
bcl2fastq_qc_cmd.append("--no-mail")
if args.dry_run:
logger.warning("Skipped following run: %s", out_dir)
continue
try:
QC_status = "analysis.{}.QC_status".format(analysis_count)
status = subprocess.check_output(bcl2fastq_qc_cmd, stderr=subprocess.STDOUT)
if "QC_FAILED" in str(status):
db.update({"run": run_number, 'analysis.analysis_id' : analysis_id},
{"$set": {QC_status: "FAILED"}})
logger.info("Demux QC failed for run: %s", run_number)
else:
db.update({"run": run_number, 'analysis.analysis_id' : analysis_id},
{"$set": {QC_status: "SUCCESS"}})
logger.info("Demux QC SUCCESS for run: %s", run_number)
with open(bcl2fastq_qc_out, 'w') as fh:
fh.write(status.decode())
except subprocess.CalledProcessError as e:
logger.fatal("The following command failed with return code %s: %s",
e.returncode, ' '.join(bcl2fastq_qc_cmd))
logger.fatal("Output: %s", e.output.decode())
logger.fatal("Exiting")
connection.close()
| 23,679
|
def train_deeper_better(train_data, train_labels, test_data, test_labels, params):
"""Same as 'train_deeper', but now with tf.contrib.data.Dataset input pipeline."""
default_params = {
'regularization_coeff': 0.00001,
'keep_prob': 0.5,
'batch_size': 128,
'fc1_size': 2048,
'fc2_size': 1024,
'fc3_size': 1024,
'fc4_size': 1024,
'fc5_size': 512,
'activation': 'relu',
}
activation_funcs = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
}
def get_param(name):
if name in params:
return params[name]
logger.warning('%s not found in param, use default value %r', name, default_params[name])
return default_params[name]
regularization_coeff = get_param('regularization_coeff')
keep_prob_param = get_param('keep_prob')
batch_size = int(get_param('batch_size'))
fc1_size = int(get_param('fc1_size'))
fc2_size = int(get_param('fc2_size'))
fc3_size = int(get_param('fc3_size'))
fc4_size = int(get_param('fc4_size'))
fc5_size = int(get_param('fc5_size'))
activation_func = activation_funcs[get_param('activation')]
save_restore = False
time_limit_seconds = 3600
saver_path = join(SAVER_FOLDER, train_deeper_better.__name__)
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(52)
global_step_tensor = tf.contrib.framework.get_or_create_global_step()
epoch_tensor = tf.Variable(0, trainable=False, name='epoch')
next_epoch = tf.assign_add(epoch_tensor, 1)
# dataset definition
dataset = Dataset.from_tensor_slices({'x': train_data, 'y': train_labels})
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
sample = iterator.get_next()
x = sample['x']
y = sample['y']
# actual computation graph
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool, name='is_training')
regularizer = tf.contrib.layers.l2_regularizer(scale=regularization_coeff)
def fully_connected(x, size, name):
return dense_regularized(
x, size, is_training, keep_prob, regularizer, name, activation_func,
)
fc1 = fully_connected(x, fc1_size, 'fc1')
fc2 = fully_connected(fc1, fc2_size, 'fc2')
fc3 = fully_connected(fc2, fc3_size, 'fc3')
fc4 = fully_connected(fc3, fc4_size, 'fc4')
fc5 = fully_connected(fc4, fc5_size, 'fc5')
logits = dense(fc5, NUM_CLASSES, regularizer, 'logits')
layer_summaries(logits, 'logits_summaries')
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1)), tf.float32),
)
accuracy_percent = 100 * accuracy
tf.summary.scalar('accuracy_percent', accuracy_percent)
with tf.name_scope('loss'):
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
regularization_loss = tf.reduce_sum(regularization_losses)
cross_entropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y),
)
loss = cross_entropy_loss + regularization_loss
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)
tf.summary.scalar('loss', loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# ensures that we execute the update_ops before performing the train_op
# needed for batch normalization (apparently)
optimizer = tf.train.AdamOptimizer(learning_rate=(1e-4), epsilon=1e-3)
train_op = optimizer.minimize(loss, global_step=global_step_tensor)
all_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'train'))
batch_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'batch'))
test_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'test'))
saver = tf.train.Saver(max_to_keep=3)
test_accuracy = 0
best_accuracy = 0
with tf.Session(graph=graph) as sess:
restored = False
if save_restore:
try:
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=SAVER_FOLDER))
restored = True
except ValueError as exc:
logger.info('Could not restore previous session! %r', exc)
logger.info('Starting from scratch!')
if not restored:
tf.global_variables_initializer().run()
logger.info('Starting training...')
start_time = time.time()
def enough():
if time_limit_seconds is None:
return False
elapsed = time.time() - start_time
return elapsed > time_limit_seconds
epoch = epoch_tensor.eval()
new_epoch = True
while not enough():
logger.info('Starting new epoch #%d!', epoch)
sess.run(iterator.initializer, feed_dict={})
while not enough():
step = tf.train.global_step(sess, tf.train.get_global_step())
try:
sess.run(train_op, feed_dict={keep_prob: keep_prob_param, is_training: True})
if new_epoch:
new_epoch = False
l, reg_l, ac, summaries = sess.run(
[loss, regularization_loss, accuracy_percent, all_summaries],
feed_dict={keep_prob: keep_prob_param, is_training: False},
)
batch_writer.add_summary(summaries, global_step=step)
logger.info(
'Minibatch loss: %f, reg loss: %f, accuracy: %.2f%%',
l, reg_l, ac,
)
except tf.errors.OutOfRangeError:
logger.info('End of epoch #%d', epoch)
break
# end of epoch
previous_epoch = epoch
epoch = next_epoch.eval()
new_epoch = True
if previous_epoch % 5 == 0 and save_restore:
saver.save(sess, saver_path, global_step=previous_epoch)
def get_eval_dict(data, labels):
"""Data for evaluation."""
return {x: data, y: labels, keep_prob: 1, is_training: False}
train_l, train_ac, summaries = sess.run(
[loss, accuracy_percent, all_summaries],
feed_dict=get_eval_dict(train_data[:10000], train_labels[:10000]),
)
train_writer.add_summary(summaries, global_step=step)
test_l, test_accuracy, summaries = sess.run(
[loss, accuracy_percent, all_summaries],
feed_dict=get_eval_dict(test_data, test_labels),
)
test_writer.add_summary(summaries, global_step=step)
best_accuracy = max(best_accuracy, test_accuracy)
logger.info('Train loss: %f, train accuracy: %.2f%%', train_l, train_ac)
logger.info(
'Test loss: %f, TEST ACCURACY: %.2f%% BEST ACCURACY %.2f%% <<<<<<<',
test_l, test_accuracy, best_accuracy,
)
return best_accuracy
| 23,680
|
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64], desired: numpy.ndarray, decimal: int
):
"""
usage.statsmodels: 4
"""
...
| 23,681
|
def test_data(landsat_number):
"""
Downloads a dataset for testing from Landsat 4, 5, 7, or 8
*Note that you must be signed into earthexplorer.usgs.gov
Inputs:
landsat_number 4, 5, 7, or 8 - the desired Landsat satellites to sample data from
"""
#ensure the input landsat number is integer type
landsat = int(landsat_number)
#open the earthexplorer url for the given landsat number
if landsat == 8:
url = "http://earthexplorer.usgs.gov/download/4923/LC80410362014232LGN00/STANDARD"
webbrowser.open(url)
elif landsat == 7:
url = "http://earthexplorer.usgs.gov/download/3372/LE70410362003114EDC00/STANDARD"
webbrowser.open(url)
elif landsat == 5:
url = "http://earthexplorer.usgs.gov/download/3119/LT50410362011208PAC01/STANDARD"
webbrowser.open(url)
elif landsat == 4:
url = "http://earthexplorer.usgs.gov/download/3119/LT40410361990014XXX01/STANDARD"
webbrowser.open(url)
else:
print "Please enter 4, 5, 7, or 8"
return
| 23,682
|
def clear_file(filename: str):
"""Method to clean file."""
open(filename, 'w').close()
| 23,683
|
def redirect_return():
"""Redirects back from page with url generated by url_return."""
return redirect(str(Url.get_return()))
| 23,684
|
def ButtonDisplay(hand_view):
""" This updates draw pile and action buttons. It is called in HandView.update each render cycle. """
loc_xy = (hand_view.draw_pile.x, hand_view.draw_pile.y)
hand_view.draw_pile.draw(hand_view.display, loc_xy, hand_view.draw_pile.outline_color)
# update discard info and redraw
discard_info = hand_view.controller.getDiscardInfo()
hand_view.top_discard = discard_info[0]
hand_view.pickup_pile_sz = discard_info[1]
if hand_view.pickup_pile_sz > 0:
# pickup_pile needs new clickable image each time any player discards or picks up the pile.
        # Next few lines ensure pickup_pile image is up to date.
hand_view.top_discard_wrapped = UICardWrapper(hand_view.top_discard, (100, 25), UIC.scale)
hand_view.pickup_pile = hand_view.top_discard_wrapped.img_clickable
loc_xy = (hand_view.pickup_pile.x, hand_view.pickup_pile.y)
# UICardWrapper sets outline color to no outline, next line resets outline to proper value.
hand_view.pickup_pile.outline_color = hand_view.pickup_pile_outline
hand_view.pickup_pile.draw(hand_view.display, loc_xy, hand_view.pickup_pile.outline_color)
hand_view.labelMedium(str(hand_view.pickup_pile_sz), 150, 35)
if hand_view.controller._state.round == -1:
hand_view.ready_yes_btn.draw(hand_view.display, hand_view.ready_yes_btn.outline_color)
hand_view.ready_no_btn.draw(hand_view.display, hand_view.ready_no_btn.outline_color)
else:
hand_view.labelMedium(str(Meld_Threshold[hand_view.controller._state.round]) + " points to meld",
hand_view.round_indicator_xy[0], hand_view.round_indicator_xy[1])
hand_view.sort_status_btn.draw(hand_view.display, hand_view.sort_status_btn.outline_color)
hand_view.sort_btn.draw(hand_view.display, hand_view.sort_btn.outline_color)
hand_view.prepare_card_btn.draw(hand_view.display, hand_view.prepare_card_btn.outline_color)
hand_view.clear_prepared_cards_btn.draw(hand_view.display, hand_view.clear_prepared_cards_btn.outline_color)
hand_view.clear_selected_cards_btn.draw(hand_view.display, hand_view.clear_selected_cards_btn.outline_color)
hand_view.play_prepared_cards_btn.draw(hand_view.display, hand_view.play_prepared_cards_btn.outline_color)
hand_view.discard_action_btn.draw(hand_view.display, hand_view.discard_action_btn.outline_color)
hand_view.heart_btn.draw(hand_view.display, hand_view.heart_btn.outline_color)
return
| 23,685
|
def setup(coresys: CoreSys) -> EvaluateBase:
"""Initialize evaluation-setup function."""
return EvaluateOperatingSystem(coresys)
| 23,686
|
def _weight_initializers(seed=42):
"""Function returns initilializers to be used in the model."""
kernel_initializer = tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.02, seed=seed
)
bias_initializer = tf.keras.initializers.Zeros()
return kernel_initializer, bias_initializer
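# Hedged usage sketch (not part of the original source): wires the returned initializers into
# a Keras layer; `tf` is the module-level TensorFlow import already used above.
def _weight_initializers_example():
    kernel_init, bias_init = _weight_initializers(seed=7)
    return tf.keras.layers.Dense(
        8, kernel_initializer=kernel_init, bias_initializer=bias_init
    )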
| 23,687
|
def unot(b: bool) -> bool:
"""
"""
...
| 23,688
|
def inputs(eval_data, data_dir, batch_size):
"""Construct input for eye evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
data_dir: Path to the eye data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
if not eval_data:
data_dir = os.path.join(data_dir, 'tr')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
else:
data_dir = os.path.join(data_dir, 'te')
filenames = []
for folder_name in LABLES:
folder_path = os.path.join(data_dir, folder_name)
filenames += [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_eye(filename_queue)
reshaped_image = tf.cast(read_input.uint8image, tf.float32)
height = IMAGE_SIZE
width = IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
width, height)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_whitening(resized_image)
# Fix the shape of Tensor
float_image.set_shape([height, width, 3])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.1
min_queue_examples = int(num_examples_per_epoch *
min_fraction_of_examples_in_queue)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(float_image, read_input.label,
min_queue_examples, batch_size,
shuffle=False)
| 23,689
|
def test_queryparser_stoplist_iter():
"""Test QueryParser stoplist iterator.
"""
stemmer = xapian.Stem('en')
# Check behaviour without having set a stoplist.
queryparser = xapian.QueryParser()
queryparser.set_stemmer(stemmer)
queryparser.set_stemming_strategy(queryparser.STEM_SOME)
expect([term for term in queryparser.stoplist()], [])
query = queryparser.parse_query('to be or not to be is the questions')
expect([term for term in queryparser.stoplist()], [])
expect(str(query),
'Xapian::Query((Zto:(pos=1) OR Zbe:(pos=2) OR Zor:(pos=3) OR '
'Znot:(pos=4) OR Zto:(pos=5) OR Zbe:(pos=6) OR Zis:(pos=7) OR '
'Zthe:(pos=8) OR Zquestion:(pos=9)))')
# Check behaviour with a stoplist, but no stemmer
queryparser = xapian.QueryParser()
stopper = xapian.SimpleStopper()
stopper.add('to')
stopper.add('not')
stopper.add('question')
queryparser.set_stopper(stopper)
expect([term for term in queryparser.stoplist()], [])
query = queryparser.parse_query('to be or not to be is the questions')
expect([term for term in queryparser.stoplist()], ['to', 'not', 'to'])
expect(str(query),
'Xapian::Query((be:(pos=2) OR or:(pos=3) OR '
'be:(pos=6) OR is:(pos=7) OR '
'the:(pos=8) OR questions:(pos=9)))')
# Check behaviour with a stoplist and a stemmer
queryparser.set_stemmer(stemmer)
queryparser.set_stemming_strategy(queryparser.STEM_SOME)
expect([term for term in queryparser.stoplist()], ['to', 'not', 'to']) # Shouldn't have changed since previous query.
query = queryparser.parse_query('to be or not to be is the questions')
expect([term for term in queryparser.stoplist()], ['to', 'not', 'to'])
expect(str(query),
'Xapian::Query((Zbe:(pos=2) OR Zor:(pos=3) OR '
'Zbe:(pos=6) OR Zis:(pos=7) OR '
'Zthe:(pos=8) OR Zquestion:(pos=9)))')
| 23,690
|
def build_docker_image_docker(context,
docker_file,
external_docker_name,
push_connection: Connection,
pull_connection: typing.Optional[Connection] = None):
"""
Build and push docker image
:param context: docker build context
:param docker_file: Dockerfile name (relative to docker build context)
:param external_docker_name: external docker image target name, without host
:param push_connection: connection for pushing Docker images to
:param pull_connection: (Optional) connection for pulling Docker image during build from
:return:
"""
logging.debug('Building docker client from ENV variables')
client = docker.from_env()
# Authorize for pull
if pull_connection:
        logging.debug('Trying to authorize user for pulling sources')
_authorize_docker(client, pull_connection)
# Build image
streamer = client.api.build(path=context,
rm=True,
dockerfile=docker_file,
tag=external_docker_name)
for chunk in streamer:
if isinstance(chunk, bytes):
chunk = chunk.decode('utf-8')
try:
chunk_json = json.loads(chunk)
if 'stream' in chunk_json:
for line in chunk_json['stream'].splitlines():
LOGGER.info(line.strip())
except json.JSONDecodeError:
LOGGER.info(chunk)
# Tag for pushing
remote_tag = f'{push_connection.spec.uri}/{external_docker_name}'
local_built = client.images.get(external_docker_name)
local_built.tag(remote_tag)
# Push
log_generator = client.images.push(repository=remote_tag,
stream=True,
auth_config={
'username': push_connection.spec.username,
'password': push_connection.spec.password
})
for line in log_generator:
if isinstance(line, bytes):
line = line.decode('utf-8')
LOGGER.info(line)
client.images.remove(remote_tag)
client.images.remove(external_docker_name)
| 23,691
|
def get_non_ready_rs_pod_names(namespace):
"""
get names of rs pods that are not ready
"""
pod_names = []
rs_pods = get_pods(namespace, selector='redis.io/role=node')
if not rs_pods:
logger.info("Namespace '%s': cannot find redis enterprise pods", namespace)
return []
for rs_pod in rs_pods:
pod_name = rs_pod['metadata']['name']
if "status" in rs_pod and "containerStatuses" in rs_pod["status"]:
for container_status_entry in rs_pod["status"]["containerStatuses"]:
container_name = container_status_entry['name']
is_ready = container_status_entry["ready"]
if container_name == RLEC_CONTAINER_NAME and not is_ready:
pod_names.append(pod_name)
return pod_names
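
# Usage sketch (the namespace name is a placeholder; get_pods() and logger come from
# the surrounding module):
not_ready = get_non_ready_rs_pod_names("redis-enterprise")
if not_ready:
    logger.info("Waiting for pods to become ready: %s", ", ".join(not_ready))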
| 23,692
|
async def test_async_get_book(aresponses, readarr_client: ReadarrClient) -> None:
"""Test getting book info."""
aresponses.add(
"127.0.0.1:8787",
f"/api/{READARR_API}/book/0",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text=load_fixture("readarr/book.json"),
),
match_querystring=True,
)
data = await readarr_client.async_get_book(bookid=0)
assert isinstance(data[0].id, int)
assert data[0].title == "string"
assert data[0].authorTitle == "string"
assert data[0].seriesTitle == "string"
assert data[0].disambiguation == "string"
assert data[0].overview == "string"
assert isinstance(data[0].authorId, int)
assert data[0].foreignBookId == "string"
assert isinstance(data[0].titleSlug, int)
assert data[0].monitored is True
assert data[0].anyEditionOk is True
assert isinstance(data[0].ratings.votes, int)
assert isinstance(data[0].ratings.value, float)
assert isinstance(data[0].ratings.popularity, int)
assert data[0].releaseDate == datetime(2021, 12, 7, 9, 7, 35, 508000)
assert isinstance(data[0].pageCount, int)
assert data[0].genres[0] == "string"
assert isinstance(data[0].author.id, int)
assert isinstance(data[0].author.authorMetadataId, int)
assert data[0].author.status == "string"
assert data[0].author.ended is True
assert data[0].author.authorName == "string"
assert data[0].author.authorNameLastFirst == "string"
assert data[0].author.foreignAuthorId == "string"
assert isinstance(data[0].author.titleSlug, int)
assert data[0].author.overview == "string"
assert data[0].author.disambiguation == "string"
assert data[0].author.links[0].url == "string"
assert data[0].author.links[0].name == "string"
_book = data[0].author.nextBook
assert isinstance(_book.id, int)
assert isinstance(_book.authorMetadataId, int)
assert _book.foreignBookId == "string"
assert isinstance(_book.titleSlug, int)
assert _book.title == "string"
assert _book.releaseDate == datetime(2021, 12, 7, 9, 7, 35, 508000)
assert _book.links[0].url == "string"
assert _book.links[0].name == "string"
assert _book.genres[0] == "string"
assert isinstance(_book.ratings.votes, int)
assert isinstance(_book.ratings.value, float)
assert isinstance(_book.ratings.popularity, int)
assert _book.cleanTitle == "string"
assert _book.monitored is True
assert _book.anyEditionOk is True
assert _book.lastInfoSync == datetime(2021, 12, 7, 9, 7, 35, 508000)
assert _book.added == datetime(2021, 12, 7, 9, 7, 35, 508000)
assert _book.addOptions.addType == AddTypes.AUTOMATIC.value
assert _book.addOptions.searchForNewBook is True
_value = _book.authorMetadata.value
assert isinstance(_value.id, int)
assert _value.foreignAuthorId == "string"
assert isinstance(_value.titleSlug, int)
assert _value.name == "string"
assert _value.sortName == "string"
assert _value.nameLastFirst == "string"
assert _value.sortNameLastFirst == "string"
assert _value.aliases[0] == "string"
assert _value.overview == "string"
assert _value.disambiguation == "string"
assert _value.gender == "string"
assert _value.hometown == "string"
assert _value.born == datetime(2021, 12, 7, 9, 7, 35, 508000)
assert _value.died == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.status == "string"
assert _value.images[0].url == "string"
assert _value.images[0].coverType == ImageType.POSTER.value
assert _value.images[0].extension == "string"
assert _value.links[0].url == "string"
assert _value.links[0].name == "string"
assert _value.genres[0] == "string"
assert isinstance(_value.ratings.votes, int)
assert isinstance(_value.ratings.value, float)
assert isinstance(_value.ratings.popularity, int)
assert data[0].author.nextBook.authorMetadata.isLoaded is True
_value = data[0].author.nextBook.author.value
assert isinstance(_value.id, int)
assert isinstance(_value.authorMetadataId, int)
assert _value.cleanName == "string"
assert _value.monitored is True
assert _value.lastInfoSync == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.path == "string"
assert _value.rootFolderPath == "string"
assert _value.added == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert isinstance(_value.qualityProfileId, int)
assert isinstance(_value.metadataProfileId, int)
assert isinstance(_value.tags[0], int)
assert _value.addOptions.monitor == MonitoringOptionsType.ALL.value
assert _value.addOptions.booksToMonitor[0] == "string"
assert _value.addOptions.monitored is True
assert _value.addOptions.searchForMissingBooks is True
assert isinstance(_value.metadata.value.id, int)
assert _value.metadata.value.foreignAuthorId == "string"
assert isinstance(_value.metadata.value.titleSlug, int)
assert _value.metadata.value.name == "string"
assert _value.metadata.value.sortName == "string"
assert _value.metadata.value.nameLastFirst == "string"
assert _value.metadata.value.sortNameLastFirst == "string"
assert _value.metadata.value.aliases[0] == "string"
assert _value.metadata.value.overview == "string"
assert _value.metadata.value.disambiguation == "string"
assert _value.metadata.value.gender == "string"
assert _value.metadata.value.hometown == "string"
assert _value.metadata.value.born == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.metadata.value.died == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.metadata.value.status == "string"
assert _value.metadata.value.images[0].url == "string"
assert _value.metadata.value.images[0].coverType == ImageType.POSTER.value
assert _value.metadata.value.images[0].extension == "string"
assert _value.metadata.value.links[0].url == "string"
assert _value.metadata.value.links[0].name == "string"
assert _value.metadata.value.genres[0] == "string"
assert isinstance(_value.metadata.value.ratings.votes, int)
assert isinstance(_value.metadata.value.ratings.value, float)
assert isinstance(_value.metadata.value.ratings.popularity, int)
assert _value.metadata.isLoaded is True
assert isinstance(_value.qualityProfile.value.id, int)
assert _value.qualityProfile.value.name == "string"
assert _value.qualityProfile.value.upgradeAllowed is True
assert isinstance(_value.qualityProfile.value.cutoff, int)
assert isinstance(_value.qualityProfile.value.items[0].id, int)
assert _value.qualityProfile.value.items[0].name == "string"
assert isinstance(_value.qualityProfile.value.items[0].quality.id, int)
assert _value.qualityProfile.value.items[0].quality.name == "string"
assert _value.qualityProfile.value.items[0].items[0] is None
assert _value.qualityProfile.value.items[0].allowed is True
assert _value.qualityProfile.isLoaded is True
assert isinstance(_value.metadataProfile.value.id, int)
assert _value.metadataProfile.value.name == "string"
assert isinstance(_value.metadataProfile.value.minPopularity, int)
assert _value.metadataProfile.value.skipMissingDate is True
assert _value.metadataProfile.value.skipMissingIsbn is True
assert _value.metadataProfile.value.skipPartsAndSets is True
assert _value.metadataProfile.value.skipSeriesSecondary is True
assert _value.metadataProfile.value.allowedLanguages == "string"
assert isinstance(_value.metadataProfile.value.minPages, int)
assert _value.metadataProfile.value.ignored == "string"
assert _value.metadataProfile.isLoaded is True
assert _value.books.value[0] is None
assert _value.books.isLoaded is True
assert isinstance(_value.series.value[0].id, int)
assert _value.series.value[0].foreignSeriesId == "string"
assert _value.series.value[0].title == "string"
assert _value.series.value[0].description == "string"
assert _value.series.value[0].numbered is True
assert isinstance(_value.series.value[0].workCount, int)
assert isinstance(_value.series.value[0].primaryWorkCount, int)
assert _value.series.value[0].books.value[0] is None
assert _value.series.value[0].books.isLoaded is True
assert _value.series.value[0].foreignAuthorId == "string"
assert _value.series.isLoaded is True
assert _value.name == "string"
assert _value.foreignAuthorId == "string"
assert data[0].author.nextBook.author.isLoaded is True
_value = data[0].author.nextBook.editions.value[0]
assert isinstance(_value.id, int)
assert isinstance(_value.bookId, int)
assert _value.foreignEditionId == "string"
assert isinstance(_value.titleSlug, int)
assert _value.isbn13 == "string"
assert _value.asin == "string"
assert _value.title == "string"
assert _value.language == "string"
assert _value.overview == "string"
assert _value.format == "string"
assert _value.isEbook is True
assert _value.disambiguation == "string"
assert _value.publisher == "string"
assert isinstance(_value.pageCount, int)
assert _value.releaseDate == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.images[0].url == "string"
assert _value.images[0].coverType == ImageType.POSTER.value
assert _value.images[0].extension == "string"
assert _value.links[0].url == "string"
assert _value.links[0].name == "string"
assert isinstance(_value.ratings.votes, int)
assert isinstance(_value.ratings.value, float)
assert isinstance(_value.ratings.popularity, int)
assert _value.monitored is True
assert _value.manualAdd is True
assert _value.book.isLoaded is True
_valu = _value.bookFiles.value[0]
assert isinstance(_valu.id, int)
assert _valu.path == "string"
assert isinstance(_valu.size, int)
assert _valu.modified == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _valu.dateAdded == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _valu.sceneName == "string"
assert _valu.releaseGroup == "string"
assert isinstance(_valu.quality.quality.id, int)
assert _valu.quality.quality.name == "string"
assert isinstance(_valu.quality.revision.version, int)
assert isinstance(_valu.quality.revision.real, int)
assert _valu.quality.revision.isRepack is True
assert _valu.mediaInfo.audioFormat == "string"
assert isinstance(_valu.mediaInfo.audioBitrate, int)
assert isinstance(_valu.mediaInfo.audioChannels, float)
assert isinstance(_valu.mediaInfo.audioBits, int)
assert _valu.mediaInfo.audioSampleRate == "string"
assert isinstance(_valu.editionId, int)
assert isinstance(_valu.calibreId, int)
assert isinstance(_valu.part, int)
_val = _valu.author.value
assert isinstance(_val.id, int)
assert isinstance(_val.authorMetadataId, int)
assert _val.cleanName == "string"
assert _val.monitored is True
assert _val.lastInfoSync == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _val.path == "string"
assert _val.rootFolderPath == "string"
assert _val.added == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert isinstance(_val.qualityProfileId, int)
assert isinstance(_val.metadataProfileId, int)
assert isinstance(_val.tags[0], int)
assert _val.addOptions.monitor == MonitoringOptionsType.ALL.value
assert _val.addOptions.booksToMonitor[0] == "string"
assert _val.addOptions.monitored is True
assert _val.addOptions.searchForMissingBooks is True
assert isinstance(_val.metadata.value.id, int)
assert _val.metadata.value.foreignAuthorId == "string"
assert isinstance(_val.metadata.value.titleSlug, int)
assert _val.metadata.value.name == "string"
assert _val.metadata.value.sortName == "string"
assert _val.metadata.value.nameLastFirst == "string"
assert _val.metadata.value.sortNameLastFirst == "string"
assert _val.metadata.value.aliases[0] == "string"
assert _val.metadata.value.overview == "string"
assert _val.metadata.value.disambiguation == "string"
assert _val.metadata.value.gender == "string"
assert _val.metadata.value.hometown == "string"
assert _val.metadata.value.born == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _val.metadata.value.died == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _val.metadata.value.status == "string"
assert _val.metadata.value.images[0].url == "string"
assert _val.metadata.value.images[0].coverType == ImageType.POSTER.value
assert _val.metadata.value.images[0].extension == "string"
assert _val.metadata.value.links[0].url == "string"
assert _val.metadata.value.links[0].name == "string"
assert _val.metadata.value.genres[0] == "string"
assert isinstance(_val.metadata.value.ratings.votes, int)
assert isinstance(_val.metadata.value.ratings.value, float)
assert isinstance(_val.metadata.value.ratings.popularity, int)
assert _val.metadata.isLoaded is True
assert isinstance(_val.qualityProfile.value.id, int)
assert _val.qualityProfile.value.name == "string"
assert _val.qualityProfile.value.upgradeAllowed is True
assert isinstance(_val.qualityProfile.value.cutoff, int)
assert isinstance(_val.qualityProfile.value.items[0].id, int)
assert _val.qualityProfile.value.items[0].name == "string"
assert isinstance(_val.qualityProfile.value.items[0].quality.id, int)
assert _val.qualityProfile.value.items[0].quality.name == "string"
assert _val.qualityProfile.value.items[0].items[0] is None
assert _val.qualityProfile.value.items[0].allowed is True
assert _val.qualityProfile.isLoaded is True
assert isinstance(_val.metadataProfile.value.id, int)
assert _val.metadataProfile.value.name == "string"
assert isinstance(_val.metadataProfile.value.minPopularity, int)
assert _val.metadataProfile.value.skipMissingDate is True
assert _val.metadataProfile.value.skipMissingIsbn is True
assert _val.metadataProfile.value.skipPartsAndSets is True
assert _val.metadataProfile.value.skipSeriesSecondary is True
assert _val.metadataProfile.value.allowedLanguages == "string"
assert isinstance(_val.metadataProfile.value.minPages, int)
assert _val.metadataProfile.value.ignored == "string"
assert _val.metadataProfile.isLoaded is True
assert _val.books.value[0] is None
assert _val.books.isLoaded is True
assert isinstance(_val.series.value[0].id, int)
assert _val.series.value[0].foreignSeriesId == "string"
assert _val.series.value[0].title == "string"
assert _val.series.value[0].description == "string"
assert _val.series.value[0].numbered is True
assert isinstance(_val.series.value[0].workCount, int)
assert isinstance(_val.series.value[0].primaryWorkCount, int)
assert _val.series.value[0].books.value[0] is None
assert _val.series.value[0].books.isLoaded is True
assert _val.series.value[0].foreignAuthorId == "string"
assert _val.series.isLoaded is True
assert _val.name == "string"
assert _val.foreignAuthorId == "string"
assert _value.bookFiles.value[0].author.isLoaded is True
assert _value.bookFiles.value[0].edition.isLoaded is True
assert isinstance(_value.bookFiles.value[0].partCount, int)
assert _value.bookFiles.isLoaded is True
value = data[0].author.nextBook
assert isinstance(value.seriesLinks.value[0].id, int)
assert value.seriesLinks.value[0].position == "string"
assert isinstance(value.seriesLinks.value[0].seriesId, int)
assert isinstance(value.seriesLinks.value[0].bookId, int)
assert value.seriesLinks.value[0].isPrimary is True
assert isinstance(value.seriesLinks.value[0].series.value.id, int)
assert value.seriesLinks.value[0].series.value.foreignSeriesId == "string"
assert value.seriesLinks.value[0].series.value.title == "string"
assert value.seriesLinks.value[0].series.value.description == "string"
assert value.seriesLinks.value[0].series.value.numbered is True
assert isinstance(value.seriesLinks.value[0].series.value.workCount, int)
assert isinstance(value.seriesLinks.value[0].series.value.primaryWorkCount, int)
assert value.seriesLinks.value[0].series.value.books.value[0] is None
assert value.seriesLinks.value[0].series.value.books.isLoaded is True
assert value.seriesLinks.value[0].series.value.foreignAuthorId == "string"
assert value.seriesLinks.value[0].series.isLoaded is True
assert value.seriesLinks.value[0].book.isLoaded is True
assert value.seriesLinks.isLoaded is True
_book = data[0].author.lastBook
assert isinstance(_book.id, int)
assert isinstance(_book.authorMetadataId, int)
assert _book.foreignBookId == "string"
assert isinstance(_book.titleSlug, int)
assert _book.title == "string"
assert _book.releaseDate == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _book.links[0].url == "string"
assert _book.links[0].name == "string"
assert _book.genres[0] == "string"
assert isinstance(_book.ratings.votes, int)
assert isinstance(_book.ratings.value, float)
assert isinstance(_book.ratings.popularity, int)
assert _book.cleanTitle == "string"
assert _book.monitored is True
assert _book.anyEditionOk is True
assert _book.lastInfoSync == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _book.added == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _book.addOptions.addType == AddTypes.AUTOMATIC.value
assert _book.addOptions.searchForNewBook is True
_value = _book.authorMetadata.value
assert isinstance(_value.id, int)
assert _value.foreignAuthorId == "string"
assert isinstance(_value.titleSlug, int)
assert _value.name == "string"
assert _value.sortName == "string"
assert _value.nameLastFirst == "string"
assert _value.sortNameLastFirst == "string"
assert _value.aliases[0] == "string"
assert _value.overview == "string"
assert _value.disambiguation == "string"
assert _value.gender == "string"
assert _value.hometown == "string"
assert _value.born == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.died == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.status == "string"
assert _value.images[0].url == "string"
assert _value.images[0].coverType == ImageType.POSTER.value
assert _value.images[0].extension == "string"
assert _value.links[0].url == "string"
assert _value.links[0].name == "string"
assert _value.genres[0] == "string"
assert isinstance(_value.ratings.votes, int)
assert isinstance(_value.ratings.value, float)
assert isinstance(_value.ratings.popularity, int)
assert data[0].author.lastBook.authorMetadata.isLoaded is True
_value = data[0].author.lastBook.author.value
assert isinstance(_value.id, int)
assert isinstance(_value.authorMetadataId, int)
assert _value.cleanName == "string"
assert _value.monitored is True
assert _value.lastInfoSync == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.path == "string"
assert _value.rootFolderPath == "string"
assert _value.added == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert isinstance(_value.qualityProfileId, int)
assert isinstance(_value.metadataProfileId, int)
assert isinstance(_value.tags[0], int)
assert _value.addOptions.monitor == MonitoringOptionsType.ALL.value
assert _value.addOptions.booksToMonitor[0] == "string"
assert _value.addOptions.monitored is True
assert _value.addOptions.searchForMissingBooks is True
assert isinstance(_value.metadata.value.id, int)
assert _value.metadata.value.foreignAuthorId == "string"
assert isinstance(_value.metadata.value.titleSlug, int)
assert _value.metadata.value.name == "string"
assert _value.metadata.value.sortName == "string"
assert _value.metadata.value.nameLastFirst == "string"
assert _value.metadata.value.sortNameLastFirst == "string"
assert _value.metadata.value.aliases[0] == "string"
assert _value.metadata.value.overview == "string"
assert _value.metadata.value.disambiguation == "string"
assert _value.metadata.value.gender == "string"
assert _value.metadata.value.hometown == "string"
assert _value.metadata.value.born == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.metadata.value.died == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.metadata.value.status == "string"
assert _value.metadata.value.images[0].url == "string"
assert _value.metadata.value.images[0].coverType == ImageType.POSTER.value
assert _value.metadata.value.images[0].extension == "string"
assert _value.metadata.value.links[0].url == "string"
assert _value.metadata.value.links[0].name == "string"
assert _value.metadata.value.genres[0] == "string"
assert isinstance(_value.metadata.value.ratings.votes, int)
assert isinstance(_value.metadata.value.ratings.value, float)
assert isinstance(_value.metadata.value.ratings.popularity, int)
assert _value.metadata.isLoaded is True
assert isinstance(_value.qualityProfile.value.id, int)
assert _value.qualityProfile.value.name == "string"
assert _value.qualityProfile.value.upgradeAllowed is True
assert isinstance(_value.qualityProfile.value.cutoff, int)
assert isinstance(_value.qualityProfile.value.items[0].id, int)
assert _value.qualityProfile.value.items[0].name == "string"
assert isinstance(_value.qualityProfile.value.items[0].quality.id, int)
assert _value.qualityProfile.value.items[0].quality.name == "string"
assert _value.qualityProfile.value.items[0].items[0] is None
assert _value.qualityProfile.value.items[0].allowed is True
assert _value.qualityProfile.isLoaded is True
assert isinstance(_value.metadataProfile.value.id, int)
assert _value.metadataProfile.value.name == "string"
assert isinstance(_value.metadataProfile.value.minPopularity, int)
assert _value.metadataProfile.value.skipMissingDate is True
assert _value.metadataProfile.value.skipMissingIsbn is True
assert _value.metadataProfile.value.skipPartsAndSets is True
assert _value.metadataProfile.value.skipSeriesSecondary is True
assert _value.metadataProfile.value.allowedLanguages == "string"
assert isinstance(_value.metadataProfile.value.minPages, int)
assert _value.metadataProfile.value.ignored == "string"
assert _value.metadataProfile.isLoaded is True
assert _value.books.value[0] is None
assert _value.books.isLoaded is True
assert isinstance(_value.series.value[0].id, int)
assert _value.series.value[0].foreignSeriesId == "string"
assert _value.series.value[0].title == "string"
assert _value.series.value[0].description == "string"
assert _value.series.value[0].numbered is True
assert isinstance(_value.series.value[0].workCount, int)
assert isinstance(_value.series.value[0].primaryWorkCount, int)
assert _value.series.value[0].books.value[0] is None
assert _value.series.value[0].books.isLoaded is True
assert _value.series.value[0].foreignAuthorId == "string"
assert _value.series.isLoaded is True
assert _value.name == "string"
assert _value.foreignAuthorId == "string"
assert data[0].author.lastBook.author.isLoaded is True
_value = data[0].author.lastBook.editions.value[0]
assert isinstance(_value.id, int)
assert isinstance(_value.bookId, int)
assert _value.foreignEditionId == "string"
assert isinstance(_value.titleSlug, int)
assert _value.isbn13 == "string"
assert _value.asin == "string"
assert _value.title == "string"
assert _value.language == "string"
assert _value.overview == "string"
assert _value.format == "string"
assert _value.isEbook is True
assert _value.disambiguation == "string"
assert _value.publisher == "string"
assert isinstance(_value.pageCount, int)
assert _value.releaseDate == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _value.images[0].url == "string"
assert _value.images[0].coverType == ImageType.POSTER.value
assert _value.images[0].extension == "string"
assert _value.links[0].url == "string"
assert _value.links[0].name == "string"
assert isinstance(_value.ratings.votes, int)
assert isinstance(_value.ratings.value, float)
assert isinstance(_value.ratings.popularity, int)
assert _value.monitored is True
assert _value.manualAdd is True
assert _value.book.isLoaded is True
_val = _value.bookFiles.value[0]
assert isinstance(_val.id, int)
assert _val.path == "string"
assert isinstance(_val.size, int)
assert _val.modified == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _val.dateAdded == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _val.sceneName == "string"
assert _val.releaseGroup == "string"
assert isinstance(_val.quality.quality.id, int)
assert _val.quality.quality.name == "string"
assert isinstance(_val.quality.revision.version, int)
assert isinstance(_val.quality.revision.real, int)
assert _val.quality.revision.isRepack is True
assert _val.mediaInfo.audioFormat == "string"
assert isinstance(_val.mediaInfo.audioBitrate, int)
assert isinstance(_val.mediaInfo.audioChannels, float)
assert isinstance(_val.mediaInfo.audioBits, int)
assert _val.mediaInfo.audioSampleRate == "string"
assert isinstance(_val.editionId, int)
assert isinstance(_val.calibreId, int)
assert isinstance(_val.part, int)
assert isinstance(_val.author.value.id, int)
assert isinstance(_val.author.value.authorMetadataId, int)
assert _val.author.value.cleanName == "string"
assert _val.author.value.monitored is True
assert _val.author.value.lastInfoSync == datetime(2021, 12, 7, 9, 7, 35, 509000)
assert _val.author.value.path == "string"
assert _val.author.value.rootFolderPath == "string"
assert _val.author.value.added == datetime(2021, 12, 7, 9, 7, 35, 510000)
assert isinstance(_val.author.value.qualityProfileId, int)
assert isinstance(_val.author.value.metadataProfileId, int)
assert isinstance(_val.author.value.tags[0], int)
assert _val.author.value.addOptions.monitor == MonitoringOptionsType.ALL.value
assert _val.author.value.addOptions.booksToMonitor[0] == "string"
assert _val.author.value.addOptions.monitored is True
assert _val.author.value.addOptions.searchForMissingBooks is True
_valu = _val.author.value.metadata.value
assert isinstance(_valu.id, int)
assert _valu.foreignAuthorId == "string"
assert isinstance(_valu.titleSlug, int)
assert _valu.name == "string"
assert _valu.sortName == "string"
assert _valu.nameLastFirst == "string"
assert _valu.sortNameLastFirst == "string"
assert _valu.aliases[0] == "string"
assert _valu.overview == "string"
assert _valu.disambiguation == "string"
assert _valu.gender == "string"
assert _valu.hometown == "string"
assert _valu.born == datetime(2021, 12, 7, 9, 7, 35, 510000)
assert _valu.died == datetime(2021, 12, 7, 9, 7, 35, 510000)
assert _valu.status == "string"
assert _valu.images[0].url == "string"
assert _valu.images[0].coverType == ImageType.POSTER.value
assert _valu.images[0].extension == "string"
assert _valu.links[0].url == "string"
assert _valu.links[0].name == "string"
assert _valu.genres[0] == "string"
assert isinstance(_valu.ratings.votes, int)
assert isinstance(_valu.ratings.value, float)
assert isinstance(_valu.ratings.popularity, int)
assert _val.author.value.metadata.isLoaded is True
assert isinstance(_val.author.value.qualityProfile.value.id, int)
assert _val.author.value.qualityProfile.value.name == "string"
assert _val.author.value.qualityProfile.value.upgradeAllowed is True
assert isinstance(_val.author.value.qualityProfile.value.cutoff, int)
assert isinstance(_val.author.value.qualityProfile.value.items[0].id, int)
assert _val.author.value.qualityProfile.value.items[0].name == "string"
assert isinstance(_val.author.value.qualityProfile.value.items[0].quality.id, int)
assert _val.author.value.qualityProfile.value.items[0].quality.name == "string"
assert _val.author.value.qualityProfile.value.items[0].items[0] is None
assert _val.author.value.qualityProfile.value.items[0].allowed is True
assert _val.author.value.qualityProfile.isLoaded is True
assert isinstance(_val.author.value.metadataProfile.value.id, int)
assert _val.author.value.metadataProfile.value.name == "string"
assert isinstance(_val.author.value.metadataProfile.value.minPopularity, int)
assert _val.author.value.metadataProfile.value.skipMissingDate is True
assert _val.author.value.metadataProfile.value.skipMissingIsbn is True
assert _val.author.value.metadataProfile.value.skipPartsAndSets is True
assert _val.author.value.metadataProfile.value.skipSeriesSecondary is True
assert _val.author.value.metadataProfile.value.allowedLanguages == "string"
assert isinstance(_val.author.value.metadataProfile.value.minPages, int)
assert _val.author.value.metadataProfile.value.ignored == "string"
assert _val.author.value.metadataProfile.isLoaded is True
assert _val.author.value.books.value[0] is None
assert _val.author.value.books.isLoaded is True
assert isinstance(_val.author.value.series.value[0].id, int)
assert _val.author.value.series.value[0].foreignSeriesId == "string"
assert _val.author.value.series.value[0].title == "string"
assert _val.author.value.series.value[0].description == "string"
assert _val.author.value.series.value[0].numbered is True
assert isinstance(_val.author.value.series.value[0].workCount, int)
assert isinstance(_val.author.value.series.value[0].primaryWorkCount, int)
assert _val.author.value.series.value[0].books.value[0] is None
assert _val.author.value.series.value[0].books.isLoaded is True
assert _val.author.value.series.value[0].foreignAuthorId == "string"
assert _val.author.value.series.isLoaded is True
assert _val.author.value.name == "string"
assert _val.author.value.foreignAuthorId == "string"
assert _val.author.isLoaded is True
assert _val.edition.isLoaded is True
assert isinstance(_val.partCount, int)
assert _value.bookFiles.isLoaded is True
value = data[0].author.lastBook
assert isinstance(value.seriesLinks.value[0].id, int)
assert value.seriesLinks.value[0].position == "string"
assert isinstance(value.seriesLinks.value[0].seriesId, int)
assert isinstance(value.seriesLinks.value[0].bookId, int)
assert value.seriesLinks.value[0].isPrimary is True
assert isinstance(value.seriesLinks.value[0].series.value.id, int)
assert value.seriesLinks.value[0].series.value.foreignSeriesId == "string"
assert value.seriesLinks.value[0].series.value.title == "string"
assert value.seriesLinks.value[0].series.value.description == "string"
assert value.seriesLinks.value[0].series.value.numbered is True
assert isinstance(value.seriesLinks.value[0].series.value.workCount, int)
assert isinstance(value.seriesLinks.value[0].series.value.primaryWorkCount, int)
assert value.seriesLinks.value[0].series.value.books.value[0] is None
assert value.seriesLinks.value[0].series.value.books.isLoaded is True
assert value.seriesLinks.value[0].series.value.foreignAuthorId == "string"
assert value.seriesLinks.value[0].series.isLoaded is True
assert value.seriesLinks.value[0].book.isLoaded is True
assert value.seriesLinks.isLoaded is True
assert data[0].author.images[0].url == "string"
assert data[0].author.images[0].coverType == ImageType.POSTER.value
assert data[0].author.images[0].extension == "string"
assert data[0].author.remotePoster == "string"
assert data[0].author.path == "string"
assert isinstance(data[0].author.qualityProfileId, int)
assert isinstance(data[0].author.metadataProfileId, int)
assert data[0].author.monitored is True
assert data[0].author.rootFolderPath == "string"
assert data[0].author.genres[0] == "string"
assert data[0].author.cleanName == "string"
assert data[0].author.sortName == "string"
assert data[0].author.sortNameLastFirst == "string"
assert isinstance(data[0].author.tags[0], int)
assert data[0].author.added == datetime(2021, 12, 7, 9, 7, 35, 510000)
assert data[0].author.addOptions.monitor == MonitoringOptionsType.ALL.value
assert data[0].author.addOptions.booksToMonitor[0] == "string"
assert data[0].author.addOptions.monitored is True
assert data[0].author.addOptions.searchForMissingBooks is True
assert isinstance(data[0].author.ratings.votes, int)
assert isinstance(data[0].author.ratings.value, float)
assert isinstance(data[0].author.ratings.popularity, int)
assert isinstance(data[0].author.statistics.bookFileCount, int)
assert isinstance(data[0].author.statistics.bookCount, int)
assert isinstance(data[0].author.statistics.availableBookCount, int)
assert isinstance(data[0].author.statistics.totalBookCount, int)
assert isinstance(data[0].author.statistics.sizeOnDisk, int)
assert isinstance(data[0].author.statistics.percentOfBooks, float)
assert data[0].images[0].url == "string"
assert data[0].images[0].coverType == ImageType.POSTER.value
assert data[0].images[0].extension == "string"
assert data[0].links[0].url == "string"
assert data[0].links[0].name == "string"
assert isinstance(data[0].statistics.bookFileCount, int)
assert isinstance(data[0].statistics.bookCount, int)
assert isinstance(data[0].statistics.totalBookCount, int)
assert isinstance(data[0].statistics.sizeOnDisk, int)
assert isinstance(data[0].statistics.percentOfBooks, float)
assert data[0].added == datetime(2021, 12, 7, 9, 7, 35, 510000)
assert data[0].addOptions.addType == AddTypes.AUTOMATIC.value
assert data[0].addOptions.searchForNewBook is True
assert data[0].remoteCover == "string"
assert isinstance(data[0].editions[0].id, int)
assert isinstance(data[0].editions[0].bookId, int)
assert data[0].editions[0].foreignEditionId == "string"
assert isinstance(data[0].editions[0].titleSlug, int)
assert data[0].editions[0].isbn13 == "string"
assert data[0].editions[0].asin == "string"
assert data[0].editions[0].title == "string"
assert data[0].editions[0].language == "string"
assert data[0].editions[0].overview == "string"
assert data[0].editions[0].format == "string"
assert data[0].editions[0].isEbook is True
assert data[0].editions[0].disambiguation == "string"
assert data[0].editions[0].publisher == "string"
assert isinstance(data[0].editions[0].pageCount, int)
assert data[0].editions[0].releaseDate == datetime(2021, 12, 7, 9, 7, 35, 510000)
assert data[0].editions[0].images[0].url == "string"
assert data[0].editions[0].images[0].coverType == ImageType.POSTER.value
assert data[0].editions[0].images[0].extension == "string"
assert data[0].editions[0].links[0].url == "string"
assert data[0].editions[0].links[0].name == "string"
assert isinstance(data[0].editions[0].ratings.votes, int)
assert isinstance(data[0].editions[0].ratings.value, float)
assert isinstance(data[0].editions[0].ratings.popularity, int)
assert data[0].editions[0].monitored is True
assert data[0].editions[0].manualAdd is True
assert data[0].editions[0].remoteCover == "string"
assert data[0].editions[0].grabbed is True
assert data[0].grabbed is True
| 23,693
|
def lookup_material_probase(information_extractor, query, num):
"""Lookup material in Probase"""
material_params = {
'instance': query,
'topK': num
}
result = information_extractor.lookup_probase(material_params)
rank = information_extractor.rank_probase_result_material(result)
return rank
| 23,694
|
def get_unexpected_exit_events(op):
"""Return all unexpected exit status events."""
events = get_events(op)
if not events:
return None
return [e for e in events if is_unexpected_exit_status_event(e)]
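
# Usage sketch (hypothetical): summarise operations whose processes exited with an
# unexpected status. `ops`, get_events() and the predicate used above come from the
# surrounding module.
def report_unexpected_exits(ops):
    for op in ops:
        events = get_unexpected_exit_events(op)
        if events:
            print("operation %s had %d unexpected exit event(s)" % (op, len(events)))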
| 23,695
|
def count_reads(config_file, input_dir, tmp_dir, output_dir,
read_counts_file, log_file, run_config):
"""
Count reads using :py:mod:`riboviz.tools.count_reads`.
:param config_file: Configuration file (input)
:type config_file: str or unicode
:param input_dir: Input directory
:type input_dir: str or unicode
:param tmp_dir: Temporary directory
:type tmp_dir: str or unicode
:param output_dir: Output directory
:type output_dir: str or unicode
:param read_counts_file: Read counts file (output)
:type read_counts_file: str or unicode
:param log_file: Log file (output)
:type log_file: str or unicode
:param run_config: Run-related configuration
:type run_config: RunConfigTuple
:raise FileNotFoundError: if ``python`` cannot be found
:raise AssertionError: if ``python`` returns a non-zero exit \
code
"""
LOGGER.info("Count reads. Log: %s", log_file)
cmd = ["python", "-m", count_reads_module.__name__,
"-c", config_file,
"-i", input_dir,
"-t", tmp_dir,
"-o", output_dir,
"-r", read_counts_file]
process_utils.run_logged_command(cmd,
log_file,
run_config.cmd_file,
run_config.is_dry_run)
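
# For reference, the command assembled above is equivalent to running the module
# directly on the command line (file and directory names are placeholders):
#
#   python -m riboviz.tools.count_reads -c config.yaml -i input_dir -t tmp_dir \
#       -o output_dir -r read_counts.tsv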
| 23,696
|
async def delete_user(username: str) -> GenericResponse:
"""Delete concrete user by username"""
try:
await MongoDbWrapper().remove_user(username)
    except Exception as exception:
        raise DatabaseException(error=exception) from exception
return GenericResponse(detail="Deleted user")
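
# This coroutine looks like an API endpoint handler; a hypothetical way to mount it on
# a FastAPI router is sketched below. The path, the router object and the assumption
# that GenericResponse is a pydantic model are not taken from the original snippet.
from fastapi import APIRouter

router = APIRouter()
router.add_api_route("/users/{username}", delete_user,
                     methods=["DELETE"], response_model=GenericResponse)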
| 23,697
|
def concat_files(concat_filename="similar.txt"):
    """
    Searches the immediate sub directories for files named ``concat_filename``
    (default: similar.txt), concatenates their contents into a single file of the
    same name in the main directory and removes the per-directory files.
    """
    inpath = concatPath("")
    outstring = ""
    for dirpath, dirnames, _filenames in os.walk(inpath):
        if dirpath != inpath:
            continue
        for dirname in dirnames:
            filename = os.path.join(dirpath, dirname, concat_filename)
            if not os.path.isfile(filename):
                continue
            with open(filename, "r") as ifile:
                outstring += ifile.read()
            os.remove(filename)
    writeToFile(os.path.join(inpath, concat_filename), outstring)
| 23,698
|
def get_changelog():
"""download ChangeLog.txt from github, extract latest version number, return a tuple of (latest_version, contents)
"""
# url will be chosen depend on frozen state of the application
source_code_url = 'https://github.com/pyIDM/pyIDM/raw/master/ChangeLog.txt'
new_release_url = 'https://github.com/pyIDM/pyIDM/releases/download/extra/ChangeLog.txt'
url = new_release_url if config.FROZEN else source_code_url
# url = new_release_url
# get BytesIO object
log('check for PyIDM latest version ...')
buffer = download(url, verbose=False)
if buffer:
# convert to string
contents = buffer.getvalue().decode()
# extract version number from contents
latest_version = contents.splitlines()[0].replace(':', '').strip()
return latest_version, contents
else:
log("check_for_update() --> couldn't check for update, url is unreachable")
return None
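
# Usage sketch (hypothetical): decide whether an update is available by comparing the
# fetched version with the running one. config.APP_VERSION is an assumed attribute;
# the real name may differ in this code base.
def is_update_available():
    result = get_changelog()
    if not result:
        return False
    latest_version, _contents = result
    return latest_version != config.APP_VERSION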
| 23,699
|