| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def sgrib_variable_crop(tmp_grib, nthreads_w, fp_out, logger):
"""
Take the small grib file from grib_to_small_grib and cut it down
to the variables we need
Args:
tmp_grib: File path to small grib2 file
nthreads_w: Number of threads for running wgrib2 commands
fp_out: Path for outputting final grib2 file
logger: Logger used to report wgrib2 failures
Returns:
bool: True if the variables were cut successfully, False otherwise
"""
# call to grab correct variables
action2 = "wgrib2 {} -ncpu {} -match \
'TMP:2 m|UGRD:10 m|VGRD:10 m|TCDC:' -GRIB {}"
action2 = action2.format(tmp_grib,
nthreads_w,
fp_out)
fatl = call_wgrib2(action2, logger)
if fatl:
logger.warning(
'Cutting variables from grib did not work')
os.remove(tmp_grib)
return not fatl
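# Usage sketch (hypothetical paths and thread count) showing the command string this
# helper builds before handing it to call_wgrib2; not part of the original code.
action2 = "wgrib2 {} -ncpu {} -match 'TMP:2 m|UGRD:10 m|VGRD:10 m|TCDC:' -GRIB {}"
print(action2.format("/tmp/hrrr_small.grib2", 4, "/data/hrrr_cropped.grib2"))
# wgrib2 /tmp/hrrr_small.grib2 -ncpu 4 -match 'TMP:2 m|UGRD:10 m|VGRD:10 m|TCDC:' -GRIB /data/hrrr_cropped.grib2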
| 8,700
|
def cs_geo():
"""Geographic lat/lon coordinates in WGS84 datum.
"""
cs = CSGeo()
cs.inventory.datumHoriz = "WGS84"
cs.inventory.datumVert = "mean sea level"
cs.inventory.spaceDim = 2
cs._configure()
cs.initialize()
return cs
| 8,701
|
def make_range(value):
"""
Given an integer 'value',
return the value converted into a range.
"""
return range(value)
| 8,702
|
def run(actor, observer, content):
"""
Shortcut to run an Onirim and return the result.
Returns:
True if win, False if lose, None if another exception is thrown.
"""
return Flow(Core(actor, observer, content)).whole()
| 8,703
|
def sort_bedfile(infile, outfile, add_header: bool = True, sort_by_bedtools: bool = False):
"""
Sort a BED file.
@2020.10.10 by Zhang Yiming: several modifications
1. if infile and outfile are the same, use a temp file
2. add a parameter to control the bed header
3. use check_output to better handle the output from command line tools
"""
if infile == outfile:
outfile = outfile + ".sorted"
if sort_by_bedtools:
pb.BedTool(infile).sort().saveas(outfile)
else:
with open(outfile, 'w') as o:
if add_header:
o.write('track name=\"' + os.path.basename(infile).replace('.bed', '') + '\"\n')
cmd = ['sort', '-k1,1', '-k2,2n', '-T', os.path.dirname(outfile), infile]
stdout, _ = run_command(cmd)
if stdout:
o.write(stdout.decode("utf-8"))
if outfile.endswith(".sorted"):
os.rename(outfile, outfile.replace(".sorted", ""))
| 8,704
|
def GetPoseBoneFCurveFromArmature(armatureObj, poseBoneName, data_path, parameterIndex):
"""
In Blender the FCurves are used to define the Key Frames.
In general, for a single object, there's one FCurve for each of
the following properties.
data_path, index
'location', 0 (.x)
'location', 1 (.y)
'location', 2 (.z)
'rotation_quaternion', 0 (.w)
'rotation_quaternion', 1 (.x)
'rotation_quaternion', 2 (.y)
'rotation_quaternion', 3 (.z)
'scale', 0 (.x)
'scale', 1 (.y)
'scale', 2 (.z)
For more tips about this, see: https://docs.blender.org/api/blender_python_api_2_75_release/info_quickstart.html#animation
Returns a bpy.types.FCurve
"""
completePath = BuildPoseBoneFCurveDataPath(poseBoneName, data_path)
return armatureObj.animation_data.action.fcurves.find(completePath, index=parameterIndex)
| 8,705
|
def get_document_instance(conf=None):
"""
Helper function to get a database Document model instance based on CLA configuration.
:param conf: Same as get_database_models().
:type conf: dict
:return: A Document model instance based on configuration specified.
:rtype: cla.models.model_interfaces.Document
"""
return get_database_models(conf)['Document']()
| 8,706
|
def test_execute_empty_resolved(_mock_call_insights):
"""Test the function execute."""
with open("tests/data/stack_aggregator_empty_resolved.json", "r") as f:
payload = json.load(f)
r = RecommendationTask()
out = r.execute(arguments=payload, persist=False)
assert out['recommendation'] == "success"
assert not out["result"]["recommendations"][0]["companion"]
assert not out["result"]["recommendations"][0]["alternate"]
assert not out["result"]["recommendations"][0]["usage_outliers"]
r = RecommendationTask()
out = r.execute(arguments=payload, check_license=True, persist=False)
assert out['recommendation'] == "success"
out = r.execute(arguments=payload, persist=True)
assert out['recommendation'] == "database error"
| 8,707
|
def test_apptuit_send_exception_repr():
"""
Test __repr__ for ApptuitSendException
"""
err = repr(ApptuitSendException(
"test", 400, 1, 1, [{"datapoint": "test", "error": "test_error"}]
))
assert_equals(err, "1 points failed with status: 400\ntest_error error occurred in the "
"datapoint test\n")
| 8,708
|
def get_full_private_keys(gpg: gnupg.GPG) -> List[GPGKey]:
"""Get a list of private keys with a full private part.
GPG supports exporting only the subkeys for a given key, and in this case
a stub of the primary private key is also exported. This stub
cannot be used to do anything with the primary key, so it's useful to list
only keys that can actually be used.
:param gpg: The GPG interface used by the gnupg library
:return: The list of fully available private keys in the keyring
"""
return [key for key in get_private_keys(gpg) if key.key_token == KeyToken.FULL]
| 8,709
|
def create_blueprint():
"""Creates a Blueprint"""
blueprint = Blueprint('Health Check Blueprint', __name__)
blueprint.route('/')(healthcheck.healthcheck)
return blueprint
| 8,710
|
def main() -> None:
"""Run Sub Manager though the CLI."""
submanager.cli.main()
| 8,711
|
def show_Permissions(dx):
"""
Show where permissions are used in a specific application
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
"""
p = dx.get_permissions([])
for i in p:
print(i, ":")
for j in p[i]:
show_Path(dx.get_vm(), j)
| 8,712
|
def playable_card(card, fireworks, n_colors):
# if isinstance(card, pyhanabi.HanabiCard):
# card = {'color':colors[card.color],'rank':card.rank}
"""A card is playable if it can be placed on the fireworks pile."""
if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
and card.rank != pyhanabi.HanabiCard.RankType.kUnknownRank):
for color in range(n_colors):
if fireworks[color] == card.rank:
continue
else:
return False
return True
# elif card['color'] == None or card['rank'] == None:
if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
and card.rank == pyhanabi.HanabiCard.RankType.kUnknownRank):
return False
else:
return card.rank == fireworks[card.color]
| 8,713
|
def get_transformation_id(action):
""" Get the id of a transformation.
Parameters
----------
action: function
The transformation function
Returns
-------
int
The id of the action (-1 if not found)
"""
for index, trans in TRANSFORMATIONS.items():
if trans == action:
return index
return -1
| 8,714
|
def getString(t):
"""If t is of type string, return it, otherwise raise InvalidTypeError.
"""
s = c_char_p()
if PL_get_chars(t, byref(s), REP_UTF8|CVT_STRING):
return s.value
else:
raise InvalidTypeError("string")
| 8,715
|
def prep_data(filename, in_len, pred_len):
"""load data from the file and chunk it into windows of input"""
# Columns are
# 0:datetime, 1:temperature, 2:humidity, 3:pressure, 4:wind_direction, 5:wind_speed
data = np.genfromtxt(filename, delimiter=',', skip_header=1,
usecols=(1, 2, 3, 4, 5), dtype=float)
# Remove rows that are missing values
data = data[~np.isnan(data).any(axis=1)]
# We will save the last 1/8th of the data for validation/testing data,
# 1/16 for validation, 1/16 for testing
total_len = data.shape[0]
val_len = total_len // 16
test_len = total_len // 16
train_len = total_len - val_len - test_len
train_data = data[:train_len]
val_data = data[train_len:train_len + val_len]
test_data = data[train_len + val_len:]
# To stay in the most accurate ranges of the ESN, and to put the various
# features on equal footing, we standardize the training data.
train_data, mu_arr, sigma_arr = standardize_traindata(train_data)
# We now need to scale our validation and test data by the means and standard
# deviations determined from the training data
val_data = scale_data(val_data, mu_arr, sigma_arr)
test_data = scale_data(test_data, mu_arr, sigma_arr)
# We need to convert the time series data to forecast form for one-step
# prediction training. For simplicity we will discard the remainder batches rU, rY
train_batch_size = 200
val_batch_size = in_len + pred_len + 1
test_batch_size = in_len + pred_len + 1
trainU, trainY, rU, rY = to_forecast_form(train_data, batch_size=train_batch_size)
valU, valY, rU, rY = to_forecast_form(val_data, batch_size=val_batch_size)
testU, testY, rU, rY = to_forecast_form(test_data, batch_size=test_batch_size)
return trainU, trainY, valU, valY, testU, testY, mu_arr, sigma_arr
| 8,716
|
def should_build_ib():
"""
Helper function that detects the system's IB support and returns if we
should build with IB support.
"""
ib_util_found = False
ib_lib_found = False
ib_header_found = False
try:
# If the command doesn't exist, we can directly return instead of
# making a subprocess call
full_cmd_path = get_command_path(IB_DEVINFO_CMD)
if not full_cmd_path:
ib_util_found = False
subprocess.check_output([full_cmd_path, "--list"])
# Here we just would like to simply run the command to test if IB
# related tools / lib are installed without parsing the output. We
# will enable IB build as long as the command runs successfully.
#
# The output should look like either:
#
# > ibv_devinfo --list
# 0 HCAs founds:
#
# or
#
# > ibv_devinfo --list
# 4 HCAs found:
# mlx5_3
# mlx5_2
# mlx5_1
# mlx5_0
ib_util_found = True
except Exception:
# We just take all the exceptions here without affecting the build
ib_util_found = False
lib_paths = list(filter(bool, [
"/usr/lib/",
"/usr/lib/x86_64-linux-gnu/",
"/usr/lib/powerpc64le-linux-gnu/",
"/usr/lib/aarch64-linux-gnu/",
] + gather_paths([
"LIBRARY_PATH",
]) + gather_paths([
"LD_LIBRARY_PATH",
])))
include_paths = [
"/usr/include/",
]
if IS_CONDA:
lib_paths.append(os.path.join(CONDA_DIR, "lib"))
include_paths.append(os.path.join(CONDA_DIR, "include"))
for path in lib_paths:
if path is None or not os.path.exists(path):
continue
ib_libraries = sorted(glob.glob(os.path.join(path, "libibverbs*")))
if ib_libraries:
ib_lib_found = True
break
for path in include_paths:
if path is None or not os.path.exists(path):
continue
if os.path.exists(os.path.join(path, "infiniband/verbs.h")):
ib_header_found = True
break
return ib_util_found and ib_lib_found and ib_header_found
| 8,717
|
def run_reporter():
"""
Entry point to reporter service
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--host",
help="Reporter's host, defaults to localhost",
default="localhost",
action="store",
)
parser.add_argument(
"--port",
help="Reporter's port, defaults to 8555",
default=8555,
action="store",
)
args = parser.parse_args()
reporter_service(args.host, int(args.port))
| 8,718
|
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
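# Minimal usage sketch; assumes the function above is in scope and scikit-learn is
# available. The labels and class names here are made up for illustration.
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 2, 2]
y_pred = [0, 1, 1, 1, 2, 0]
cm = confusion_matrix(y_true, y_pred)
plot_confusion_matrix(cm, classes=['cat', 'dog', 'bird'], normalize=True)
plt.show()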
| 8,719
|
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_EVAL_BATCH_SIZE)
model = _build_keras_model()
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps)
# NEW: Save a computation graph including transform layer.
signatures = {
'serving_default': _get_serve_tf_examples_fn(model, tf_transform_output),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
| 8,720
|
def _get_security_group_id(connection, security_group_name):
"""
Takes a security group name and returns the ID. If the name cannot be
found, the value will be attempted as an ID. The first group found by
this name or ID will be used.
:param connection:
:param security_group_name:
:return:
"""
if not security_group_name:
print('The bees need a security group to run under. Need to open a port from where you are to the target '
'subnet.')
return
# Try by name
security_groups = connection.describe_security_groups(
Filters=[{'Name': 'group-name', 'Values': [security_group_name, ]}, ]
)
security_groups = security_groups['SecurityGroups']
if not security_groups:
# Try by id
security_groups = connection.describe_security_groups(
Filters=[{'Name': 'group-id', 'Values': [security_group_name, ]}, ]
)
security_groups = security_groups['SecurityGroups']
if not security_groups:
print('The bees need a security group to run under. The one specified was not found. '
'Create a sg that has access to port 22 ie. from 0.0.0.0/0')
return
return security_groups[0]['GroupId'] if security_groups else None
| 8,721
|
def viz_preprocessing(df_path):
"""
Preprocess the aggregation csv into a good format for visualization
"""
df = pd.read_csv(df_path)
res = df.T
res = res.rename(columns=res.iloc[0]).drop(res.index[0])
res = res.astype("int64")
res.reset_index(inplace=True)
res["index"] = res["index"].apply(
lambda x: "{}-{}-{}".format(x[0:4], x[4:6], x[6:])
)
res["index"] = pd.to_datetime(res["index"])
return res
| 8,722
|
def test_get_filenames_with_multiple_attachments_data(
signal_notification_service: SignalNotificationService,
) -> None:
"""Test getting filenames with multiple 'attachments' in data."""
data = {"attachments": ["test", "test2"]}
result = signal_notification_service.get_filenames(data)
assert result == ["test", "test2"]
| 8,723
|
def build_encoded_broadcast_from_model(model_fn, encoder_fn):
"""Builds `StatefulBroadcastFn` for weights of model returned by `model_fn`.
This method creates a `SimpleEncoder` for every weight of model created by
`model_fn`, as returned by `encoder_fn`.
Args:
model_fn: A Python callable with no arguments that returns a
`tff.learning.Model`.
encoder_fn: A Python callable with a single argument, which is expected to
be a `tf.Tensor` of shape and dtype to be encoded. The function must
return a `tensor_encoding.core.SimpleEncoder`, which expects a `tf.Tensor`
with compatible type as the input to its `encode` method.
Returns:
A `StatefulBroadcastFn` for encoding and broadcasting the weights of model
created by `model_fn`.
Raises:
TypeError: If `model_fn` or `encoder_fn` are not callable objects.
"""
py_typecheck.check_callable(model_fn)
py_typecheck.check_callable(encoder_fn)
# TODO(b/144382142): Keras name uniquification is probably the main reason we
# still need this.
with tf.Graph().as_default():
values = model_utils.enhance(model_fn()).weights
encoders = tf.nest.map_structure(encoder_fn, values)
return tff.utils.build_encoded_broadcast(values, encoders)
| 8,724
|
def adjacency(G, nodelist=None, weight="weight"):
"""
Returns the sparse adjacency matrix
representation of the graph.
"""
if nodelist is None:
nodelist = G.nodes()
A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, format="csr")
return A
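# Usage sketch on a tiny weighted graph; nx.to_scipy_sparse_matrix used above is the
# pre-3.0 NetworkX API, so this assumes networkx < 3.0 is installed.
import networkx as nx

G = nx.Graph()
G.add_edge('a', 'b', weight=2.0)
G.add_edge('b', 'c', weight=1.5)
A = adjacency(G, nodelist=['a', 'b', 'c'])
print(A.toarray())
# [[0.  2.  0. ]
#  [2.  0.  1.5]
#  [0.  1.5 0. ]]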
| 8,725
|
def sample_movie(user, **params):
"""Create and return a movie"""
defaults = {
'title': 'A Walk to Remember',
'duration': datetime.timedelta(hours=2, minutes=15),
'price': 8.99
}
defaults.update(params)
return Movie.objects.create(user=user, **defaults)
| 8,726
|
def set_edit():
""" Set to Edit Mode - Note may be buggy """
try:
bpy.ops.object.mode_set(mode = "EDIT")
except Exception:
pass
| 8,727
|
def lines_diff(lines1, lines2):
"""Show difference between lines."""
is_diff = False
diffs = list()
for line in difflib.ndiff(lines1, lines2):
if not is_diff and line[0] in ('+', '-'):
is_diff = True
diffs.append(line)
return is_diff, diffs
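# Example of the (is_diff, diffs) pair for two nearly identical line lists;
# assumes lines_diff above (and its difflib import) is in scope.
is_diff, diffs = lines_diff(['alpha', 'beta'], ['alpha', 'gamma'])
print(is_diff)  # True
print(diffs)    # ['  alpha', '- beta', '+ gamma']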
| 8,728
|
def parse_ipmi_hpm(output):
"""Parse the output of the hpm info retrieved with ipmitool"""
hrdw = []
line_pattern = re.compile(r'^\|[^0-9]*([0-9]+)\|[^a-zA-Z ]* ?([^\|]*)\|([^\|]*)\|([^\|]*)\|([^\|]*)\|')
for line in output:
match = line_pattern.match(line)
if match:
name = match.group(2).strip()
version = match.group(3).strip().split(" ")[0]
hrdw.append(('firmware', name, 'version', version))
return hrdw
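# Worked example on a hypothetical row in the table layout that ipmitool prints;
# it only illustrates how the regex groups map to the (name, version) tuple.
sample = ["|*  1|BOOT              |0.12 00000000|0.12 00000000|0.12 00000000|"]
print(parse_ipmi_hpm(sample))
# [('firmware', 'BOOT', 'version', '0.12')]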
| 8,729
|
def export_trust_stores() -> None:
"""Export the content of the trust store of each supported platform to a PEM file at ./export.
"""
certs_repo = RootCertificatesRepository.get_default()
out_pem_folder = ROOT_PATH / 'export'
out_pem_folder.mkdir(exist_ok=True)
# Export each trust store as a PEM file to ./export
print(f'Exporting stores as PEM to {out_pem_folder}...')
for platform in PlatformEnum:
print(f'Exporting {platform.name}...')
store = TrustStore.get_default_for_platform(platform)
all_certs_pem = store.export_trusted_certificates_as_pem(certs_repo)
out_pem_path = out_pem_folder / f'{platform.name.lower()}.pem'
with open(out_pem_path, mode='w') as out_pem_file:
out_pem_file.write(all_certs_pem)
| 8,730
|
def visualize_model(model, num_images=8, figsize=(15,15)):
"""Show a grid of predictions"""
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure(figsize=figsize)
dataloader_val = dataloaders['val']
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloader_val):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot((num_images + 5) // 6, 6, images_so_far)  # ceil(num_images / 6) rows
ax.axis("off")
ax.set_title(
"predicted: {}\nTrue {}".format(dataloader_val.dataset.classes[preds[j]], dataloader_val.dataset.classes[labels[j]])
)
if dataloader_val.dataset.classes[preds[j]] != dataloader_val.dataset.classes[labels[j]]:
ax.set_title(dataloader_val.dataset.classes[preds[j]],color='red')
else:
ax.set_title(dataloader_val.dataset.classes[preds[j]],color='green')
plt.imshow(
inputs.cpu().data[j].numpy()[0],
interpolation="bicubic",
cmap='seismic'
)
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
| 8,731
|
def get_genes_and_pathways(reactions, r_numbers, species):
"""Returns a CSV-formatted string with the list of genes and pathways where
the reaction(s) of 'species' appear.
:param reactions: list of reactions for species
:param r_numbers: RNumbers object
:param species: KEGG organism code
:return: CSV-formatted string with genes and pathways where reactions of
species are present
"""
gene_set = set()
pathway_set = set()
for reaction in reactions:
organism = r_numbers.find(reaction).find(species)
assert organism is not None
for gene in organism.genes:
gene_set.add(gene.replace(species + ':', ''))
for pathway in organism.pathways:
pathway_set.add(pathway)
gene_col = ' '.join(sorted(gene_set))
pathway_col = ' '.join(sorted(pathway_set))
return gene_col.rstrip() + ';' + pathway_col.rstrip() + ';'
| 8,732
|
def set(key, value):
"""Sets the value for a key.
Sets the value of the specified configuration key in bob's global
configuration file.
\b
Arguments
---------
key : str
The key to set the value for.
value : str
The value of the key.
\b
Fails
-----
* If something goes wrong.
"""
try:
rc[key] = value
_saverc(rc)
except Exception:
logger.error("Could not configure the rc file", exc_info=True)
raise click.ClickException("Failed to change the configuration.")
| 8,733
|
def datasetFiles(request):
"""
Return a list all dataset files in the datasets directory, by looking for files ending
with .h5 suffix. eg. ['/Users/jarnyc/BioPyramid/data/datasets/lanner.1.0.h5']
"""
# This is the dataset directory, set by the config file
datadir = request.registry.settings['biopyramid.model.datadir']
# Go through each file in the directory and fetch files with .h5 suffix
filepaths = []
for filename in os.listdir(datadir):
if filename.endswith(".h5"):
filepaths.append(os.path.join(datadir, filename))
return filepaths
| 8,734
|
def relate_ca(assessment, template):
"""Generates custom attribute list and relates it to Assessment objects
Args:
assessment (model instance): Assessment model
template: Assessment Template instance (may be None)
"""
if not template:
return None
ca_definitions = all_models.CustomAttributeDefinition.query.options(
orm.undefer_group('CustomAttributeDefinition_complete'),
).filter_by(
definition_id=template.id,
definition_type="assessment_template",
).order_by(
all_models.CustomAttributeDefinition.id
)
created_cads = []
for definition in ca_definitions:
cad = all_models.CustomAttributeDefinition(
title=definition.title,
definition=assessment,
attribute_type=definition.attribute_type,
multi_choice_options=definition.multi_choice_options,
multi_choice_mandatory=definition.multi_choice_mandatory,
mandatory=definition.mandatory,
helptext=definition.helptext,
placeholder=definition.placeholder,
)
db.session.add(cad)
created_cads.append(cad)
return created_cads
| 8,735
|
def getSenderNumberMgtURL(request):
"""
Return the URL of the sender-number management popup.
- For security reasons, the returned URL is valid for 30 seconds only.
- https://docs.popbill.com/fax/python/api#GetSenderNumberMgtURL
"""
try:
# Popbill member business registration number
CorpNum = settings.testCorpNum
# Popbill member user ID
UserID = settings.testUserID
url = faxService.getSenderNumberMgtURL(CorpNum, UserID)
return render(request, 'url.html', {'url': url})
except PopbillException as PE:
return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
| 8,736
|
def closest_match(match, specs, depth=0):
"""
Recursively iterates over type, group, label and overlay key,
finding the closest matching spec.
"""
new_specs = []
match_lengths = []
for i, spec in specs:
if spec[0] == match[0]:
new_specs.append((i, spec[1:]))
else:
if all(isinstance(s[0], basestring) for s in [spec, match]):
match_length = max(i for i in range(len(match[0]))
if match[0].startswith(spec[0][:i]))
elif is_number(match[0]) and is_number(spec[0]):
m = bool(match[0]) if isinstance(match[0], np.bool_) else match[0]
s = bool(spec[0]) if isinstance(spec[0], np.bool_) else spec[0]
match_length = -abs(m-s)
else:
match_length = 0
match_lengths.append((i, match_length, spec[0]))
if len(new_specs) == 1:
return new_specs[0][0]
elif new_specs:
depth = depth+1
return closest_match(match[1:], new_specs, depth)
else:
if depth == 0 or not match_lengths:
return None
else:
return sorted(match_lengths, key=lambda x: -x[1])[0][0]
| 8,737
|
def average(w, axis=-1):
"""Calculate average
Example:
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]))
>>> average(w1)
Waveform(array([0, 1]), array([ 2. , 2.5]))
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]), \
xlabels=['row','col'])
>>> average(w1, axis='row')
Waveform(array([0, 1]), array([ 0.5, 4. ]))
"""
return reducedim(w, np.mean(w._y, axis=w.getaxis(axis)),
axis=w.getaxis(axis))
| 8,738
|
def random():
"""Return a random parameter set for the model."""
total_thickness = 10**np.random.uniform(2, 4.7)
Nlayers = np.random.randint(2, 200)
d_spacing = total_thickness / Nlayers
thickness = d_spacing * np.random.uniform(0, 1)
length_head = thickness * np.random.uniform(0, 1)
length_tail = thickness - length_head
Caille_parameter = np.random.uniform(0, 0.8)
pars = dict(
length_head=length_head,
length_tail=length_tail,
Nlayers=Nlayers,
d_spacing=d_spacing,
Caille_parameter=Caille_parameter,
)
return pars
| 8,739
|
def InformationalBuilders(site_config, boards_dict, ge_build_config):
"""Create all informational builders.
We have a number of informational builders that are built, but whose output is
not directly used for anything other than reporting success or failure.
Args:
site_config: config_lib.SiteConfig to be modified by adding templates
and configs.
boards_dict: A dict mapping board types to board name collections.
ge_build_config: Dictionary containing the decoded GE configuration file.
"""
external_board_configs = CreateBoardConfigs(
site_config, boards_dict, ge_build_config)
internal_board_configs = CreateInternalBoardConfigs(
site_config, boards_dict, ge_build_config)
_chrome_boards = frozenset(
board for board, config in internal_board_configs.iteritems()
if config.get('sync_chrome', True))
hw_test_list = HWTestList(ge_build_config)
_chrome_informational_hwtest_boards = frozenset([
'caroline',
'eve',
'peach_pit',
'reks',
'tricky',
'veyron_minnie',
])
_chrome_informational_swarming_boards = frozenset([
'eve',
])
# We have to mark all autogenerated PFQs as not important so the master
# does not wait for them. http://crbug.com/386214
# If you want an important PFQ, you'll have to declare it yourself.
informational_boards = (
(boards_dict['all_release_boards'] & _chrome_boards))
_tot_chrome_pfq_informational_board_configs = UpdateBoardConfigs(
internal_board_configs,
_chrome_informational_hwtest_boards,
hw_tests=hw_test_list.DefaultListChromePFQInformational(
pool=constants.HWTEST_CONTINUOUS_POOL))
_tot_chrome_pfq_informational_board_configs = UpdateBoardConfigs(
_tot_chrome_pfq_informational_board_configs,
_chrome_informational_swarming_boards,
active_waterfall=waterfall.WATERFALL_SWARMING)
site_config.AddForBoards(
'tot-chrome-pfq-informational',
informational_boards,
_tot_chrome_pfq_informational_board_configs,
site_config.templates.chrome_pfq_informational,
important=False)
# TODO(ihf): Remove as obsolete.
site_config.Add(
'x86-generic-tot-asan-informational',
site_config.templates.tot_asan_informational,
boards=['x86-generic'],
)
site_config.Add(
'amd64-generic-asan',
site_config.templates.asan,
site_config.templates.incremental,
site_config.templates.no_hwtest_builder,
display_label=config_lib.DISPLAY_LABEL_INFORMATIONAL,
boards=['amd64-generic'],
description='Build with Address Sanitizer (Clang)',
# THESE IMAGES CAN DAMAGE THE LAB and cannot be used for hardware testing.
disk_layout='4gb-rootfs',
)
site_config.Add(
'amd64-generic-tot-asan-informational',
site_config.templates.tot_asan_informational,
site_config.templates.no_hwtest_builder,
# THESE IMAGES CAN DAMAGE THE LAB and cannot be used for hardware testing.
disk_layout='4gb-rootfs',
boards=['amd64-generic'],
)
site_config.Add(
'betty-asan',
site_config.templates.asan,
site_config.templates.incremental,
site_config.templates.no_hwtest_builder,
site_config.templates.internal,
display_label=config_lib.DISPLAY_LABEL_INFORMATIONAL,
boards=['betty'],
description='Build with Address Sanitizer (Clang)',
# THESE IMAGES CAN DAMAGE THE LAB and cannot be used for hardware testing.
disk_layout='4gb-rootfs',
)
site_config.Add(
'betty-tot-asan-informational',
site_config.templates.tot_asan_informational,
site_config.templates.no_hwtest_builder,
# THESE IMAGES CAN DAMAGE THE LAB and cannot be used for hardware testing.
disk_layout='4gb-rootfs',
boards=['betty'],
)
site_config.Add(
'amd64-generic-fuzzer',
site_config.templates.fuzzer,
boards=['amd64-generic'],
description='Build for fuzzing testing',
# THESE IMAGES CAN DAMAGE THE LAB and cannot be used for hardware testing.
disk_layout='4gb-rootfs',
)
site_config.Add(
'amd64-generic-goma-canary-chromium-pfq-informational',
site_config.templates.chromium_pfq_informational,
site_config.templates.no_hwtest_builder,
site_config.templates.no_vmtest_builder,
important=False,
description='Test canary versions of goma.',
boards=[
'amd64-generic',
],
)
_chrome_perf_boards = frozenset([
'daisy',
'reef',
])
site_config.AddForBoards(
'chrome-perf',
_chrome_perf_boards,
internal_board_configs,
site_config.templates.chrome_perf,
)
site_config.AddForBoards(
'telem-chromium-pfq-informational',
['x86-generic', 'amd64-generic'],
internal_board_configs,
site_config.templates.chromium_pfq_informational,
site_config.templates.telemetry,
site_config.templates.chrome_try,
)
_tot_chromium_pfq_informational_swarming_boards = frozenset([
'amd64-generic',
'daisy',
])
_tot_chromium_pfq_informational_board_configs = UpdateBoardConfigs(
external_board_configs,
_tot_chromium_pfq_informational_swarming_boards,
active_waterfall=waterfall.WATERFALL_SWARMING)
site_config.AddForBoards(
'tot-chromium-pfq-informational',
(boards_dict['all_full_boards'] & _chrome_boards),
_tot_chromium_pfq_informational_board_configs,
site_config.templates.chromium_pfq_informational,
site_config.templates.build_external_chrome,
important=False,
internal=False,
manifest_repo_url=site_config.params['MANIFEST_URL'],
overlays=constants.PUBLIC_OVERLAYS)
_telemetry_boards = frozenset([
'amd64-generic',
'arm-generic',
'betty',
])
site_config.AddForBoards(
'telemetry',
_telemetry_boards,
internal_board_configs,
site_config.templates.telemetry,
)
| 8,740
|
def test_fva(ec_model_core, fva_targets):
"""Test that fva returns the expected results."""
df = flux_variability_analysis(ec_model_core)
assert ((df.maximum - df.minimum) > 1e-3).sum() == 38
| 8,741
|
def extract_features_to_dict(image_dir, list_file):
"""extract features and save them with dictionary"""
label, img_list = load_image_list(image_dir, list_file)
ftr = feature
integer_label = label_list_to_int(label)
feature_dict = {'features': ftr,
'label': integer_label,
'label_original': string_list_to_cells(label),
'image_path': string_list_to_cells(img_list)}
return feature_dict
| 8,742
|
def database_connection(
autocommit: bool = False,
) -> typing.Iterator[psycopg2.extensions.connection]:
"""Context manager for database transactions.
By default the transaction is commited when exiting the context
manager normally or rolled back in case of unhandled exception. But
since e.g. VACUUM can not be issued inside transaction, the
connection commit behaviour can be altered with `autocommit=True`.
"""
conn = pool.getconn()
conn.autocommit = autocommit
try:
yield conn
except Exception:
conn.rollback()
raise
else:
conn.commit()
finally:
pool.putconn(conn)
| 8,743
|
def roi_heads_forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
"""
Args:
features (List[Tensor])
proposals (List[Tensor[N, 4]])
image_shapes (List[Tuple[H, W]])
targets (List[Dict])
"""
if targets is not None:
for t in targets:
# TODO: https://github.com/pytorch/pytorch/issues/26731
floating_point_types = (torch.float, torch.double, torch.half)
assert t["boxes"].dtype in floating_point_types, "target boxes must of float type"
assert t["labels"].dtype == torch.int64, "target labels must of int64 type"
if self.has_keypoint():
assert t["keypoints"].dtype == torch.float32, "target keypoints must of float type"
if self.training:
proposals, matched_idxs, labels, regression_targets, length_labels = self.select_training_samples(proposals, targets)
else:
labels = None
regression_targets = None
matched_idxs = None
box_features = self.box_roi_pool(features, proposals, image_shapes)
box_features = self.box_head(box_features)
class_logits, box_regression, length_logits = self.box_predictor(box_features)
result: List[Dict[str, torch.Tensor]] = []
losses = {}
if self.training:
assert labels is not None and regression_targets is not None and length_labels is not None
loss_classifier, loss_box_reg, loss_length = fastrcnn_loss(class_logits, box_regression, labels, regression_targets, length_labels, length_logits)
losses = {"loss_classifier": loss_classifier, "loss_box_reg": loss_box_reg, "loss_vessel_length": loss_length}
else:
boxes, scores, labels, lengths = postprocess_detections(class_logits, box_regression, proposals, image_shapes, length_logits)
num_images = len(boxes)
for i in range(num_images):
result.append(
{
"boxes": boxes[i],
"labels": labels[i],
"scores": scores[i],
"lengths": lengths[i]
}
)
if self.has_mask():
mask_proposals = [p["boxes"] for p in result]
if self.training:
assert matched_idxs is not None
# during training, only focus on positive boxes
num_images = len(proposals)
mask_proposals = []
pos_matched_idxs = []
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
mask_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
if self.mask_roi_pool is not None:
mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes)
mask_features = self.mask_head(mask_features)
mask_logits = self.mask_predictor(mask_features)
else:
raise Exception("Expected mask_roi_pool to be not None")
loss_mask = {}
if self.training:
assert targets is not None
assert pos_matched_idxs is not None
assert mask_logits is not None
gt_masks = [t["masks"] for t in targets]
gt_labels = [t["labels"] for t in targets]
rcnn_loss_mask = maskrcnn_loss(mask_logits, mask_proposals, gt_masks, gt_labels, pos_matched_idxs)
loss_mask = {"loss_mask": rcnn_loss_mask}
else:
labels = [r["labels"] for r in result]
masks_probs = maskrcnn_inference(mask_logits, labels)
for mask_prob, r in zip(masks_probs, result):
r["masks"] = mask_prob
losses.update(loss_mask)
# keep none checks in if conditional so torchscript will conditionally
# compile each branch
if (
self.keypoint_roi_pool is not None
and self.keypoint_head is not None
and self.keypoint_predictor is not None
):
keypoint_proposals = [p["boxes"] for p in result]
if self.training:
# during training, only focus on positive boxes
num_images = len(proposals)
keypoint_proposals = []
pos_matched_idxs = []
assert matched_idxs is not None
for img_id in range(num_images):
pos = torch.where(labels[img_id] > 0)[0]
keypoint_proposals.append(proposals[img_id][pos])
pos_matched_idxs.append(matched_idxs[img_id][pos])
else:
pos_matched_idxs = None
keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes)
keypoint_features = self.keypoint_head(keypoint_features)
keypoint_logits = self.keypoint_predictor(keypoint_features)
loss_keypoint = {}
if self.training:
assert targets is not None
assert pos_matched_idxs is not None
gt_keypoints = [t["keypoints"] for t in targets]
rcnn_loss_keypoint = keypointrcnn_loss(
keypoint_logits, keypoint_proposals, gt_keypoints, pos_matched_idxs
)
loss_keypoint = {"loss_keypoint": rcnn_loss_keypoint}
else:
assert keypoint_logits is not None
assert keypoint_proposals is not None
keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals)
for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result):
r["keypoints"] = keypoint_prob
r["keypoints_scores"] = kps
losses.update(loss_keypoint)
return result, losses
| 8,744
|
def capitalize(s):
"""capitalize(s) -> string
Return a copy of the string s with only its first character
capitalized.
"""
return s.capitalize()
| 8,745
|
def S_tunnel_e0(self, mu, sig, Efl, Efr, Tl, Tr):
"""energy flux
Conduction band edge 0 at higher of the two
"""
a = mu-sig/2
b = mu+sig/2
kTl = sc.k*Tl
kTr = sc.k*Tr
Blr = (a/kTl+1)*np.exp(-a/kTl)-(b/kTl+1)*np.exp(-b/kTl)
Brl = (a/kTr+1)*np.exp(-a/kTr)-(b/kTr+1)*np.exp(-b/kTr)
Slr = kTl**3*Blr*np.exp(Efl/kTl)
Srl = -kTr**3*Brl*np.exp(Efr/kTr)
# Slr = kTl**3*Blr
# Srl = -kTr**3*Brl
ret = self._cS*(Slr+Srl)
return ret
| 8,746
|
def to_camel_java(text, first_lower=True):
"""Returns the text in camelCase or CamelCase format for Java
"""
return to_camelcase(text, first_lower=first_lower,
reserved_keywords=JAVA_KEYWORDS, suffix="_")
| 8,747
|
def parse_resources(resource_name, resource_data, book_node, **auth_info):
""" Creates resource topics """
resource_data = resource_data or []
resource_str = "{}-{}".format(book_node.source_id, resource_name.replace(' ', '-').lower())
# Create resource topic
resource_node = nodes.TopicNode(source_id=resource_str, title=resource_name)
book_node.add_child(resource_node)
# Add resource documents
for resource in resource_data:
if resource.get('link_document_url') and resource['link_document_url'].endswith(".pdf"):
description = parse_description(resource.get('resource_description'))
add_file_node(resource_node, resource.get("link_document_url"), resource.get('resource_heading'), description=description, **auth_info)
| 8,748
|
def rectangle_field(N_1, N_2, B_1, B_2, H, D, r_b):
"""
Build a list of boreholes in a rectangular bore field configuration.
Parameters
----------
N_1 : int
Number of borehole in the x direction.
N_2 : int
Number of borehole in the y direction.
B_1 : float
Distance (in meters) between adjacent boreholes in the x direction.
B_2 : float
Distance (in meters) between adjacent boreholes in the y direction.
H : float
Borehole length (in meters).
D : float
Borehole buried depth (in meters).
r_b : float
Borehole radius (in meters).
Returns
-------
boreField : list of Borehole objects
List of boreholes in the rectangular bore field.
Examples
--------
>>> boreField = gt.boreholes.rectangle_field(N_1=3, N_2=2, B_1=5., B_2=5.,
H=100., D=2.5, r_b=0.05)
The bore field is constructed line by line. For N_1=3 and N_2=2, the bore
field layout is as follows::
3 4 5
0 1 2
"""
borefield = []
for j in range(N_2):
for i in range(N_1):
borefield.append(Borehole(H, D, r_b, x=i*B_1, y=j*B_2))
return borefield
| 8,749
|
def mask_outputs(machine):
"""Erase outputs from each edge where they are zero."""
for u, v, d in machine.edges(data=True):
for k in list(d):  # copy keys so entries can be popped while iterating
if k in machine.outputs and d[k] == 0:
d.pop(k)
| 8,750
|
def convertHunit(conc, from_unit='H/10^6 Si', to_unit='ppm H2O', phase='Fo90',
printout=True):
"""
Convert hydrogen concentrations to/from H/10^6 Si and ppm H2O.
Based on Table 3 of Denis et al. 2013
"""
if phase == 'Fo90':
H_to_1_ppm = 16.35
elif phase == 'opx':
H_to_1_ppm = 11.49
elif phase == 'cpx':
H_to_1_ppm = 11.61
else:
print('Valid options for phase are Fo90, opx, and cpx')
return
if from_unit == 'H/10^6 Si':
if to_unit == 'ppm H2O':
new_conc = conc / H_to_1_ppm
elif to_unit == 'per m3':
new_conc = conc * (1.0/308.67) * (1e30)
else:
print('only going to units "ppm H2O" and "per m3"')
return
elif from_unit == 'ppm H2O':
if to_unit == 'H/10^6 Si':
new_conc = conc * H_to_1_ppm
elif to_unit == 'per m3':
new_conc = (conc * H_to_1_ppm) * (1.0/308.67) * (1e30)
else:
print('only going to "H/10^6 Si" or "per m3"')
return
elif from_unit == 'per m3':
if to_unit == 'H/10^6 Si':
new_conc = conc / ((1.0/308.67) * (1e30))
elif to_unit == 'ppm H2O':
new_conc = (conc / ((1.0/308.67) * (1e30))) / H_to_1_ppm
else:
print('only going to "H/10^6 Si" or "ppm H2O"')
return
else:
print('Only going from H/10^6 Si, ppm H2O, and per m3 for now')
return
if printout is True:
output = ' '.join(('{:.2f}'.format(conc), from_unit, '=',
'{:.2f}'.format(new_conc), to_unit, 'for', phase))
print(output)
return new_conc
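# Worked example using the Fo90 factor above (16.35 H/10^6 Si per 1 ppm H2O);
# the numbers are chosen so the round trip is easy to check by hand.
convertHunit(163.5, from_unit='H/10^6 Si', to_unit='ppm H2O', phase='Fo90')
# prints: 163.50 H/10^6 Si = 10.00 ppm H2O for Fo90
convertHunit(10.0, from_unit='ppm H2O', to_unit='H/10^6 Si', phase='Fo90')
# prints: 10.00 ppm H2O = 163.50 H/10^6 Si for Fo90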
| 8,751
|
def run(file: str, expected: str) -> None:
""" Run with input """
rv, out = getstatusoutput(f'{RUN} {file}')
assert rv == 0
assert out.rstrip() == expected
| 8,752
|
def countBasesInFasta(fastaFile):
"""
Given a fasta file, return a dict where the number of records and
the total number of bases are given by 'records' and 'bases' respectively.
"""
recordRE = re.compile(r'^>')
whiteSpaceRE = re.compile(r'\s+')
total_bases = 0
total_seqs = 0
with open(fastaFile) as f:
for line in f:
if recordRE.match(line):
total_seqs += 1
continue
total_bases += len(whiteSpaceRE.sub('', line))
return {'records': total_seqs, 'bases': total_bases}
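# Quick self-check with a throwaway two-record FASTA file; assumes the function
# above (and its re import) is in scope.
with open('tiny.fa', 'w') as handle:
    handle.write(">seq1\nACGT\nACG\n>seq2\nTTTT\n")
print(countBasesInFasta('tiny.fa'))
# {'records': 2, 'bases': 11}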
| 8,753
|
def load_mnist_denoising(path_raw_dataset, batch_size=1, mu=0., sigma=0.6, deterministic=True):
"""
1. Get the MNIST dataset via PyTorch built-in APIs.
2. Wrap it with customized wrapper with additive Gaussian noise processor
3. Build PyTorch data loader objects.
:param path_raw_dataset:
:param batch_size:
:param mu:
:param sigma:
:param deterministic:
:return: dict of pytorch DataLoader objects.
{
'train':
(iterable) [noisy_image, (clean_image, noise)]
noisy_image shape: [batch, c, w, h]
clean_image shape: [batch, c, w, h]
noise shape: [batch, 1, c, w, h]
'val':
(iterable) [noisy_image, (clean_image, noise)]
noisy_image shape: [batch, c, w, h]
clean_image shape: [batch, c, w, h]
noise shape: [batch, 1, c, w, h]
}
"""
MNIST = P.data_processor_wrapper(torchvision.datasets.MNIST,
P.Processor_Denoising_AddGau(mu, sigma, deterministic, grayscale=True))
transform_input = transforms.Compose([
transforms.ToTensor(),
P.TransTo3Channels()
])
try:
data_train = MNIST(root=path_raw_dataset, train=True, download=False,
transform=transform_input)
except:
torch_dataset_download_helper()
data_train = MNIST(root=path_raw_dataset, train=True, download=True,
transform=transform_input)
try:
data_val = MNIST(root=path_raw_dataset, train=False, download=False,
transform=transform_input)
except:
torch_dataset_download_helper()
data_val = MNIST(root=path_raw_dataset, train=False, download=True,
transform=transform_input)
datasets = {'train': data_train, 'val': data_val}
data_loaders = {i: torch.utils.data.DataLoader(datasets[i], batch_size=batch_size, shuffle=False)
for i in ['train', 'val']}
return data_loaders
| 8,754
|
def test_create_env_def_file_cwl():
"""testing create_env_def_file with cwl option and an input Env variable"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'cwl_v1',
'cwl_url': 'someurl',
'main_cwl': 'somecwl',
'other_cwl_files': 'othercwl1,othercwl2'},
'Input': {'Env': {'SOME_ENV': '1234'}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'cwl')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=cwl_v1\n'
'export CWL_URL=someurl\n'
'export MAIN_CWL=somecwl\n'
'export CWL_FILES="othercwl1 othercwl2"\n'
'export SOME_ENV=1234\n'
'export PRESERVED_ENV_OPTION="--preserve-environment SOME_ENV "\n'
'export DOCKER_ENV_OPTION="-e SOME_ENV "\n')
assert envfile_content == right_content
os.remove(envfilename)
| 8,755
|
def help_systempowerlimiton(self, commands):
"""
limiton: Activates the powerlimit for a server, and
enables power throttling.
==================================================================
Usage:
set system power limiton -i {serverid}
-i -- serverid, the target server number. Typically 1-48
[-h] -help; display the correct syntax
"""
| 8,756
|
def make_triplet_freqs(sentence, triplet_freqs):
"""
Split the sentence into word triplets and update their frequency counts.
"""
# Janomeγ§εθͺγ«εε²γγ
t = Tokenizer()
morphemes = [token.surface for token in t.tokenize(sentence)]
if len(morphemes) < 3:
return {}
# Iterate over consecutive triplets
for i in range(len(morphemes) - 2):
triplet = tuple(morphemes[i:i+3])
triplet_freqs[triplet] += 1
# Add the BEGIN marker triplet
triplet = (BEGIN, morphemes[0], morphemes[1])
triplet_freqs[triplet] = 1
# Add the END marker triplet
triplet = (morphemes[-2], morphemes[-1], END)
triplet_freqs[triplet] = 1
return triplet_freqs
| 8,757
|
def test_train_val_split(patient_id,
sub_dataset_ids,
cv_fold_number):
""" if cv_fold_number == 1:
if patient_id in sub_dataset_ids[-5:]: return 'test'
elif patient_id in sub_dataset_ids[-7:-5]: return 'validation'
else: return 'train'
elif cv_fold_number == 2:
if patient_id in sub_dataset_ids[-10:-5]: return 'test'
elif patient_id in sub_dataset_ids[-12:-10]: return 'validation'
else: return 'train'
# used for accumulating results of tests on cv1 and cv2
if cv_fold_number == 3:
if patient_id in sub_dataset_ids[-10:]: return 'test'
elif patient_id in sub_dataset_ids[-12:-11]: return 'validation'
else: return 'train' """
if patient_id in [1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]:
return 'test'
elif patient_id == 36:
return 'validation'
else:
return 'train'
| 8,758
|
def f_rank(iterable, start=1):
"""Fractional ranking"""
last, fifo = None, []
for n, item in enumerate(iterable, start):
if item[0] != last:
if fifo:
mean = sum(f[0] for f in fifo) / len(fifo)
while fifo:
yield mean, fifo.pop(0)[1]
last = item[0]
fifo.append((n, item))
if fifo:
mean = sum(f[0] for f in fifo) / len(fifo)
while fifo:
yield mean, fifo.pop(0)[1]
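# Tied values share the mean of their ordinal ranks ("1, 2.5, 2.5, 4" style);
# the input is assumed to be sorted and shaped as (value, payload) pairs.
scores = [(10, 'a'), (20, 'b'), (20, 'c'), (30, 'd')]
print(list(f_rank(scores)))
# [(1.0, (10, 'a')), (2.5, (20, 'b')), (2.5, (20, 'c')), (4.0, (30, 'd'))]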
| 8,759
|
def generate_data_design_config(
random_generator: np.random.Generator,
) -> Iterable[DataSetParameters]:
"""Generates the data design configuration for evaluating M3 strategy."""
keys = LEVELS.keys()
levels = [len(LEVELS[k]) for k in keys]
for i, sample in enumerate(
lhs(n=len(levels), samples=NUM_SAMPLES_FOR_LHS, criterion="maximin")
):
design_parameters = {"id": str(i)}
for key, level in zip(keys, sample):
design_parameters[key] = LEVELS[key][int(level * len(LEVELS[key]))]
if design_parameters["overlap_generator_params"].name == "Independent":
raw_overlap_params = design_parameters["overlap_generator_params"]
design_parameters["overlap_generator_params"] = raw_overlap_params._replace(
params={
"universe_size": int(
design_parameters["largest_publisher_size"]
/ raw_overlap_params.params["largest_pub_to_universe_ratio"]
),
"random_generator": raw_overlap_params.params["random_generator"],
}
)
yield DataSetParameters(**design_parameters)
| 8,760
|
def get_convertible_info():
"""
D:\Trade\TDX\cjzq_tdx\T0002\hq_cache\speckzzdata.txt
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\speckzzdata.txt')
columns = [
'exchange', 'code', 'stock_code', 'convert_price', 'current_interest', 'list_amount', 'call_price',
'redeem_price',
'convert_start', 'due_price', 'convert_end', 'convert_code', 'current_amount', 'list_date', 'convert_ratio(%)'
]
df = pd.read_csv(filename, names=columns)
df['exchange'] = df['exchange'].apply(lambda x: 'sse' if x else 'szse')
df[['code', 'stock_code']] = df[['code', 'stock_code']].applymap(lambda x: '{:0>6d}'.format(x))
df[['list_amount', 'current_amount']] = df[['list_amount', 'current_amount']] * 10000
return df
| 8,761
|
def test_striplog_colour_plot():
"""
Tests mpl image of striplog with the ladder option.
"""
legend = Legend.builtin('NSDOE')
imgfile = "tutorial/M-MG-70_14.3_135.9.png"
striplog = Striplog.from_image(imgfile, 14.3, 135.9, legend=legend)
for iv in striplog:
iv.data['porosity'] = iv.top.z/100
fig = striplog.plot(colour='porosity', aspect=3, return_fig=True)
return fig
| 8,762
|
def fetch_data(
o_included: str,
flowcharts: dict,
o_metadata_file: str,
o_biom_file: str,
p_redbiom_context: str,
p_bloom_sequences: str,
p_reads_filter: int,
unique: bool,
update: bool,
dim: bool) -> pd.DataFrame:
"""
Parameters
----------
o_included : str
Path to output metadata for the included samples only.
flowcharts : dict
Steps of the workflow with samples counts (simpler representation).
o_metadata_file : str
[if --fetch] Path to the output metadata table file.
o_biom_file : str
[if --fetch] Path to the output biom table file.
p_redbiom_context : str
[if --fetch] Redbiom context for fetching 16S data from Qiita.
p_bloom_sequences : str
[if --fetch] Fasta file containing the sequences known
to bloom in fecal samples.
p_reads_filter : int
[if --fetch] Minimum number of reads per sample.
unique : bool
[if --fetch] Whether to keep a unique sample per host or not.
update : bool
[if --fetch] Update the sample names to remove Qiita-prep info.
dim : bool
[if --fetch] Whether to add the number of samples in the final
biom file name before extension or not.
Returns
-------
included : pd.DataFrame
Metadata for the included samples only.
"""
cmd = 'Xrbfetch'
cmd += ' -m %s' % o_included
if o_metadata_file:
o_meta = o_metadata_file
else:
o_meta = '%s_fetched.tsv' % splitext(o_included)[0]
cmd += ' -o %s' % o_meta
if o_biom_file:
cmd += ' -b %s' % o_biom_file
else:
cmd += ' -b %s_fetched.biom' % splitext(o_included)[0]
if p_bloom_sequences:
cmd += ' -s %s' % p_bloom_sequences
cmd += ' -r %s' % p_redbiom_context
cmd += ' -f %s' % p_reads_filter
if unique:
cmd += ' --unique'
if update:
cmd += ' --update'
if dim:
cmd += ' --dim'
cmd += ' --force'
cmd += ' --no-simple'
print('- fetch on redbiom:')
redbiom_fetching = subprocess.getoutput(cmd).split('\n')
print('Done.')
flowcharts['data'] = []
for step in redbiom_fetching:
step_line = step.strip()
if step_line.startswith('- Load biom table...'):
print('[fetch]', step_line)
n = step_line.split()[6]
flowcharts['data'].append(
['Fetch', n, 'redbiom', p_redbiom_context, None])
elif step_line.startswith('- Filter blooms...'):
print('[fetch]', step_line)
n = step_line.split()[5]
flowcharts['data'].append(
['Filter blooms', n, None, None, None])
elif step_line.startswith('- Get best samples from ambiguous'):
print('[fetch]', step_line)
n = step_line.split()[8]
flowcharts['data'].append(
['Solve redbiom ambiguous', n, 'most reads',
'...or... ', 'most features'])
elif step_line.startswith('- Filter biom for min'):
print('[fetch]', step_line)
f = step_line.split()[5]
n = step_line.split()[11]
flowcharts['data'].append(
['Filter reads', n, 'min %s' % f, None])
elif step_line.startswith('- Already one sample per host_subject_id'):
print('[fetch]', step_line)
n = step_line.split()[8]
flowcharts['data'].append(
['One per sample ID', n, None, None, None])
elif step_line.startswith('- Keep the best sample per host_subject_id'):
print('[fetch]', step_line)
n = step_line.split()[9]
flowcharts['data'].append(
['One per sample ID', n, None, None, None])
if 'Outputs:' in redbiom_fetching:
outs = redbiom_fetching[(redbiom_fetching.index('Outputs:') + 1):]
if len(outs):
return read_meta_pd(outs[0])
print('nothing fetched: check command:\n%s\nExiting...' % cmd)
sys.exit(1)
| 8,763
|
def searchArtist(artistName, session=models.session):
"""Search for artist. Returns models.ArtistSearch"""
return models.ArtistSearch(artistName, session)
| 8,764
|
def _ddnone():
"""allow defaultdict to be pickled"""
return defaultdict(_none)
| 8,765
|
def apply_join(query: Select, table: Table, join_table: Table, join: TableJoin):
"""
Performs a inner or outer join between two tables on a given query object.
TODO: enable multiple joins
:param query: A SQLAlchemy select object.
:param table: The Table we are joining from.
:param join_table: The Table we are joining to.
:param join: The Join object describing how to join the tables.
:return: A SQLAlchemy select object modified to join two tables.
"""
error_msg = 'Invalid join, "{}" is not a column on table "{}"'
join_conditions = []
for column_pair in join.column_pairs:
from_col = table.columns.get(column_pair.from_column)
to_col = join_table.columns.get(column_pair.to_column)
if from_col is None:
raise ValueError(error_msg.format(column_pair.from_column, table.name))
if to_col is None:
raise ValueError(error_msg.format(column_pair.to_column, join_table.name))
join_conditions.append(from_col == to_col)
return query.select_from(table.join(join_table, onclause=and_(*join_conditions), isouter=join.outer_join))
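# Sketch with SQLAlchemy Core tables (1.4+ select() call style) and namedtuple
# stand-ins for the app's TableJoin / column-pair objects, whose real definitions
# are not shown here; assumes apply_join above and its and_ import are in scope.
from collections import namedtuple
from sqlalchemy import MetaData, Table, Column, Integer, String, select

ColumnPair = namedtuple('ColumnPair', 'from_column to_column')
TableJoin = namedtuple('TableJoin', 'column_pairs outer_join')

metadata = MetaData()
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String))
orders = Table('orders', metadata,
               Column('id', Integer, primary_key=True),
               Column('user_id', Integer))

join = TableJoin(column_pairs=[ColumnPair('id', 'user_id')], outer_join=False)
query = apply_join(select(users.c.name, orders.c.id), users, orders, join)
print(query)
# SELECT users.name, orders.id FROM users JOIN orders ON users.id = orders.user_id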
| 8,766
|
def filter_shapely(feature):
"""
Return the first-order differences of the accumulation entropy list
extracted from `feature`. Typical usage:
feature1 = feature_extract(feature)
feature2 = filter_shapely(feature1)
"""
tmp = extract_Accumulation_entropy_list(feature)
tmp2=[]
for i in range(len(tmp)):
if i!=0:
tmp2.append(tmp[i]-tmp[i-1])
else:
tmp2.append(tmp[i])
return tmp2
| 8,767
|
def stations_by_river(stations):
"""Returns a dictionary mapping river names (key)
to a list of stations (object)"""
rivers_stations_dict = {} # Create empty dictionary
for i in range(len(stations)): # Iterate through list of stations
# Data type checks
if type(stations[i]) is MonitoringStation:
pass # Checks if stations are correct class
else:
raise TypeError("ERROR: Station is not a MonitoringStation")
if type(stations[i].name) is str: # Checks if name is string
pass
else:
raise TypeError("ERROR: Station 'name' attribute is not a string")
if type(stations[i].river) is str: # Checks if river is string
pass
else:
raise TypeError("ERROR: Station 'river' attribute is not a string")
if not stations[i].river in rivers_stations_dict:
# Checks if river is not in dictionary
rivers_stations_dict[stations[i].river] = []
# Adds river to dictionary with blank list
if not stations[i].name in rivers_stations_dict:
rivers_stations_dict[stations[i].river].append(stations[i].name)
# Adds station name to object list
return rivers_stations_dict
| 8,768
|
def getCurrentProfile():
"""
Get the name of the current profile.
"""
return __createJSON("GetCurrentProfile", {})
| 8,769
|
def get_params_from_request(req: web.Request) -> QueryParams:
"""
Convert the request's query string into filter parameters.
"""
page = int(req.rel_url.query.get('page', '1'))
cursor = req.rel_url.query.get('cursor')
sort = req.rel_url.query.get('sort')
sort_dir = req.rel_url.query.get('sortDir')
if sort and sort_dir == 'desc':
sort = f'-{sort}'
return QueryParams(
page=page,
cursor=int(cursor) if cursor else None,
order_by=sort,
)
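# Illustration with aiohttp's test helper; assumes the function above and its
# QueryParams type are importable. The printed repr depends on how QueryParams
# is defined, so it is shown only approximately.
from aiohttp.test_utils import make_mocked_request

req = make_mocked_request('GET', '/items?page=2&cursor=15&sort=name&sortDir=desc')
print(get_params_from_request(req))
# e.g. QueryParams(page=2, cursor=15, order_by='-name')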
| 8,770
|
def norm_mem_interval(pt):
"""Normalize membership in interval."""
return pt.on_prop(arg_conv(binop_conv(auto.auto_conv())))
| 8,771
|
def create_figure():
"""
Creates a simple example figure.
"""
fig = Figure()
a = fig.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2 * np.pi * t)
a.plot(t, s)
return fig
| 8,772
|
def train():
"""
Main script.
"""
args = get_args()
# Get context.
from nnabla.contrib.context import extension_context
extension_module = args.context
if args.context is None:
extension_module = 'cpu'
logger.info("Running in %s" % extension_module)
ctx = extension_context(extension_module, device_id=args.device_id)
nn.set_default_context(ctx)
# Dataset
# We use Tiny ImageNet from Stanford CS231N class.
# https://tiny-imagenet.herokuapp.com/
# Tiny ImageNet consists of 200 categories, each category has 500 images
# in training set. The image size is 64x64. To adapt ResNet into 64x64
# image inputs, the input image size of ResNet is set as 56x56, and
# the stride in the first conv and the first max pooling are removed.
data = data_iterator_tiny_imagenet(args.batch_size, 'train')
vdata = data_iterator_tiny_imagenet(args.batch_size, 'val')
num_classes = 200
tiny = True # TODO: Switch ILSVRC2012 dataset and TinyImageNet.
t_model = get_model(
args, num_classes, test=False, tiny=tiny)
t_model.pred.persistent = True # Not clearing buffer of pred in backward
v_model = get_model(
args, num_classes, test=True, tiny=tiny)
v_model.pred.persistent = True # Not clearing buffer of pred in forward
# Create Solver.
solver = S.Momentum(args.learning_rate, 0.9)
solver.set_parameters(nn.get_parameters())
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
monitor_err = M.MonitorSeries("Training error", monitor, interval=10)
monitor_vloss = M.MonitorSeries("Validation loss", monitor, interval=10)
monitor_verr = M.MonitorSeries("Validation error", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=10)
# Training loop.
for i in range(args.max_iter):
# Save parameters
if i % args.model_save_interval == 0:
nn.save_parameters(os.path.join(
args.model_save_path, 'param_%06d.h5' % i))
# Validation
if i % args.val_interval == 0:
# Clear all intermediate memory to save memory.
# t_model.loss.clear_recursive()
l = 0.0
e = 0.0
for j in range(args.val_iter):
images, labels = vdata.next()
v_model.image.d = images
v_model.label.d = labels
v_model.image.data.cast(np.uint8, ctx)
v_model.label.data.cast(np.int32, ctx)
v_model.loss.forward(clear_buffer=True)
l += v_model.loss.d
e += categorical_error(v_model.pred.d, v_model.label.d)
monitor_vloss.add(i, l / args.val_iter)
monitor_verr.add(i, e / args.val_iter)
# Clear all intermediate memory to save memory.
# v_model.loss.clear_recursive()
# Training
l = 0.0
e = 0.0
solver.zero_grad()
# Gradient accumulation loop
for j in range(args.accum_grad):
images, labels = data.next()
t_model.image.d = images
t_model.label.d = labels
t_model.image.data.cast(np.uint8, ctx)
t_model.label.data.cast(np.int32, ctx)
t_model.loss.forward(clear_no_need_grad=True)
t_model.loss.backward(clear_buffer=True) # Accumulating gradients
l += t_model.loss.d
e += categorical_error(t_model.pred.d, t_model.label.d)
solver.weight_decay(args.weight_decay)
solver.update()
monitor_loss.add(i, l / args.accum_grad)
monitor_err.add(i, e / args.accum_grad)
monitor_time.add(i)
# Learning rate decay at scheduled iter
if i in args.learning_rate_decay_at:
solver.set_learning_rate(solver.learning_rate() * 0.1)
nn.save_parameters(os.path.join(args.model_save_path,
'param_%06d.h5' % args.max_iter))
nnp_file = os.path.join(
args.model_save_path, 'resnet_%06d.nnp' % (args.max_iter))
runtime_contents = {
'networks': [
{'name': 'Validation',
'batch_size': v_model.pred.shape[0],
'outputs': {'y': v_model.pred},
'names': {'x': v_model.image}}],
'executors': [
{'name': 'Runtime',
'network': 'Validation',
'data': ['x'],
'output': ['y']}]}
save.save(nnp_file, runtime_contents)
from cpp_forward_check import check_cpp_forward
check_cpp_forward(args.model_save_path, [v_model.image.d], [
v_model.image], v_model.pred, nnp_file)
| 8,773
|
def rdf_reader(src):
"""rdf = rdf_reader(src)
src rdf filename
rdf The RDF mapping object"""
return RDF(*list(rdf_include(src)))
| 8,774
|
def get_batches_xy(x, y, batch_size):
"""
Generate inputs and targets in a batch-wise fashion for feed-dict
Args:
x: entire source sequence array
y: entire output sequence array
batch_size: batch size
Returns:
x_batch, y_batch, source_sentence_length, target_sentence_length
"""
for batch_i in range(0, len(x) // batch_size):
start_i = batch_i * batch_size
x_batch = x[start_i:start_i + batch_size]
y_batch = y[start_i:start_i + batch_size]
source_sentence_length = [np.count_nonzero(seq) for seq in x_batch]
target_sentence_length = [np.count_nonzero(seq) for seq in y_batch]
yield x_batch, y_batch, source_sentence_length, target_sentence_length
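
# Hedged usage sketch (editor's addition): zero-padded id sequences fed
# through the generator above; the arrays are made up for illustration.
import numpy as np
x_demo = np.array([[4, 7, 2, 0], [9, 1, 0, 0], [3, 3, 5, 1], [8, 0, 0, 0]])
y_demo = np.array([[5, 6, 0, 0], [2, 0, 0, 0], [7, 7, 7, 0], [1, 4, 0, 0]])
for xb, yb, src_len, tgt_len in get_batches_xy(x_demo, y_demo, batch_size=2):
    print(xb.shape, yb.shape, src_len, tgt_len)  # first batch: (2, 4) (2, 4) [3, 2] [2, 1]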
| 8,775
|
def test_run_failure_filenotfounderror(tmp_path, sample_catalog_minimal):
"""Test failure of _run on RemoveCmd in specifying a nonexistent file."""
# Create a temporary catalog file with responsible-parties
content_type = FileContentType.JSON
catalog_def_dir, catalog_def_file = test_utils.prepare_trestle_project_dir(
tmp_path,
content_type,
sample_catalog_minimal,
test_utils.CATALOGS_DIR
)
# 6. oscal_read fails because file is not found
# Must specify catalogs/ location, not catalogs/my_test_model/.
testargs = [
'trestle', 'remove', '-f', re.sub('my_test_model/', '', str(catalog_def_file)), '-e', 'catalog.metadata'
]
with patch.object(sys, 'argv', testargs):
exitcode = Trestle().run()
assert exitcode == 1
| 8,776
|
def notImplementedYet():
"""
shows a dialog that says that this feature its not implemented
"""
wx.GenericMessageDialog(
parent=wx.GetActiveWindow(),
message=loc('popup.notimplemented.text'),
caption=loc('popup.notimplemented.title'),
style=wx.ICON_INFORMATION | wx.STAY_ON_TOP | wx.OK
).ShowModal()
| 8,777
|
def test_nf_calc(gain, nf_expected, enabled, setup_edfa, si):
""" compare the 2 amplifier models (polynomial and estimated from nf_min and max)
=> nf_model vs nf_poly_fit for boundary gain values: gain_min (and below) & gain_flatmax
same values are expected between the 2 models
=> unitary test for Edfa._calc_nf() (and Edfa.interpol_params)"""
edfa = setup_edfa
frequencies = np.array([c.frequency for c in si.carriers])
pin = np.array([c.power.signal+c.power.nli+c.power.ase for c in si.carriers])
baud_rates = np.array([c.baud_rate for c in si.carriers])
edfa.operational.gain_target = gain
edfa.params.nf_model_enabled = enabled
edfa.interpol_params(frequencies, pin, baud_rates)
nf = edfa.nf
dif = abs(nf[0] - nf_expected)
assert dif < 0.01
| 8,778
|
def draw_signalData(Nsamp=1, alpha=__alpha, beta=__beta, **kwargs):
"""
draw an SNR from the signal distribution
"""
return np.array([ncx2.rvs(__noise_df, nc) for nc in __draw_truncatedPareto(Nsamp, alpha=alpha, beta=beta)])
| 8,779
|
def choose_weighted_images_forced_distribution(num_images, images, nodes):
"""Returns a list of images to cache
Enforces the distribution of images to match the weighted distribution as
closely as possible. Factors in the current distribution of images cached
across nodes.
It is important to note that there may be circumstances which prevent this
function from attaining the desired ideal distribution, but the function
will always try its best to reach the desired distribution based on the
specified weights.
num_images - the number (integer) of images to choose to cache
    images - a list of ImageInputs to consider for caching
nodes - a list of NodeInputs to use for determining which images
need to be cached the most
"""
named_distribution = _get_named_image_distribution(images, nodes)
# Take the difference of the desired distribution with the current
# one.
scaled_weights = _get_scaled_weights(
images, _get_scale_factor_for_caching_nodes(num_images, images, nodes))
distribution_difference = [
[image, (scaled_weights[image.name] - named_distribution[image.name])]
for image in images
]
def decrement_distribution(distribution_pair, diff_dict):
distribution_pair[1] -= 1
return _pick_images(
images, distribution_difference, num_images,
picker_func=lambda diff: max(diff, key=lambda pair: pair[1]),
distribution_mutator_func=decrement_distribution)
| 8,780
|
def predict_lumbar_ankles_model(data):
"""Generate lumbar + 2 ankles model predictions for data.
Args:
data (dict): all data matrices/lists for a single subject.
Returns:
labels (dict): columns include 'probas' (from model) and 'true'
(ground truth). One row for each fold.
"""
RESULT_DIR = '../results/imus6_subjects7/sensors03_lumbar_ankles/'\
'iteration0/'
data = selectFeats(data, ['lumbar','ankle_r','ankle_l'])
test_dset = (data['X'], data['y'])
subject = str(int(data['subjectID']))
model = load_model_and_weights(subject, RESULT_DIR)
labels = make_predictions(model, test_dset)
return labels
| 8,781
|
def breakOnEnter(func=None, *, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function is called.
Parameters
----------
func : The function to wrap.
debugger : The debugger used when debug mode is entered. This can
be either the debugging module itself or a string containing
the name of the debugging module. Currently, pdb and ipdb are
supported.
"""
if func is None:
return partial(breakOnEnter, debugger=debugger)
debugger = import_(debugger)
@wraps(func)
def wrapper(*args, **kwargs):
return debugger.runcall(func, *args, **kwargs)
return wrapper
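
# Hedged usage sketch (editor's addition): both decorator forms accepted by
# breakOnEnter above. partial, wraps and import_ are assumed to come from the
# original module's imports; calling the wrapped functions would drop into
# the chosen debugger, so no call is made here.
@breakOnEnter
def _demo_add(a, b):
    return a + b

@breakOnEnter(debugger='pdb')
def _demo_mul(a, b):
    return a * b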
| 8,782
|
def linear_search(iterable, item):
"""Returns the index of the item in the unsorted iterable.
Iterates through a collection, comparing each item to the target item, and
returns the index of the first item that is equal to the target item.
* O(n) time complexity
* O(1) space complexity
Args:
iterable: A collection that is iterable.
item: An object to search for.
Returns:
        The index of the first matching item in the iterable, or -1 if not found.
Raises:
TypeError: If iterable is not iterable.
"""
try:
_ = iter(iterable)
except TypeError:
raise TypeError('\'{}\' object is not iterable'.format(
type(iterable).__name__))
for index, _item in enumerate(iterable):
if _item == item:
return index
return -1
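
# Hedged usage sketch (editor's addition):
print(linear_search(['ada', 'grace', 'alan'], 'alan'))    # 2
print(linear_search(['ada', 'grace', 'alan'], 'edsger'))  # -1
print(linear_search(iter(range(5)), 3))                   # 3 (any iterable works)
# linear_search(42, 'x') would raise TypeError: 'int' object is not iterable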
| 8,783
|
def create_MD_tag(reference_seq, query_seq):
"""Create MD tag
Args:
reference_seq (str) : reference sequence of alignment
query_seq (str) : query bases of alignment
Returns:
md_tag(str) : md description of the alignment
"""
no_change = 0
md = []
for ref_base, query_base in zip(reference_seq, query_seq):
if ref_base.upper() == query_base:
no_change += 1
        else:
            # SAM MD tags expect a (possibly zero) match count before every
            # mismatched base, so emit the counter unconditionally.
            md.append(str(no_change))
            md.append(ref_base)
            no_change = 0
    md.append(str(no_change))
return ''.join(md)
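
# Hedged usage sketch (editor's addition): one internal mismatch (T vs C)
# flanked by three matching bases on each side yields "3T3".
print(create_MD_tag('GATTACA', 'GATCACA'))  # 3T3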
| 8,784
|
def HDFScopyMerge (src_dir, dst_file, overwrite=False, deleteSource=False):
"""
    copyMerge() merges the files in an HDFS directory into a single HDFS file.
File names are sorted in alphabetical order for merge order.
Inspired by https://hadoop.apache.org/docs/r2.7.1/api/src-html/org/apache/hadoop/fs/FileUtil.html#line.382
:param src_dir: source directory to get files from
:param dst_file: destination file to merge file to
    :param overwrite: whether to overwrite the destination file if it already exists (this also overwrites the temp file if one exists)
:param deleteSource: drop source directory after merge is complete
"""
sparkutils_init()
def debug_print (message):
if debug:
print("HDFScopyMerge(): " + message)
# check files that will be merged
files = []
for f in fs.listStatus(hadoop.fs.Path(src_dir)):
if f.isFile():
files.append(f.getPath())
if not files:
raise ValueError("Source directory {} is empty".format(src_dir))
# determine order of files in which they will be written:
files.sort(key=lambda f: str(f))
if overwrite and hdfs_exists(dst_file):
hdfs_drop(dst_file)
debug_print("Target file {} dropped".format(dst_file))
# use temp file for the duration of the merge operation
dst_file_tmp = "{}.IN_PROGRESS.tmp".format(dst_file)
# dst_permission = hadoop.fs.permission.FsPermission.valueOf(permission) # , permission='-rw-r-----'
out_stream = fs.create(hadoop.fs.Path(dst_file_tmp), overwrite)
try:
# loop over files in alphabetical order and append them one by one to the target file
for filename in files:
debug_print("Appending file {} into {}".format(filename, dst_file_tmp))
in_stream = fs.open(filename) # InputStream object
try:
hadoop.io.IOUtils.copyBytes(in_stream, out_stream, conf, False) # False means don't close out_stream
finally:
in_stream.close()
finally:
out_stream.close()
if deleteSource:
hdfs_drop(src_dir)
debug_print("Source directory {} removed.".format(src_dir))
try:
hdfs_rename(dst_file_tmp, dst_file)
debug_print("Temp file renamed to {}".format(dst_file))
except:
hdfs_drop(dst_file_tmp) # drop temp file if we can't rename it to target name
raise
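
# Hedged usage sketch (editor's addition): merge the part files written by a
# Spark job into a single HDFS file. The paths are made up, and the
# sparkutils/Hadoop globals (fs, hadoop, conf, debug) assumed by
# HDFScopyMerge must already be configured before calling this.
def _hdfs_copy_merge_example():
    HDFScopyMerge('/data/out/report_parts', '/data/out/report.csv',
                  overwrite=True, deleteSource=False)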
| 8,785
|
def get_diff(base, head=None):
"""Return a git diff between the base and head revision.
:type base: str
:type head: str | None
:rtype: list[str]
"""
if not head or head == 'HEAD':
head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
cache = '/tmp/git-diff-cache-%s-%s.log' % (base, head)
if os.path.exists(cache):
with open(cache, 'r') as cache_fd:
lines = cache_fd.read().splitlines()
else:
lines = subprocess.check_output(['git', 'diff', base, head]).splitlines()
with open(cache, 'w') as cache_fd:
cache_fd.write('\n'.join(lines))
assert lines
return lines
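
# Hedged usage sketch (editor's addition): diff the current HEAD against a
# base revision; must be run inside a git checkout, and the branch name here
# is only illustrative.
def _get_diff_example():
    lines = get_diff('origin/master')
    print('%d diff lines' % len(lines))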
| 8,786
|
def split_last(dataframe, target_col, sort_col='date', cut=.9):
"""Splits the dataframe on sort_column at the given cut ratio, and splits
the target column
Args:
dataframe: dataframe to be cut
sort_col: column to be sorted on. Default='date'
cut: cut ratio for the train/eval sets
Returns:
X_train: dataframe of the first cut of the data set without the target
y_train: dataframe of the first cut of the data set only target values
X_eval: dataframe of the remaining slice of the data set without target
y_eval: dataframe of the remaining slice of the data set only targets
"""
    if sort_col is not None:
        dataframe = dataframe.sort_values(by=sort_col)
    cutoff = int(dataframe.shape[0] * cut)
    first_df = dataframe.reset_index(drop=True).iloc[:cutoff]
    last_df = dataframe.reset_index(drop=True).iloc[cutoff:]
X_train = first_df.drop(columns=[target_col])
y_train = np.array(first_df[target_col]).ravel()
X_eval = last_df.drop(columns=[target_col])
y_eval = np.array(last_df[target_col]).ravel()
return X_train, y_train, X_eval, y_eval
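
# Hedged usage sketch (editor's addition): a tiny frame with a 'date' sort
# column and a 'price' target, split 75/25. pandas/numpy imports are added
# here in case the original module does not expose them.
import numpy as np
import pandas as pd
_df = pd.DataFrame({
    'date': pd.date_range('2021-01-01', periods=8, freq='D'),
    'feature': range(8),
    'price': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
})
X_tr, y_tr, X_ev, y_ev = split_last(_df, target_col='price', cut=.75)
print(X_tr.shape, y_tr.shape, X_ev.shape, y_ev.shape)  # (6, 2) (6,) (2, 2) (2,)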
| 8,787
|
def genparams_rst255(ctx: click.Context) -> None:
"""
Generate Ristretto255 system parameters.
For details about Ristretto255, refer to https://ristretto.group/
"""
from .ristretto_255 import create_ristretto_255_parameters
pvss = pvss_from_datadir(ctx.obj)
params = create_ristretto_255_parameters(pvss)
ctx.obj.mkdir(exist_ok=True)
write_public(ctx.obj / "parameters", params)
| 8,788
|
def assert_almost_equal(
actual: numpy.float64, desired: numpy.float64, err_msg: Literal["orth.chebyt(1)"]
):
"""
usage.scipy: 1
"""
...
| 8,789
|
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
| 8,790
|
def readCSV(associated_ipaddr, ipaddr, timestamp):
"""
Method that extracts observations from a CSV file.
Parameters:
associated_ipaddr (str): The name of the column that specifies IP addresses of VPN clients
ipaddr (str): The name of the column that specifies IP addresses of users on the public internet
timestamp (str): The name of the column that specifies the observation creation time
Returns:
observations (list): A list of observation dictionaries
"""
observations = []
with open(CSV_FILE, "rt", encoding="ascii") as f:
reader = csv.reader(f)
header = next(reader, None)
for row in reader:
observations.append(
{
"associated_ipaddr": row[header.index(associated_ipaddr)],
"ipaddr": row[header.index(ipaddr)],
"timestamp": translateTime(row[header.index(timestamp)]),
}
)
return observations
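
# Hedged usage sketch (editor's addition): the column names are illustrative,
# and the CSV_FILE / translateTime module globals assumed by readCSV above
# must exist for this to run.
def _read_csv_example():
    observations = readCSV('vpn_client_ip', 'public_ip', 'seen_at')
    print(len(observations), 'observations')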
| 8,791
|
def interp2d(x, y, z, outshape, verbose=True, doplot=True):
"""
Parameters
----------
x, y : int
X and Y indices of `z`.
z : float
Values for given `x` and `y`.
outshape : tuple of int
Shape of 2D output array.
verbose : bool, optional
Print info to screen.
doplot : bool, optional
Plot results.
Returns
-------
im : float array
2-D array of interpolated data.
"""
# Print the data to screen for checking
    if verbose:
        print('DATA USED FOR INTERPOLATION:')
        for i, (xx, yy, zz) in enumerate(zip(x, y, z), start=1):
            print('{}: {} {} {}'.format(i, xx, yy, zz))
# Perform 2D interpolation
func = interpolate.interpolate.interp2d(x, y, z)
im = func(np.mgrid[:outshape[1]], np.mgrid[:outshape[0]])
if doplot:
# Get min/max to use same colorbar on for base and overlay
pmin = im.min()
pmax = im.max()
fig, ax = plt.subplots()
# Show interpolated 2D image
p = ax.imshow(im, vmin=pmin, vmax=pmax)
# Overlay data points used for interpolation
ax.scatter(x, y, s=100, c=z, vmin=pmin, vmax=pmax, marker='s')
# Display colorbar.
# Shrink to make it same width as display.
c = fig.colorbar(p, orientation='horizontal', shrink=0.7)
c.set_label('Pixel value')
# Plot labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Interpolated image')
plt.draw()
return im
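
# Hedged usage sketch (editor's addition): interpolate nine scattered samples
# onto a 50x40 grid without printing or plotting. Assumes scipy.interpolate
# is imported as `interpolate` (as in the function above); note that SciPy
# has deprecated interp2d in recent releases.
import numpy as np
_x = np.array([0, 20, 39, 0, 20, 39, 0, 20, 39])
_y = np.array([0, 0, 0, 25, 25, 25, 49, 49, 49])
_z = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
_im = interp2d(_x, _y, _z, outshape=(50, 40), verbose=False, doplot=False)
print(_im.shape)  # (50, 40)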
| 8,792
|
def setup_app(app_name=__name__, db_uri=None):
"""
Set up Flask application and database.
Args:
app_name: Name of the Flask application.
db_uri: Database URI for SQLAlchemy to connected to.
"""
global app, db
# Flask application
app = Flask(app_name)
# Application configuration
app.config.update({
"SQLALCHEMY_DATABASE_URI": db_uri,
"SQLALCHEMY_TRACK_MODIFICATIONS": False
})
# Database object
db = SQLAlchemy(app)
# Depot
DepotManager.configure("default", {
"depot.storage_path": DATA_ROOT
})
app.wsgi_app = DepotManager.make_middleware(app.wsgi_app, replace_wsgi_filewrapper=True)
# Import all related modules
import_module("app.models")
import_module("app.views")
| 8,793
|
def run_cmd(cmd: Text, split: bool = True, shell=False, verbose: bool = True):
"""Run a system command and print output."""
print(f'CMD: {cmd}')
cmd = shlex.split(cmd) if split else [cmd]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell)
while True:
output = process.stdout.readline().decode('utf-8').strip()
if output == '' and process.poll() is not None:
break
if output and verbose:
print(output)
return_code = process.poll()
if return_code != 0:
print(f'\tERROR ({return_code}) running command!')
return return_code
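
# Hedged usage sketch (editor's addition): run a simple command and check the
# return code; assumes an 'echo' binary on PATH.
rc = run_cmd('echo hello world')
print('return code:', rc)  # 0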
| 8,794
|
def upload_command():
"""
Upload object into MWDB
"""
pass
| 8,795
|
def _nms_boxes(detections, nms_threshold):
"""Apply the Non-Maximum Suppression (NMS) algorithm on the bounding
boxes with their confidence scores and return an array with the
indexes of the bounding boxes we want to keep.
# Args
detections: Nx7 numpy arrays of
[[x, y, w, h, box_confidence, class_id, class_prob],
......]
"""
x_coord = detections[:, 0]
y_coord = detections[:, 1]
width = detections[:, 2]
height = detections[:, 3]
box_confidences = detections[:, 4] * detections[:, 6]
areas = width * height
ordered = box_confidences.argsort()[::-1]
keep = list()
while ordered.size > 0:
# Index of the current element:
i = ordered[0]
keep.append(i)
xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])
yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])
xx2 = np.minimum(x_coord[i] + width[i], x_coord[ordered[1:]] + width[ordered[1:]])
yy2 = np.minimum(y_coord[i] + height[i], y_coord[ordered[1:]] + height[ordered[1:]])
width1 = np.maximum(0.0, xx2 - xx1 + 1)
height1 = np.maximum(0.0, yy2 - yy1 + 1)
intersection = width1 * height1
union = (areas[i] + areas[ordered[1:]] - intersection)
iou = intersection / union
indexes = np.where(iou <= nms_threshold)[0]
ordered = ordered[indexes + 1]
keep = np.array(keep)
return keep
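
# Hedged usage sketch (editor's addition): two heavily overlapping boxes and
# one distant box; NMS keeps the higher-scoring overlap plus the distant box.
import numpy as np
_dets = np.array([
    # x,   y,   w,   h,  box_conf, class_id, class_prob
    [10., 10., 20., 20., 0.9,      0.,       1.0],
    [12., 12., 20., 20., 0.8,      0.,       1.0],
    [80., 80., 10., 10., 0.7,      0.,       1.0],
])
print(_nms_boxes(_dets, nms_threshold=0.5))  # [0 2]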
| 8,796
|
def test_list_registered_one_private(caplog, store_mock):
"""List registered with one private item in the response."""
caplog.set_level(logging.INFO, logger="charmcraft.commands")
store_response = [
Charm(name='charm', private=True, status='status'),
]
store_mock.list_registered_names.return_value = store_response
ListNamesCommand('group').run(noargs)
assert store_mock.mock_calls == [
call.list_registered_names(),
]
expected = [
"Name Visibility Status",
"charm private status",
]
assert expected == [rec.message for rec in caplog.records]
| 8,797
|
def dht_get_key(data_key):
"""
Given a key (a hash of data), go fetch the data.
"""
dht_client = get_dht_client()
ret = dht_client.get(data_key)
if ret is not None:
        if isinstance(ret, list):
            ret = ret[0]
        if isinstance(ret, dict) and "value" in ret:
            ret = ret["value"]
else:
raise Exception("No data returned from %s" % data_key)
return ret
| 8,798
|
def dumpJSON(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, int_as_string_bitcount=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If *skipkeys* is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If *ensure_ascii* is false, then some of the chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If *check_circular* is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. An integer is also accepted
and is converted to a string with that many spaces.
If specified, *separators* should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
*encoding* is the character encoding for str instances, default is UTF-8.
*default(obj)* is a function that should return a serializable version
of obj or raise ``TypeError``. The default simply raises ``TypeError``.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise. Note that this is still a
lossy operation that will not round-trip correctly and should be used
sparingly.
If *int_as_string_bitcount* is a positive number (n), then int of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
of subclassing whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and int_as_string_bitcount is None
and not kw
):
iterable = _default_json_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
int_as_string_bitcount=int_as_string_bitcount,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
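
# Hedged usage sketch (editor's addition): serialise a small object to an
# in-memory buffer. JSONEncoder and _default_json_encoder are module globals
# of the original (simplejson-style) module and are assumed to be available.
def _dump_json_example():
    import io
    buf = io.StringIO()
    dumpJSON({'id': 1, 'tags': ('a', 'b')}, buf, indent='  ', sort_keys=True)
    return buf.getvalue()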
| 8,799
|